diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_41/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_41/config.json new file mode 100644 index 0000000000000000000000000000000000000000..302f344e0e341888c8414116181e3fe9fea5ac7c --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_41/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 41, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "49d55af7-fa32-4333-8039-62b6137d700c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_41/training_log_49d55af7-fa32-4333-8039-62b6137d700c.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_41/training_log_49d55af7-fa32-4333-8039-62b6137d700c.txt new file mode 100644 index 0000000000000000000000000000000000000000..3dde8d6d96a5827272b4c493f0a404f67e26d7ce --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_41/training_log_49d55af7-fa32-4333-8039-62b6137d700c.txt @@ -0,0 +1,2984 @@ +[2025-09-02 04:03:41] [Rank 0] PRINT: --- Script Start: Tue Sep 2 04:03:41 2025 --- +[2025-09-02 04:03:41] [Rank 0] PRINT: --- Script Start: Tue Sep 2 04:03:41 2025 --- +[2025-09-02 04:03:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 04:03:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=13, 
model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 04:03:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 04:03:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 04:03:41] [Rank 0] PRINT: Using fixed seed: 41 +[2025-09-02 04:03:41] [Rank 0] PRINT: Using fixed seed: 41 +[2025-09-02 04:03:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_41 +[2025-09-02 04:03:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_41 +[2025-09-02 04:03:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# 
Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 04:03:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 04:03:41] [Rank 0] PRINT: Constructing model... +[2025-09-02 04:03:41] [Rank 0] PRINT: Constructing model... +[2025-09-02 04:03:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 04:03:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 04:03:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 04:03:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 04:03:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 04:03:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 04:03:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 04:03:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 04:03:43] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 04:03:43] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 04:03:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 04:03:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 04:03:43] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 04:03:43] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 04:03:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 04:03:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 04:03:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 04:03:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 04:03:43] [Rank 0] PRINT: Starting warmup... +[2025-09-02 04:03:43] [Rank 0] PRINT: Starting warmup... +[2025-09-02 04:13:06] [Rank 0] PRINT: Warmup complete. +[2025-09-02 04:13:06] [Rank 0] PRINT: Warmup complete. +[2025-09-02 04:13:06] [Rank 0] PRINT: Starting training... +[2025-09-02 04:13:06] [Rank 0] PRINT: Starting training... 
+[2025-09-02 04:13:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:13:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:20:24] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 04:20:24] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 04:20:25] [Rank 0] step:21/10000 train_time:1439ms step_avg:68.52ms +[2025-09-02 04:20:25] [Rank 0] step:21/10000 train_time:1439ms step_avg:68.52ms +[2025-09-02 04:20:27] [Rank 0] step:41/10000 train_time:2899ms step_avg:70.70ms +[2025-09-02 04:20:27] [Rank 0] step:41/10000 train_time:2899ms step_avg:70.70ms +[2025-09-02 04:20:28] [Rank 0] step:61/10000 train_time:4362ms step_avg:71.51ms +[2025-09-02 04:20:28] [Rank 0] step:61/10000 train_time:4362ms step_avg:71.51ms +[2025-09-02 04:20:30] [Rank 0] step:81/10000 train_time:5827ms step_avg:71.93ms +[2025-09-02 04:20:30] [Rank 0] step:81/10000 train_time:5827ms step_avg:71.93ms +[2025-09-02 04:20:31] [Rank 0] step:101/10000 train_time:7290ms step_avg:72.18ms +[2025-09-02 04:20:31] [Rank 0] step:101/10000 train_time:7290ms step_avg:72.18ms +[2025-09-02 04:20:33] [Rank 0] step:121/10000 train_time:8754ms step_avg:72.35ms +[2025-09-02 04:20:33] [Rank 0] step:121/10000 
train_time:8754ms step_avg:72.35ms +[2025-09-02 04:20:34] [Rank 0] step:141/10000 train_time:10220ms step_avg:72.48ms +[2025-09-02 04:20:34] [Rank 0] step:141/10000 train_time:10220ms step_avg:72.48ms +[2025-09-02 04:20:36] [Rank 0] step:161/10000 train_time:11683ms step_avg:72.57ms +[2025-09-02 04:20:36] [Rank 0] step:161/10000 train_time:11683ms step_avg:72.57ms +[2025-09-02 04:20:37] [Rank 0] step:181/10000 train_time:13147ms step_avg:72.64ms +[2025-09-02 04:20:37] [Rank 0] step:181/10000 train_time:13147ms step_avg:72.64ms +[2025-09-02 04:20:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:20:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:20:50] [Rank 0] PRINT: step:200/10000 val_loss:6.2230 svd_entropy: attn_qk:H=0.6101,top10E=0.54,eRank=98.2,q75/q25=13.06 attn_vo:H=0.5194,top10E=0.57,eRank=77.0,q75/q25=inf mlp_w1:H=0.6599,top10E=0.52,eRank=95.4,q75/q25=2.95 mlp_w2:H=0.8065,top10E=0.17,eRank=216.8,q75/q25=16.81 vo_prod:H=0.3255,top10E=0.81,eRank=14.4,q75/q25=inf train_time:14688ms step_avg:73.44ms +[2025-09-02 04:20:50] [Rank 0] PRINT: step:200/10000 val_loss:6.2230 svd_entropy: attn_qk:H=0.6101,top10E=0.54,eRank=98.2,q75/q25=13.06 attn_vo:H=0.5194,top10E=0.57,eRank=77.0,q75/q25=inf mlp_w1:H=0.6599,top10E=0.52,eRank=95.4,q75/q25=2.95 mlp_w2:H=0.8065,top10E=0.17,eRank=216.8,q75/q25=16.81 vo_prod:H=0.3255,top10E=0.81,eRank=14.4,q75/q25=inf train_time:14688ms step_avg:73.44ms +[2025-09-02 04:20:50] [Rank 0] step:201/10000 train_time:14704ms step_avg:73.15ms +[2025-09-02 04:20:50] [Rank 0] step:201/10000 train_time:14704ms step_avg:73.15ms +[2025-09-02 04:20:52] [Rank 0] step:221/10000 train_time:16089ms step_avg:72.80ms +[2025-09-02 04:20:52] [Rank 0] step:221/10000 train_time:16089ms step_avg:72.80ms +[2025-09-02 04:20:53] [Rank 0] step:241/10000 train_time:17548ms 
step_avg:72.81ms +[2025-09-02 04:20:53] [Rank 0] step:241/10000 train_time:17548ms step_avg:72.81ms +[2025-09-02 04:20:55] [Rank 0] step:261/10000 train_time:19009ms step_avg:72.83ms +[2025-09-02 04:20:55] [Rank 0] step:261/10000 train_time:19009ms step_avg:72.83ms +[2025-09-02 04:20:56] [Rank 0] step:281/10000 train_time:20470ms step_avg:72.85ms +[2025-09-02 04:20:56] [Rank 0] step:281/10000 train_time:20470ms step_avg:72.85ms +[2025-09-02 04:20:58] [Rank 0] step:301/10000 train_time:21931ms step_avg:72.86ms +[2025-09-02 04:20:58] [Rank 0] step:301/10000 train_time:21931ms step_avg:72.86ms +[2025-09-02 04:20:59] [Rank 0] step:321/10000 train_time:23392ms step_avg:72.87ms +[2025-09-02 04:20:59] [Rank 0] step:321/10000 train_time:23392ms step_avg:72.87ms +[2025-09-02 04:21:01] [Rank 0] step:341/10000 train_time:24855ms step_avg:72.89ms +[2025-09-02 04:21:01] [Rank 0] step:341/10000 train_time:24855ms step_avg:72.89ms +[2025-09-02 04:21:02] [Rank 0] step:361/10000 train_time:26315ms step_avg:72.89ms +[2025-09-02 04:21:02] [Rank 0] step:361/10000 train_time:26315ms step_avg:72.89ms +[2025-09-02 04:21:04] [Rank 0] step:381/10000 train_time:27777ms step_avg:72.90ms +[2025-09-02 04:21:04] [Rank 0] step:381/10000 train_time:27777ms step_avg:72.90ms +[2025-09-02 04:21:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:21:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:21:17] [Rank 0] PRINT: step:400/10000 val_loss:5.7205 svd_entropy: attn_qk:H=0.6487,top10E=0.45,eRank=111.8,q75/q25=15.64 attn_vo:H=0.6036,top10E=0.41,eRank=106.8,q75/q25=inf mlp_w1:H=0.6860,top10E=0.42,eRank=115.5,q75/q25=4.55 mlp_w2:H=0.9296,top10E=0.06,eRank=482.7,q75/q25=6.40 vo_prod:H=0.4255,top10E=0.65,eRank=24.6,q75/q25=inf train_time:29313ms step_avg:73.28ms +[2025-09-02 04:21:17] [Rank 0] PRINT: step:400/10000 val_loss:5.7205 svd_entropy: attn_qk:H=0.6487,top10E=0.45,eRank=111.8,q75/q25=15.64 attn_vo:H=0.6036,top10E=0.41,eRank=106.8,q75/q25=inf mlp_w1:H=0.6860,top10E=0.42,eRank=115.5,q75/q25=4.55 mlp_w2:H=0.9296,top10E=0.06,eRank=482.7,q75/q25=6.40 vo_prod:H=0.4255,top10E=0.65,eRank=24.6,q75/q25=inf train_time:29313ms step_avg:73.28ms +[2025-09-02 04:21:17] [Rank 0] step:401/10000 train_time:29328ms step_avg:73.14ms +[2025-09-02 04:21:17] [Rank 0] step:401/10000 train_time:29328ms step_avg:73.14ms +[2025-09-02 04:21:18] [Rank 0] step:421/10000 train_time:30732ms step_avg:73.00ms +[2025-09-02 04:21:18] [Rank 0] step:421/10000 train_time:30732ms step_avg:73.00ms +[2025-09-02 04:21:20] [Rank 0] step:441/10000 train_time:32192ms step_avg:73.00ms +[2025-09-02 04:21:20] [Rank 0] step:441/10000 train_time:32192ms step_avg:73.00ms +[2025-09-02 04:21:21] [Rank 0] step:461/10000 train_time:33653ms step_avg:73.00ms +[2025-09-02 04:21:21] [Rank 0] step:461/10000 train_time:33653ms step_avg:73.00ms +[2025-09-02 04:21:23] [Rank 0] step:481/10000 train_time:35114ms step_avg:73.00ms +[2025-09-02 04:21:23] [Rank 0] step:481/10000 train_time:35114ms step_avg:73.00ms +[2025-09-02 04:21:24] [Rank 0] step:501/10000 train_time:36575ms step_avg:73.00ms +[2025-09-02 04:21:24] [Rank 0] step:501/10000 train_time:36575ms step_avg:73.00ms +[2025-09-02 04:21:26] [Rank 0] step:521/10000 train_time:38037ms step_avg:73.01ms +[2025-09-02 04:21:26] [Rank 0] step:521/10000 train_time:38037ms step_avg:73.01ms +[2025-09-02 04:21:27] [Rank 0] step:541/10000 train_time:39557ms 
step_avg:73.12ms +[2025-09-02 04:21:27] [Rank 0] step:541/10000 train_time:39557ms step_avg:73.12ms +[2025-09-02 04:21:29] [Rank 0] step:561/10000 train_time:41020ms step_avg:73.12ms +[2025-09-02 04:21:29] [Rank 0] step:561/10000 train_time:41020ms step_avg:73.12ms +[2025-09-02 04:21:30] [Rank 0] step:581/10000 train_time:42481ms step_avg:73.12ms +[2025-09-02 04:21:30] [Rank 0] step:581/10000 train_time:42481ms step_avg:73.12ms +[2025-09-02 04:21:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:21:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:21:43] [Rank 0] PRINT: step:600/10000 val_loss:5.4275 svd_entropy: attn_qk:H=0.6763,top10E=0.39,eRank=123.4,q75/q25=20.19 attn_vo:H=0.6513,top10E=0.34,eRank=132.2,q75/q25=inf mlp_w1:H=0.7292,top10E=0.35,eRank=145.9,q75/q25=6.36 mlp_w2:H=0.9487,top10E=0.05,eRank=546.9,q75/q25=4.51 vo_prod:H=0.4820,top10E=0.54,eRank=34.3,q75/q25=inf train_time:44018ms step_avg:73.36ms +[2025-09-02 04:21:43] [Rank 0] PRINT: step:600/10000 val_loss:5.4275 svd_entropy: attn_qk:H=0.6763,top10E=0.39,eRank=123.4,q75/q25=20.19 attn_vo:H=0.6513,top10E=0.34,eRank=132.2,q75/q25=inf mlp_w1:H=0.7292,top10E=0.35,eRank=145.9,q75/q25=6.36 mlp_w2:H=0.9487,top10E=0.05,eRank=546.9,q75/q25=4.51 vo_prod:H=0.4820,top10E=0.54,eRank=34.3,q75/q25=inf train_time:44018ms step_avg:73.36ms +[2025-09-02 04:21:43] [Rank 0] step:601/10000 train_time:44034ms step_avg:73.27ms +[2025-09-02 04:21:43] [Rank 0] step:601/10000 train_time:44034ms step_avg:73.27ms +[2025-09-02 04:21:45] [Rank 0] step:621/10000 train_time:45430ms step_avg:73.16ms +[2025-09-02 04:21:45] [Rank 0] step:621/10000 train_time:45430ms step_avg:73.16ms +[2025-09-02 04:21:46] [Rank 0] step:641/10000 train_time:46888ms step_avg:73.15ms +[2025-09-02 04:21:46] [Rank 0] step:641/10000 train_time:46888ms step_avg:73.15ms 
+[2025-09-02 04:21:48] [Rank 0] step:661/10000 train_time:48347ms step_avg:73.14ms +[2025-09-02 04:21:48] [Rank 0] step:661/10000 train_time:48347ms step_avg:73.14ms +[2025-09-02 04:21:49] [Rank 0] step:681/10000 train_time:49807ms step_avg:73.14ms +[2025-09-02 04:21:49] [Rank 0] step:681/10000 train_time:49807ms step_avg:73.14ms +[2025-09-02 04:21:51] [Rank 0] step:701/10000 train_time:51268ms step_avg:73.14ms +[2025-09-02 04:21:51] [Rank 0] step:701/10000 train_time:51268ms step_avg:73.14ms +[2025-09-02 04:21:52] [Rank 0] step:721/10000 train_time:52729ms step_avg:73.13ms +[2025-09-02 04:21:52] [Rank 0] step:721/10000 train_time:52729ms step_avg:73.13ms +[2025-09-02 04:21:54] [Rank 0] step:741/10000 train_time:54189ms step_avg:73.13ms +[2025-09-02 04:21:54] [Rank 0] step:741/10000 train_time:54189ms step_avg:73.13ms +[2025-09-02 04:21:55] [Rank 0] step:761/10000 train_time:55663ms step_avg:73.14ms +[2025-09-02 04:21:55] [Rank 0] step:761/10000 train_time:55663ms step_avg:73.14ms +[2025-09-02 04:21:57] [Rank 0] step:781/10000 train_time:57136ms step_avg:73.16ms +[2025-09-02 04:21:57] [Rank 0] step:781/10000 train_time:57136ms step_avg:73.16ms +[2025-09-02 04:21:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:21:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:22:10] [Rank 0] PRINT: step:800/10000 val_loss:5.2010 svd_entropy: attn_qk:H=0.6958,top10E=0.36,eRank=132.8,q75/q25=26.68 attn_vo:H=0.6834,top10E=0.29,eRank=153.3,q75/q25=inf mlp_w1:H=0.7612,top10E=0.31,eRank=173.6,q75/q25=7.29 mlp_w2:H=0.9550,top10E=0.05,eRank=570.2,q75/q25=3.99 vo_prod:H=0.5221,top10E=0.46,eRank=44.1,q75/q25=inf train_time:58685ms step_avg:73.36ms +[2025-09-02 04:22:10] [Rank 0] PRINT: step:800/10000 val_loss:5.2010 svd_entropy: attn_qk:H=0.6958,top10E=0.36,eRank=132.8,q75/q25=26.68 attn_vo:H=0.6834,top10E=0.29,eRank=153.3,q75/q25=inf mlp_w1:H=0.7612,top10E=0.31,eRank=173.6,q75/q25=7.29 mlp_w2:H=0.9550,top10E=0.05,eRank=570.2,q75/q25=3.99 vo_prod:H=0.5221,top10E=0.46,eRank=44.1,q75/q25=inf train_time:58685ms step_avg:73.36ms +[2025-09-02 04:22:10] [Rank 0] step:801/10000 train_time:58701ms step_avg:73.29ms +[2025-09-02 04:22:10] [Rank 0] step:801/10000 train_time:58701ms step_avg:73.29ms +[2025-09-02 04:22:11] [Rank 0] step:821/10000 train_time:60103ms step_avg:73.21ms +[2025-09-02 04:22:11] [Rank 0] step:821/10000 train_time:60103ms step_avg:73.21ms +[2025-09-02 04:22:13] [Rank 0] step:841/10000 train_time:61576ms step_avg:73.22ms +[2025-09-02 04:22:13] [Rank 0] step:841/10000 train_time:61576ms step_avg:73.22ms +[2025-09-02 04:22:14] [Rank 0] step:861/10000 train_time:63049ms step_avg:73.23ms +[2025-09-02 04:22:14] [Rank 0] step:861/10000 train_time:63049ms step_avg:73.23ms +[2025-09-02 04:22:16] [Rank 0] step:881/10000 train_time:64521ms step_avg:73.24ms +[2025-09-02 04:22:16] [Rank 0] step:881/10000 train_time:64521ms step_avg:73.24ms +[2025-09-02 04:22:17] [Rank 0] step:901/10000 train_time:65993ms step_avg:73.24ms +[2025-09-02 04:22:17] [Rank 0] step:901/10000 train_time:65993ms step_avg:73.24ms +[2025-09-02 04:22:19] [Rank 0] step:921/10000 train_time:67465ms step_avg:73.25ms +[2025-09-02 04:22:19] [Rank 0] step:921/10000 train_time:67465ms step_avg:73.25ms +[2025-09-02 04:22:20] [Rank 0] step:941/10000 train_time:68939ms 
step_avg:73.26ms +[2025-09-02 04:22:20] [Rank 0] step:941/10000 train_time:68939ms step_avg:73.26ms +[2025-09-02 04:22:22] [Rank 0] step:961/10000 train_time:70412ms step_avg:73.27ms +[2025-09-02 04:22:22] [Rank 0] step:961/10000 train_time:70412ms step_avg:73.27ms +[2025-09-02 04:22:23] [Rank 0] step:981/10000 train_time:71886ms step_avg:73.28ms +[2025-09-02 04:22:23] [Rank 0] step:981/10000 train_time:71886ms step_avg:73.28ms +[2025-09-02 04:22:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:22:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:22:36] [Rank 0] PRINT: step:1000/10000 val_loss:5.0258 svd_entropy: attn_qk:H=0.7109,top10E=0.33,eRank=141.0,q75/q25=33.41 attn_vo:H=0.7060,top10E=0.26,eRank=171.1,q75/q25=inf mlp_w1:H=0.7851,top10E=0.28,eRank=198.3,q75/q25=7.56 mlp_w2:H=0.9598,top10E=0.04,eRank=588.3,q75/q25=3.64 vo_prod:H=0.5493,top10E=0.41,eRank=52.9,q75/q25=inf train_time:73434ms step_avg:73.43ms +[2025-09-02 04:22:36] [Rank 0] PRINT: step:1000/10000 val_loss:5.0258 svd_entropy: attn_qk:H=0.7109,top10E=0.33,eRank=141.0,q75/q25=33.41 attn_vo:H=0.7060,top10E=0.26,eRank=171.1,q75/q25=inf mlp_w1:H=0.7851,top10E=0.28,eRank=198.3,q75/q25=7.56 mlp_w2:H=0.9598,top10E=0.04,eRank=588.3,q75/q25=3.64 vo_prod:H=0.5493,top10E=0.41,eRank=52.9,q75/q25=inf train_time:73434ms step_avg:73.43ms +[2025-09-02 04:22:36] [Rank 0] step:1001/10000 train_time:73450ms step_avg:73.38ms +[2025-09-02 04:22:36] [Rank 0] step:1001/10000 train_time:73450ms step_avg:73.38ms +[2025-09-02 04:22:38] [Rank 0] step:1021/10000 train_time:74863ms step_avg:73.32ms +[2025-09-02 04:22:38] [Rank 0] step:1021/10000 train_time:74863ms step_avg:73.32ms +[2025-09-02 04:22:39] [Rank 0] step:1041/10000 train_time:76334ms step_avg:73.33ms +[2025-09-02 04:22:39] [Rank 0] step:1041/10000 train_time:76334ms 
step_avg:73.33ms +[2025-09-02 04:22:41] [Rank 0] step:1061/10000 train_time:77808ms step_avg:73.33ms +[2025-09-02 04:22:41] [Rank 0] step:1061/10000 train_time:77808ms step_avg:73.33ms +[2025-09-02 04:22:42] [Rank 0] step:1081/10000 train_time:79281ms step_avg:73.34ms +[2025-09-02 04:22:42] [Rank 0] step:1081/10000 train_time:79281ms step_avg:73.34ms +[2025-09-02 04:22:44] [Rank 0] step:1101/10000 train_time:80755ms step_avg:73.35ms +[2025-09-02 04:22:44] [Rank 0] step:1101/10000 train_time:80755ms step_avg:73.35ms +[2025-09-02 04:22:45] [Rank 0] step:1121/10000 train_time:82228ms step_avg:73.35ms +[2025-09-02 04:22:45] [Rank 0] step:1121/10000 train_time:82228ms step_avg:73.35ms +[2025-09-02 04:22:47] [Rank 0] step:1141/10000 train_time:83702ms step_avg:73.36ms +[2025-09-02 04:22:47] [Rank 0] step:1141/10000 train_time:83702ms step_avg:73.36ms +[2025-09-02 04:22:48] [Rank 0] step:1161/10000 train_time:85175ms step_avg:73.36ms +[2025-09-02 04:22:48] [Rank 0] step:1161/10000 train_time:85175ms step_avg:73.36ms +[2025-09-02 04:22:50] [Rank 0] step:1181/10000 train_time:86649ms step_avg:73.37ms +[2025-09-02 04:22:50] [Rank 0] step:1181/10000 train_time:86649ms step_avg:73.37ms +[2025-09-02 04:22:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:22:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:23:03] [Rank 0] PRINT: step:1200/10000 val_loss:4.8611 svd_entropy: attn_qk:H=0.7227,top10E=0.32,eRank=148.3,q75/q25=39.96 attn_vo:H=0.7245,top10E=0.23,eRank=187.9,q75/q25=inf mlp_w1:H=0.8032,top10E=0.26,eRank=220.2,q75/q25=7.49 mlp_w2:H=0.9629,top10E=0.04,eRank=600.7,q75/q25=3.42 vo_prod:H=0.5701,top10E=0.37,eRank=60.9,q75/q25=inf train_time:88199ms step_avg:73.50ms +[2025-09-02 04:23:03] [Rank 0] PRINT: step:1200/10000 val_loss:4.8611 svd_entropy: attn_qk:H=0.7227,top10E=0.32,eRank=148.3,q75/q25=39.96 attn_vo:H=0.7245,top10E=0.23,eRank=187.9,q75/q25=inf mlp_w1:H=0.8032,top10E=0.26,eRank=220.2,q75/q25=7.49 mlp_w2:H=0.9629,top10E=0.04,eRank=600.7,q75/q25=3.42 vo_prod:H=0.5701,top10E=0.37,eRank=60.9,q75/q25=inf train_time:88199ms step_avg:73.50ms +[2025-09-02 04:23:03] [Rank 0] step:1201/10000 train_time:88215ms step_avg:73.45ms +[2025-09-02 04:23:03] [Rank 0] step:1201/10000 train_time:88215ms step_avg:73.45ms +[2025-09-02 04:23:04] [Rank 0] step:1221/10000 train_time:89636ms step_avg:73.41ms +[2025-09-02 04:23:04] [Rank 0] step:1221/10000 train_time:89636ms step_avg:73.41ms +[2025-09-02 04:23:06] [Rank 0] step:1241/10000 train_time:91110ms step_avg:73.42ms +[2025-09-02 04:23:06] [Rank 0] step:1241/10000 train_time:91110ms step_avg:73.42ms +[2025-09-02 04:23:07] [Rank 0] step:1261/10000 train_time:92585ms step_avg:73.42ms +[2025-09-02 04:23:07] [Rank 0] step:1261/10000 train_time:92585ms step_avg:73.42ms +[2025-09-02 04:23:09] [Rank 0] step:1281/10000 train_time:94058ms step_avg:73.43ms +[2025-09-02 04:23:09] [Rank 0] step:1281/10000 train_time:94058ms step_avg:73.43ms +[2025-09-02 04:23:10] [Rank 0] step:1301/10000 train_time:95531ms step_avg:73.43ms +[2025-09-02 04:23:10] [Rank 0] step:1301/10000 train_time:95531ms step_avg:73.43ms +[2025-09-02 04:23:12] [Rank 0] step:1321/10000 train_time:97006ms step_avg:73.43ms +[2025-09-02 04:23:12] [Rank 0] step:1321/10000 train_time:97006ms step_avg:73.43ms +[2025-09-02 04:23:13] [Rank 0] step:1341/10000 
train_time:98480ms step_avg:73.44ms +[2025-09-02 04:23:13] [Rank 0] step:1341/10000 train_time:98480ms step_avg:73.44ms +[2025-09-02 04:23:15] [Rank 0] step:1361/10000 train_time:99955ms step_avg:73.44ms +[2025-09-02 04:23:15] [Rank 0] step:1361/10000 train_time:99955ms step_avg:73.44ms +[2025-09-02 04:23:16] [Rank 0] step:1381/10000 train_time:101430ms step_avg:73.45ms +[2025-09-02 04:23:16] [Rank 0] step:1381/10000 train_time:101430ms step_avg:73.45ms +[2025-09-02 04:23:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:23:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:23:29] [Rank 0] PRINT: step:1400/10000 val_loss:4.7394 svd_entropy: attn_qk:H=0.7315,top10E=0.30,eRank=154.3,q75/q25=44.96 attn_vo:H=0.7385,top10E=0.22,eRank=202.0,q75/q25=inf mlp_w1:H=0.8177,top10E=0.24,eRank=239.7,q75/q25=7.29 mlp_w2:H=0.9650,top10E=0.04,eRank=608.8,q75/q25=3.28 vo_prod:H=0.5846,top10E=0.35,eRank=67.4,q75/q25=inf train_time:102981ms step_avg:73.56ms +[2025-09-02 04:23:29] [Rank 0] PRINT: step:1400/10000 val_loss:4.7394 svd_entropy: attn_qk:H=0.7315,top10E=0.30,eRank=154.3,q75/q25=44.96 attn_vo:H=0.7385,top10E=0.22,eRank=202.0,q75/q25=inf mlp_w1:H=0.8177,top10E=0.24,eRank=239.7,q75/q25=7.29 mlp_w2:H=0.9650,top10E=0.04,eRank=608.8,q75/q25=3.28 vo_prod:H=0.5846,top10E=0.35,eRank=67.4,q75/q25=inf train_time:102981ms step_avg:73.56ms +[2025-09-02 04:23:29] [Rank 0] step:1401/10000 train_time:102996ms step_avg:73.52ms +[2025-09-02 04:23:29] [Rank 0] step:1401/10000 train_time:102996ms step_avg:73.52ms +[2025-09-02 04:23:31] [Rank 0] step:1421/10000 train_time:104410ms step_avg:73.48ms +[2025-09-02 04:23:31] [Rank 0] step:1421/10000 train_time:104410ms step_avg:73.48ms +[2025-09-02 04:23:32] [Rank 0] step:1441/10000 train_time:105882ms step_avg:73.48ms +[2025-09-02 04:23:32] [Rank 0] 
step:1441/10000 train_time:105882ms step_avg:73.48ms +[2025-09-02 04:23:34] [Rank 0] step:1461/10000 train_time:107407ms step_avg:73.52ms +[2025-09-02 04:23:34] [Rank 0] step:1461/10000 train_time:107407ms step_avg:73.52ms +[2025-09-02 04:23:35] [Rank 0] step:1481/10000 train_time:108883ms step_avg:73.52ms +[2025-09-02 04:23:35] [Rank 0] step:1481/10000 train_time:108883ms step_avg:73.52ms +[2025-09-02 04:23:37] [Rank 0] step:1501/10000 train_time:110367ms step_avg:73.53ms +[2025-09-02 04:23:37] [Rank 0] step:1501/10000 train_time:110367ms step_avg:73.53ms +[2025-09-02 04:23:38] [Rank 0] step:1521/10000 train_time:111853ms step_avg:73.54ms +[2025-09-02 04:23:38] [Rank 0] step:1521/10000 train_time:111853ms step_avg:73.54ms +[2025-09-02 04:23:40] [Rank 0] step:1541/10000 train_time:113339ms step_avg:73.55ms +[2025-09-02 04:23:40] [Rank 0] step:1541/10000 train_time:113339ms step_avg:73.55ms +[2025-09-02 04:23:41] [Rank 0] step:1561/10000 train_time:114826ms step_avg:73.56ms +[2025-09-02 04:23:41] [Rank 0] step:1561/10000 train_time:114826ms step_avg:73.56ms +[2025-09-02 04:23:43] [Rank 0] step:1581/10000 train_time:116313ms step_avg:73.57ms +[2025-09-02 04:23:43] [Rank 0] step:1581/10000 train_time:116313ms step_avg:73.57ms +[2025-09-02 04:23:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:23:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:23:56] [Rank 0] PRINT: step:1600/10000 val_loss:4.6091 svd_entropy: attn_qk:H=0.7381,top10E=0.29,eRank=158.9,q75/q25=49.20 attn_vo:H=0.7494,top10E=0.20,eRank=213.7,q75/q25=inf mlp_w1:H=0.8293,top10E=0.23,eRank=256.9,q75/q25=7.03 mlp_w2:H=0.9662,top10E=0.04,eRank=613.9,q75/q25=3.20 vo_prod:H=0.5971,top10E=0.33,eRank=73.6,q75/q25=inf train_time:117875ms step_avg:73.67ms +[2025-09-02 04:23:56] [Rank 0] PRINT: step:1600/10000 val_loss:4.6091 svd_entropy: attn_qk:H=0.7381,top10E=0.29,eRank=158.9,q75/q25=49.20 attn_vo:H=0.7494,top10E=0.20,eRank=213.7,q75/q25=inf mlp_w1:H=0.8293,top10E=0.23,eRank=256.9,q75/q25=7.03 mlp_w2:H=0.9662,top10E=0.04,eRank=613.9,q75/q25=3.20 vo_prod:H=0.5971,top10E=0.33,eRank=73.6,q75/q25=inf train_time:117875ms step_avg:73.67ms +[2025-09-02 04:23:56] [Rank 0] step:1601/10000 train_time:117891ms step_avg:73.64ms +[2025-09-02 04:23:56] [Rank 0] step:1601/10000 train_time:117891ms step_avg:73.64ms +[2025-09-02 04:23:58] [Rank 0] step:1621/10000 train_time:119314ms step_avg:73.61ms +[2025-09-02 04:23:58] [Rank 0] step:1621/10000 train_time:119314ms step_avg:73.61ms +[2025-09-02 04:23:59] [Rank 0] step:1641/10000 train_time:120801ms step_avg:73.61ms +[2025-09-02 04:23:59] [Rank 0] step:1641/10000 train_time:120801ms step_avg:73.61ms +[2025-09-02 04:24:01] [Rank 0] step:1661/10000 train_time:122287ms step_avg:73.62ms +[2025-09-02 04:24:01] [Rank 0] step:1661/10000 train_time:122287ms step_avg:73.62ms +[2025-09-02 04:24:02] [Rank 0] step:1681/10000 train_time:123772ms step_avg:73.63ms +[2025-09-02 04:24:02] [Rank 0] step:1681/10000 train_time:123772ms step_avg:73.63ms +[2025-09-02 04:24:04] [Rank 0] step:1701/10000 train_time:125260ms step_avg:73.64ms +[2025-09-02 04:24:04] [Rank 0] step:1701/10000 train_time:125260ms step_avg:73.64ms +[2025-09-02 04:24:05] [Rank 0] step:1721/10000 train_time:126745ms step_avg:73.65ms +[2025-09-02 04:24:05] [Rank 0] step:1721/10000 train_time:126745ms step_avg:73.65ms +[2025-09-02 04:24:07] [Rank 0] 
step:1741/10000 train_time:128231ms step_avg:73.65ms +[2025-09-02 04:24:07] [Rank 0] step:1741/10000 train_time:128231ms step_avg:73.65ms +[2025-09-02 04:24:08] [Rank 0] step:1761/10000 train_time:129717ms step_avg:73.66ms +[2025-09-02 04:24:08] [Rank 0] step:1761/10000 train_time:129717ms step_avg:73.66ms +[2025-09-02 04:24:10] [Rank 0] step:1781/10000 train_time:131203ms step_avg:73.67ms +[2025-09-02 04:24:10] [Rank 0] step:1781/10000 train_time:131203ms step_avg:73.67ms +[2025-09-02 04:24:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:24:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:24:23] [Rank 0] PRINT: step:1800/10000 val_loss:4.5098 svd_entropy: attn_qk:H=0.7439,top10E=0.29,eRank=163.1,q75/q25=51.92 attn_vo:H=0.7581,top10E=0.19,eRank=223.5,q75/q25=inf mlp_w1:H=0.8388,top10E=0.22,eRank=272.1,q75/q25=6.79 mlp_w2:H=0.9670,top10E=0.04,eRank=617.2,q75/q25=3.15 vo_prod:H=0.6071,top10E=0.31,eRank=79.1,q75/q25=inf train_time:132765ms step_avg:73.76ms +[2025-09-02 04:24:23] [Rank 0] PRINT: step:1800/10000 val_loss:4.5098 svd_entropy: attn_qk:H=0.7439,top10E=0.29,eRank=163.1,q75/q25=51.92 attn_vo:H=0.7581,top10E=0.19,eRank=223.5,q75/q25=inf mlp_w1:H=0.8388,top10E=0.22,eRank=272.1,q75/q25=6.79 mlp_w2:H=0.9670,top10E=0.04,eRank=617.2,q75/q25=3.15 vo_prod:H=0.6071,top10E=0.31,eRank=79.1,q75/q25=inf train_time:132765ms step_avg:73.76ms +[2025-09-02 04:24:23] [Rank 0] step:1801/10000 train_time:132781ms step_avg:73.73ms +[2025-09-02 04:24:23] [Rank 0] step:1801/10000 train_time:132781ms step_avg:73.73ms +[2025-09-02 04:24:24] [Rank 0] step:1821/10000 train_time:134192ms step_avg:73.69ms +[2025-09-02 04:24:24] [Rank 0] step:1821/10000 train_time:134192ms step_avg:73.69ms +[2025-09-02 04:24:26] [Rank 0] step:1841/10000 train_time:135675ms step_avg:73.70ms +[2025-09-02 04:24:26] 
[Rank 0] step:1841/10000 train_time:135675ms step_avg:73.70ms +[2025-09-02 04:24:27] [Rank 0] step:1861/10000 train_time:137160ms step_avg:73.70ms +[2025-09-02 04:24:27] [Rank 0] step:1861/10000 train_time:137160ms step_avg:73.70ms +[2025-09-02 04:24:29] [Rank 0] step:1881/10000 train_time:138644ms step_avg:73.71ms +[2025-09-02 04:24:29] [Rank 0] step:1881/10000 train_time:138644ms step_avg:73.71ms +[2025-09-02 04:24:30] [Rank 0] step:1901/10000 train_time:140128ms step_avg:73.71ms +[2025-09-02 04:24:30] [Rank 0] step:1901/10000 train_time:140128ms step_avg:73.71ms +[2025-09-02 04:24:32] [Rank 0] step:1921/10000 train_time:141614ms step_avg:73.72ms +[2025-09-02 04:24:32] [Rank 0] step:1921/10000 train_time:141614ms step_avg:73.72ms +[2025-09-02 04:24:33] [Rank 0] step:1941/10000 train_time:143098ms step_avg:73.72ms +[2025-09-02 04:24:33] [Rank 0] step:1941/10000 train_time:143098ms step_avg:73.72ms +[2025-09-02 04:24:35] [Rank 0] step:1961/10000 train_time:144584ms step_avg:73.73ms +[2025-09-02 04:24:35] [Rank 0] step:1961/10000 train_time:144584ms step_avg:73.73ms +[2025-09-02 04:24:36] [Rank 0] step:1981/10000 train_time:146121ms step_avg:73.76ms +[2025-09-02 04:24:36] [Rank 0] step:1981/10000 train_time:146121ms step_avg:73.76ms +[2025-09-02 04:24:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:24:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:24:50] [Rank 0] PRINT: step:2000/10000 val_loss:4.4459 svd_entropy: attn_qk:H=0.7486,top10E=0.28,eRank=166.8,q75/q25=54.06 attn_vo:H=0.7654,top10E=0.18,eRank=232.0,q75/q25=inf mlp_w1:H=0.8466,top10E=0.21,eRank=285.3,q75/q25=6.54 mlp_w2:H=0.9676,top10E=0.04,eRank=619.2,q75/q25=3.12 vo_prod:H=0.6160,top10E=0.30,eRank=84.4,q75/q25=inf train_time:147682ms step_avg:73.84ms +[2025-09-02 04:24:50] [Rank 0] PRINT: step:2000/10000 val_loss:4.4459 svd_entropy: attn_qk:H=0.7486,top10E=0.28,eRank=166.8,q75/q25=54.06 attn_vo:H=0.7654,top10E=0.18,eRank=232.0,q75/q25=inf mlp_w1:H=0.8466,top10E=0.21,eRank=285.3,q75/q25=6.54 mlp_w2:H=0.9676,top10E=0.04,eRank=619.2,q75/q25=3.12 vo_prod:H=0.6160,top10E=0.30,eRank=84.4,q75/q25=inf train_time:147682ms step_avg:73.84ms +[2025-09-02 04:24:50] [Rank 0] step:2001/10000 train_time:147697ms step_avg:73.81ms +[2025-09-02 04:24:50] [Rank 0] step:2001/10000 train_time:147697ms step_avg:73.81ms +[2025-09-02 04:24:51] [Rank 0] step:2021/10000 train_time:149116ms step_avg:73.78ms +[2025-09-02 04:24:51] [Rank 0] step:2021/10000 train_time:149116ms step_avg:73.78ms +[2025-09-02 04:24:53] [Rank 0] step:2041/10000 train_time:151208ms step_avg:74.09ms +[2025-09-02 04:24:53] [Rank 0] step:2041/10000 train_time:151208ms step_avg:74.09ms +[2025-09-02 04:24:55] [Rank 0] step:2061/10000 train_time:152692ms step_avg:74.09ms +[2025-09-02 04:24:55] [Rank 0] step:2061/10000 train_time:152692ms step_avg:74.09ms +[2025-09-02 04:24:56] [Rank 0] step:2081/10000 train_time:154176ms step_avg:74.09ms +[2025-09-02 04:24:56] [Rank 0] step:2081/10000 train_time:154176ms step_avg:74.09ms +[2025-09-02 04:24:58] [Rank 0] step:2101/10000 train_time:155661ms step_avg:74.09ms +[2025-09-02 04:24:58] [Rank 0] step:2101/10000 train_time:155661ms step_avg:74.09ms +[2025-09-02 04:24:59] [Rank 0] step:2121/10000 train_time:157146ms step_avg:74.09ms +[2025-09-02 04:24:59] [Rank 0] step:2121/10000 train_time:157146ms step_avg:74.09ms +[2025-09-02 04:25:01] [Rank 0] 
step:2141/10000 train_time:158633ms step_avg:74.09ms +[2025-09-02 04:25:01] [Rank 0] step:2141/10000 train_time:158633ms step_avg:74.09ms +[2025-09-02 04:25:02] [Rank 0] step:2161/10000 train_time:160119ms step_avg:74.09ms +[2025-09-02 04:25:02] [Rank 0] step:2161/10000 train_time:160119ms step_avg:74.09ms +[2025-09-02 04:25:04] [Rank 0] step:2181/10000 train_time:161611ms step_avg:74.10ms +[2025-09-02 04:25:04] [Rank 0] step:2181/10000 train_time:161611ms step_avg:74.10ms +[2025-09-02 04:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:25:17] [Rank 0] PRINT: step:2200/10000 val_loss:4.3750 svd_entropy: attn_qk:H=0.7527,top10E=0.27,eRank=170.1,q75/q25=55.48 attn_vo:H=0.7712,top10E=0.18,eRank=239.0,q75/q25=inf mlp_w1:H=0.8532,top10E=0.20,eRank=297.0,q75/q25=6.33 mlp_w2:H=0.9679,top10E=0.04,eRank=620.8,q75/q25=3.10 vo_prod:H=0.6229,top10E=0.29,eRank=88.9,q75/q25=inf train_time:163174ms step_avg:74.17ms +[2025-09-02 04:25:17] [Rank 0] PRINT: step:2200/10000 val_loss:4.3750 svd_entropy: attn_qk:H=0.7527,top10E=0.27,eRank=170.1,q75/q25=55.48 attn_vo:H=0.7712,top10E=0.18,eRank=239.0,q75/q25=inf mlp_w1:H=0.8532,top10E=0.20,eRank=297.0,q75/q25=6.33 mlp_w2:H=0.9679,top10E=0.04,eRank=620.8,q75/q25=3.10 vo_prod:H=0.6229,top10E=0.29,eRank=88.9,q75/q25=inf train_time:163174ms step_avg:74.17ms +[2025-09-02 04:25:17] [Rank 0] step:2201/10000 train_time:163188ms step_avg:74.14ms +[2025-09-02 04:25:17] [Rank 0] step:2201/10000 train_time:163188ms step_avg:74.14ms +[2025-09-02 04:25:19] [Rank 0] step:2221/10000 train_time:164615ms step_avg:74.12ms +[2025-09-02 04:25:19] [Rank 0] step:2221/10000 train_time:164615ms step_avg:74.12ms +[2025-09-02 04:25:20] [Rank 0] step:2241/10000 train_time:166135ms step_avg:74.13ms +[2025-09-02 04:25:20] 
[Rank 0] step:2241/10000 train_time:166135ms step_avg:74.13ms +[2025-09-02 04:25:22] [Rank 0] step:2261/10000 train_time:167664ms step_avg:74.15ms +[2025-09-02 04:25:22] [Rank 0] step:2261/10000 train_time:167664ms step_avg:74.15ms +[2025-09-02 04:25:23] [Rank 0] step:2281/10000 train_time:169195ms step_avg:74.18ms +[2025-09-02 04:25:23] [Rank 0] step:2281/10000 train_time:169195ms step_avg:74.18ms +[2025-09-02 04:25:25] [Rank 0] step:2301/10000 train_time:170725ms step_avg:74.20ms +[2025-09-02 04:25:25] [Rank 0] step:2301/10000 train_time:170725ms step_avg:74.20ms +[2025-09-02 04:25:26] [Rank 0] step:2321/10000 train_time:172255ms step_avg:74.22ms +[2025-09-02 04:25:26] [Rank 0] step:2321/10000 train_time:172255ms step_avg:74.22ms +[2025-09-02 04:25:28] [Rank 0] step:2341/10000 train_time:173786ms step_avg:74.24ms +[2025-09-02 04:25:28] [Rank 0] step:2341/10000 train_time:173786ms step_avg:74.24ms +[2025-09-02 04:25:29] [Rank 0] step:2361/10000 train_time:175317ms step_avg:74.26ms +[2025-09-02 04:25:29] [Rank 0] step:2361/10000 train_time:175317ms step_avg:74.26ms +[2025-09-02 04:25:31] [Rank 0] step:2381/10000 train_time:176850ms step_avg:74.28ms +[2025-09-02 04:25:31] [Rank 0] step:2381/10000 train_time:176850ms step_avg:74.28ms +[2025-09-02 04:25:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:25:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:25:44] [Rank 0] PRINT: step:2400/10000 val_loss:4.2964 svd_entropy: attn_qk:H=0.7556,top10E=0.27,eRank=172.3,q75/q25=56.41 attn_vo:H=0.7762,top10E=0.17,eRank=245.2,q75/q25=inf mlp_w1:H=0.8590,top10E=0.20,eRank=307.9,q75/q25=6.12 mlp_w2:H=0.9682,top10E=0.04,eRank=621.8,q75/q25=3.08 vo_prod:H=0.6297,top10E=0.28,eRank=93.5,q75/q25=inf train_time:178459ms step_avg:74.36ms +[2025-09-02 04:25:44] [Rank 0] PRINT: step:2400/10000 val_loss:4.2964 svd_entropy: attn_qk:H=0.7556,top10E=0.27,eRank=172.3,q75/q25=56.41 attn_vo:H=0.7762,top10E=0.17,eRank=245.2,q75/q25=inf mlp_w1:H=0.8590,top10E=0.20,eRank=307.9,q75/q25=6.12 mlp_w2:H=0.9682,top10E=0.04,eRank=621.8,q75/q25=3.08 vo_prod:H=0.6297,top10E=0.28,eRank=93.5,q75/q25=inf train_time:178459ms step_avg:74.36ms +[2025-09-02 04:25:44] [Rank 0] step:2401/10000 train_time:178473ms step_avg:74.33ms +[2025-09-02 04:25:44] [Rank 0] step:2401/10000 train_time:178473ms step_avg:74.33ms +[2025-09-02 04:25:46] [Rank 0] step:2421/10000 train_time:179948ms step_avg:74.33ms +[2025-09-02 04:25:46] [Rank 0] step:2421/10000 train_time:179948ms step_avg:74.33ms +[2025-09-02 04:25:47] [Rank 0] step:2441/10000 train_time:181478ms step_avg:74.35ms +[2025-09-02 04:25:47] [Rank 0] step:2441/10000 train_time:181478ms step_avg:74.35ms +[2025-09-02 04:25:49] [Rank 0] step:2461/10000 train_time:183005ms step_avg:74.36ms +[2025-09-02 04:25:49] [Rank 0] step:2461/10000 train_time:183005ms step_avg:74.36ms +[2025-09-02 04:25:50] [Rank 0] step:2481/10000 train_time:184535ms step_avg:74.38ms +[2025-09-02 04:25:50] [Rank 0] step:2481/10000 train_time:184535ms step_avg:74.38ms +[2025-09-02 04:25:52] [Rank 0] step:2501/10000 train_time:186069ms step_avg:74.40ms +[2025-09-02 04:25:52] [Rank 0] step:2501/10000 train_time:186069ms step_avg:74.40ms +[2025-09-02 04:25:53] [Rank 0] step:2521/10000 train_time:187599ms step_avg:74.41ms +[2025-09-02 04:25:53] [Rank 0] step:2521/10000 train_time:187599ms step_avg:74.41ms +[2025-09-02 04:25:55] [Rank 0] 
step:2541/10000 train_time:189129ms step_avg:74.43ms +[2025-09-02 04:25:55] [Rank 0] step:2541/10000 train_time:189129ms step_avg:74.43ms +[2025-09-02 04:25:57] [Rank 0] step:2561/10000 train_time:190659ms step_avg:74.45ms +[2025-09-02 04:25:57] [Rank 0] step:2561/10000 train_time:190659ms step_avg:74.45ms +[2025-09-02 04:25:58] [Rank 0] step:2581/10000 train_time:192189ms step_avg:74.46ms +[2025-09-02 04:25:58] [Rank 0] step:2581/10000 train_time:192189ms step_avg:74.46ms +[2025-09-02 04:26:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:26:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:26:11] [Rank 0] PRINT: step:2600/10000 val_loss:4.2442 svd_entropy: attn_qk:H=0.7589,top10E=0.27,eRank=175.1,q75/q25=56.83 attn_vo:H=0.7806,top10E=0.16,eRank=250.8,q75/q25=inf mlp_w1:H=0.8642,top10E=0.19,eRank=317.9,q75/q25=5.97 mlp_w2:H=0.9684,top10E=0.04,eRank=622.5,q75/q25=3.07 vo_prod:H=0.6357,top10E=0.27,eRank=97.7,q75/q25=inf train_time:193799ms step_avg:74.54ms +[2025-09-02 04:26:11] [Rank 0] PRINT: step:2600/10000 val_loss:4.2442 svd_entropy: attn_qk:H=0.7589,top10E=0.27,eRank=175.1,q75/q25=56.83 attn_vo:H=0.7806,top10E=0.16,eRank=250.8,q75/q25=inf mlp_w1:H=0.8642,top10E=0.19,eRank=317.9,q75/q25=5.97 mlp_w2:H=0.9684,top10E=0.04,eRank=622.5,q75/q25=3.07 vo_prod:H=0.6357,top10E=0.27,eRank=97.7,q75/q25=inf train_time:193799ms step_avg:74.54ms +[2025-09-02 04:26:11] [Rank 0] step:2601/10000 train_time:193813ms step_avg:74.51ms +[2025-09-02 04:26:11] [Rank 0] step:2601/10000 train_time:193813ms step_avg:74.51ms +[2025-09-02 04:26:13] [Rank 0] step:2621/10000 train_time:195285ms step_avg:74.51ms +[2025-09-02 04:26:13] [Rank 0] step:2621/10000 train_time:195285ms step_avg:74.51ms +[2025-09-02 04:26:14] [Rank 0] step:2641/10000 train_time:196812ms step_avg:74.52ms +[2025-09-02 04:26:14] 
[Rank 0] step:2641/10000 train_time:196812ms step_avg:74.52ms +[2025-09-02 04:26:16] [Rank 0] step:2661/10000 train_time:198341ms step_avg:74.54ms +[2025-09-02 04:26:16] [Rank 0] step:2661/10000 train_time:198341ms step_avg:74.54ms +[2025-09-02 04:26:17] [Rank 0] step:2681/10000 train_time:199869ms step_avg:74.55ms +[2025-09-02 04:26:17] [Rank 0] step:2681/10000 train_time:199869ms step_avg:74.55ms +[2025-09-02 04:26:19] [Rank 0] step:2701/10000 train_time:201398ms step_avg:74.56ms +[2025-09-02 04:26:19] [Rank 0] step:2701/10000 train_time:201398ms step_avg:74.56ms +[2025-09-02 04:26:20] [Rank 0] step:2721/10000 train_time:202928ms step_avg:74.58ms +[2025-09-02 04:26:20] [Rank 0] step:2721/10000 train_time:202928ms step_avg:74.58ms +[2025-09-02 04:26:22] [Rank 0] step:2741/10000 train_time:204457ms step_avg:74.59ms +[2025-09-02 04:26:22] [Rank 0] step:2741/10000 train_time:204457ms step_avg:74.59ms +[2025-09-02 04:26:24] [Rank 0] step:2761/10000 train_time:205988ms step_avg:74.61ms +[2025-09-02 04:26:24] [Rank 0] step:2761/10000 train_time:205988ms step_avg:74.61ms +[2025-09-02 04:26:25] [Rank 0] step:2781/10000 train_time:207518ms step_avg:74.62ms +[2025-09-02 04:26:25] [Rank 0] step:2781/10000 train_time:207518ms step_avg:74.62ms +[2025-09-02 04:26:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:26:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:26:38] [Rank 0] PRINT: step:2800/10000 val_loss:4.2048 svd_entropy: attn_qk:H=0.7618,top10E=0.27,eRank=177.5,q75/q25=57.62 attn_vo:H=0.7845,top10E=0.16,eRank=255.8,q75/q25=inf mlp_w1:H=0.8689,top10E=0.19,eRank=327.4,q75/q25=5.80 mlp_w2:H=0.9685,top10E=0.04,eRank=623.2,q75/q25=3.06 vo_prod:H=0.6412,top10E=0.26,eRank=101.6,q75/q25=inf train_time:209128ms step_avg:74.69ms +[2025-09-02 04:26:38] [Rank 0] PRINT: step:2800/10000 val_loss:4.2048 svd_entropy: attn_qk:H=0.7618,top10E=0.27,eRank=177.5,q75/q25=57.62 attn_vo:H=0.7845,top10E=0.16,eRank=255.8,q75/q25=inf mlp_w1:H=0.8689,top10E=0.19,eRank=327.4,q75/q25=5.80 mlp_w2:H=0.9685,top10E=0.04,eRank=623.2,q75/q25=3.06 vo_prod:H=0.6412,top10E=0.26,eRank=101.6,q75/q25=inf train_time:209128ms step_avg:74.69ms +[2025-09-02 04:26:39] [Rank 0] step:2801/10000 train_time:209142ms step_avg:74.67ms +[2025-09-02 04:26:39] [Rank 0] step:2801/10000 train_time:209142ms step_avg:74.67ms +[2025-09-02 04:26:40] [Rank 0] step:2821/10000 train_time:210613ms step_avg:74.66ms +[2025-09-02 04:26:40] [Rank 0] step:2821/10000 train_time:210613ms step_avg:74.66ms +[2025-09-02 04:26:42] [Rank 0] step:2841/10000 train_time:212142ms step_avg:74.67ms +[2025-09-02 04:26:42] [Rank 0] step:2841/10000 train_time:212142ms step_avg:74.67ms +[2025-09-02 04:26:43] [Rank 0] step:2861/10000 train_time:213715ms step_avg:74.70ms +[2025-09-02 04:26:43] [Rank 0] step:2861/10000 train_time:213715ms step_avg:74.70ms +[2025-09-02 04:26:45] [Rank 0] step:2881/10000 train_time:215246ms step_avg:74.71ms +[2025-09-02 04:26:45] [Rank 0] step:2881/10000 train_time:215246ms step_avg:74.71ms +[2025-09-02 04:26:46] [Rank 0] step:2901/10000 train_time:216776ms step_avg:74.72ms +[2025-09-02 04:26:46] [Rank 0] step:2901/10000 train_time:216776ms step_avg:74.72ms +[2025-09-02 04:26:48] [Rank 0] step:2921/10000 train_time:218306ms step_avg:74.74ms +[2025-09-02 04:26:48] [Rank 0] step:2921/10000 train_time:218306ms step_avg:74.74ms +[2025-09-02 04:26:49] [Rank 0] 
step:2941/10000 train_time:219838ms step_avg:74.75ms +[2025-09-02 04:26:49] [Rank 0] step:2941/10000 train_time:219838ms step_avg:74.75ms +[2025-09-02 04:26:51] [Rank 0] step:2961/10000 train_time:221370ms step_avg:74.76ms +[2025-09-02 04:26:51] [Rank 0] step:2961/10000 train_time:221370ms step_avg:74.76ms +[2025-09-02 04:26:52] [Rank 0] step:2981/10000 train_time:222907ms step_avg:74.78ms +[2025-09-02 04:26:52] [Rank 0] step:2981/10000 train_time:222907ms step_avg:74.78ms +[2025-09-02 04:26:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:26:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:27:06] [Rank 0] PRINT: step:3000/10000 val_loss:4.1613 svd_entropy: attn_qk:H=0.7457,top10E=0.29,eRank=148.3,q75/q25=58.87 attn_vo:H=0.8191,top10E=0.17,eRank=260.1,q75/q25=52.49 mlp_w1:H=0.8729,top10E=0.18,eRank=335.8,q75/q25=5.68 mlp_w2:H=0.9687,top10E=0.04,eRank=623.7,q75/q25=3.05 vo_prod:H=0.7044,top10E=0.29,eRank=111.7,q75/q25=3289.33 train_time:224525ms step_avg:74.84ms +[2025-09-02 04:27:06] [Rank 0] PRINT: step:3000/10000 val_loss:4.1613 svd_entropy: attn_qk:H=0.7457,top10E=0.29,eRank=148.3,q75/q25=58.87 attn_vo:H=0.8191,top10E=0.17,eRank=260.1,q75/q25=52.49 mlp_w1:H=0.8729,top10E=0.18,eRank=335.8,q75/q25=5.68 mlp_w2:H=0.9687,top10E=0.04,eRank=623.7,q75/q25=3.05 vo_prod:H=0.7044,top10E=0.29,eRank=111.7,q75/q25=3289.33 train_time:224525ms step_avg:74.84ms +[2025-09-02 04:27:06] [Rank 0] step:3001/10000 train_time:224539ms step_avg:74.82ms +[2025-09-02 04:27:06] [Rank 0] step:3001/10000 train_time:224539ms step_avg:74.82ms +[2025-09-02 04:27:07] [Rank 0] step:3021/10000 train_time:226006ms step_avg:74.81ms +[2025-09-02 04:27:07] [Rank 0] step:3021/10000 train_time:226006ms step_avg:74.81ms +[2025-09-02 04:27:09] [Rank 0] step:3041/10000 train_time:227543ms step_avg:74.83ms 
+[2025-09-02 04:27:09] [Rank 0] step:3041/10000 train_time:227543ms step_avg:74.83ms +[2025-09-02 04:27:11] [Rank 0] step:3061/10000 train_time:229082ms step_avg:74.84ms +[2025-09-02 04:27:11] [Rank 0] step:3061/10000 train_time:229082ms step_avg:74.84ms +[2025-09-02 04:27:12] [Rank 0] step:3081/10000 train_time:230620ms step_avg:74.85ms +[2025-09-02 04:27:12] [Rank 0] step:3081/10000 train_time:230620ms step_avg:74.85ms +[2025-09-02 04:27:14] [Rank 0] step:3101/10000 train_time:232159ms step_avg:74.87ms +[2025-09-02 04:27:14] [Rank 0] step:3101/10000 train_time:232159ms step_avg:74.87ms +[2025-09-02 04:27:15] [Rank 0] step:3121/10000 train_time:233698ms step_avg:74.88ms +[2025-09-02 04:27:15] [Rank 0] step:3121/10000 train_time:233698ms step_avg:74.88ms +[2025-09-02 04:27:17] [Rank 0] step:3141/10000 train_time:235237ms step_avg:74.89ms +[2025-09-02 04:27:17] [Rank 0] step:3141/10000 train_time:235237ms step_avg:74.89ms +[2025-09-02 04:27:18] [Rank 0] step:3161/10000 train_time:236779ms step_avg:74.91ms +[2025-09-02 04:27:18] [Rank 0] step:3161/10000 train_time:236779ms step_avg:74.91ms +[2025-09-02 04:27:20] [Rank 0] step:3181/10000 train_time:238320ms step_avg:74.92ms +[2025-09-02 04:27:20] [Rank 0] step:3181/10000 train_time:238320ms step_avg:74.92ms +[2025-09-02 04:27:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:27:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:27:33] [Rank 0] PRINT: step:3200/10000 val_loss:4.1214 svd_entropy: attn_qk:H=0.7486,top10E=0.29,eRank=150.8,q75/q25=59.78 attn_vo:H=0.8209,top10E=0.17,eRank=262.7,q75/q25=51.91 mlp_w1:H=0.8765,top10E=0.18,eRank=343.4,q75/q25=5.55 mlp_w2:H=0.9688,top10E=0.04,eRank=624.1,q75/q25=3.04 vo_prod:H=0.7068,top10E=0.29,eRank=114.0,q75/q25=3083.61 train_time:239939ms step_avg:74.98ms +[2025-09-02 04:27:33] [Rank 0] PRINT: step:3200/10000 val_loss:4.1214 svd_entropy: attn_qk:H=0.7486,top10E=0.29,eRank=150.8,q75/q25=59.78 attn_vo:H=0.8209,top10E=0.17,eRank=262.7,q75/q25=51.91 mlp_w1:H=0.8765,top10E=0.18,eRank=343.4,q75/q25=5.55 mlp_w2:H=0.9688,top10E=0.04,eRank=624.1,q75/q25=3.04 vo_prod:H=0.7068,top10E=0.29,eRank=114.0,q75/q25=3083.61 train_time:239939ms step_avg:74.98ms +[2025-09-02 04:27:33] [Rank 0] step:3201/10000 train_time:239953ms step_avg:74.96ms +[2025-09-02 04:27:33] [Rank 0] step:3201/10000 train_time:239953ms step_avg:74.96ms +[2025-09-02 04:27:35] [Rank 0] step:3221/10000 train_time:241426ms step_avg:74.95ms +[2025-09-02 04:27:35] [Rank 0] step:3221/10000 train_time:241426ms step_avg:74.95ms +[2025-09-02 04:27:36] [Rank 0] step:3241/10000 train_time:242963ms step_avg:74.97ms +[2025-09-02 04:27:36] [Rank 0] step:3241/10000 train_time:242963ms step_avg:74.97ms +[2025-09-02 04:27:38] [Rank 0] step:3261/10000 train_time:244502ms step_avg:74.98ms +[2025-09-02 04:27:38] [Rank 0] step:3261/10000 train_time:244502ms step_avg:74.98ms +[2025-09-02 04:27:39] [Rank 0] step:3281/10000 train_time:246043ms step_avg:74.99ms +[2025-09-02 04:27:39] [Rank 0] step:3281/10000 train_time:246043ms step_avg:74.99ms +[2025-09-02 04:27:41] [Rank 0] step:3301/10000 train_time:247583ms step_avg:75.00ms +[2025-09-02 04:27:41] [Rank 0] step:3301/10000 train_time:247583ms step_avg:75.00ms +[2025-09-02 04:27:42] [Rank 0] step:3321/10000 train_time:249123ms step_avg:75.01ms +[2025-09-02 04:27:42] [Rank 0] step:3321/10000 train_time:249123ms step_avg:75.01ms +[2025-09-02 
04:27:44] [Rank 0] step:3341/10000 train_time:250665ms step_avg:75.03ms +[2025-09-02 04:27:44] [Rank 0] step:3341/10000 train_time:250665ms step_avg:75.03ms +[2025-09-02 04:27:45] [Rank 0] step:3361/10000 train_time:252206ms step_avg:75.04ms +[2025-09-02 04:27:45] [Rank 0] step:3361/10000 train_time:252206ms step_avg:75.04ms +[2025-09-02 04:27:47] [Rank 0] step:3381/10000 train_time:253828ms step_avg:75.07ms +[2025-09-02 04:27:47] [Rank 0] step:3381/10000 train_time:253828ms step_avg:75.07ms +[2025-09-02 04:27:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:27:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:28:00] [Rank 0] PRINT: step:3400/10000 val_loss:4.0841 svd_entropy: attn_qk:H=0.7514,top10E=0.28,eRank=153.3,q75/q25=60.39 attn_vo:H=0.8237,top10E=0.16,eRank=266.6,q75/q25=50.61 mlp_w1:H=0.8798,top10E=0.17,eRank=350.6,q75/q25=5.45 mlp_w2:H=0.9689,top10E=0.04,eRank=624.5,q75/q25=3.03 vo_prod:H=0.7105,top10E=0.29,eRank=116.9,q75/q25=2820.74 train_time:255449ms step_avg:75.13ms +[2025-09-02 04:28:00] [Rank 0] PRINT: step:3400/10000 val_loss:4.0841 svd_entropy: attn_qk:H=0.7514,top10E=0.28,eRank=153.3,q75/q25=60.39 attn_vo:H=0.8237,top10E=0.16,eRank=266.6,q75/q25=50.61 mlp_w1:H=0.8798,top10E=0.17,eRank=350.6,q75/q25=5.45 mlp_w2:H=0.9689,top10E=0.04,eRank=624.5,q75/q25=3.03 vo_prod:H=0.7105,top10E=0.29,eRank=116.9,q75/q25=2820.74 train_time:255449ms step_avg:75.13ms +[2025-09-02 04:28:00] [Rank 0] step:3401/10000 train_time:255462ms step_avg:75.11ms +[2025-09-02 04:28:00] [Rank 0] step:3401/10000 train_time:255462ms step_avg:75.11ms +[2025-09-02 04:28:02] [Rank 0] step:3421/10000 train_time:256923ms step_avg:75.10ms +[2025-09-02 04:28:02] [Rank 0] step:3421/10000 train_time:256923ms step_avg:75.10ms +[2025-09-02 04:28:04] [Rank 0] step:3441/10000 train_time:258460ms 
step_avg:75.11ms +[2025-09-02 04:28:04] [Rank 0] step:3441/10000 train_time:258460ms step_avg:75.11ms +[2025-09-02 04:28:05] [Rank 0] step:3461/10000 train_time:259998ms step_avg:75.12ms +[2025-09-02 04:28:05] [Rank 0] step:3461/10000 train_time:259998ms step_avg:75.12ms +[2025-09-02 04:28:07] [Rank 0] step:3481/10000 train_time:261536ms step_avg:75.13ms +[2025-09-02 04:28:07] [Rank 0] step:3481/10000 train_time:261536ms step_avg:75.13ms +[2025-09-02 04:28:08] [Rank 0] step:3501/10000 train_time:263077ms step_avg:75.14ms +[2025-09-02 04:28:08] [Rank 0] step:3501/10000 train_time:263077ms step_avg:75.14ms +[2025-09-02 04:28:10] [Rank 0] step:3521/10000 train_time:264617ms step_avg:75.15ms +[2025-09-02 04:28:10] [Rank 0] step:3521/10000 train_time:264617ms step_avg:75.15ms +[2025-09-02 04:28:11] [Rank 0] step:3541/10000 train_time:266155ms step_avg:75.16ms +[2025-09-02 04:28:11] [Rank 0] step:3541/10000 train_time:266155ms step_avg:75.16ms +[2025-09-02 04:28:13] [Rank 0] step:3561/10000 train_time:267694ms step_avg:75.17ms +[2025-09-02 04:28:13] [Rank 0] step:3561/10000 train_time:267694ms step_avg:75.17ms +[2025-09-02 04:28:14] [Rank 0] step:3581/10000 train_time:269233ms step_avg:75.18ms +[2025-09-02 04:28:14] [Rank 0] step:3581/10000 train_time:269233ms step_avg:75.18ms +[2025-09-02 04:28:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:28:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:28:28] [Rank 0] PRINT: step:3600/10000 val_loss:4.0700 svd_entropy: attn_qk:H=0.7536,top10E=0.28,eRank=155.2,q75/q25=60.51 attn_vo:H=0.8266,top10E=0.16,eRank=270.7,q75/q25=49.62 mlp_w1:H=0.8826,top10E=0.17,eRank=356.7,q75/q25=5.37 mlp_w2:H=0.9689,top10E=0.04,eRank=624.9,q75/q25=3.03 vo_prod:H=0.7142,top10E=0.28,eRank=119.6,q75/q25=2697.37 train_time:270851ms step_avg:75.24ms +[2025-09-02 04:28:28] [Rank 0] PRINT: step:3600/10000 val_loss:4.0700 svd_entropy: attn_qk:H=0.7536,top10E=0.28,eRank=155.2,q75/q25=60.51 attn_vo:H=0.8266,top10E=0.16,eRank=270.7,q75/q25=49.62 mlp_w1:H=0.8826,top10E=0.17,eRank=356.7,q75/q25=5.37 mlp_w2:H=0.9689,top10E=0.04,eRank=624.9,q75/q25=3.03 vo_prod:H=0.7142,top10E=0.28,eRank=119.6,q75/q25=2697.37 train_time:270851ms step_avg:75.24ms +[2025-09-02 04:28:28] [Rank 0] step:3601/10000 train_time:270864ms step_avg:75.22ms +[2025-09-02 04:28:28] [Rank 0] step:3601/10000 train_time:270864ms step_avg:75.22ms +[2025-09-02 04:28:29] [Rank 0] step:3621/10000 train_time:272352ms step_avg:75.21ms +[2025-09-02 04:28:29] [Rank 0] step:3621/10000 train_time:272352ms step_avg:75.21ms +[2025-09-02 04:28:31] [Rank 0] step:3641/10000 train_time:273888ms step_avg:75.22ms +[2025-09-02 04:28:31] [Rank 0] step:3641/10000 train_time:273888ms step_avg:75.22ms +[2025-09-02 04:28:32] [Rank 0] step:3661/10000 train_time:275425ms step_avg:75.23ms +[2025-09-02 04:28:32] [Rank 0] step:3661/10000 train_time:275425ms step_avg:75.23ms +[2025-09-02 04:28:34] [Rank 0] step:3681/10000 train_time:276963ms step_avg:75.24ms +[2025-09-02 04:28:34] [Rank 0] step:3681/10000 train_time:276963ms step_avg:75.24ms +[2025-09-02 04:28:35] [Rank 0] step:3701/10000 train_time:278501ms step_avg:75.25ms +[2025-09-02 04:28:35] [Rank 0] step:3701/10000 train_time:278501ms step_avg:75.25ms +[2025-09-02 04:28:37] [Rank 0] step:3721/10000 train_time:280068ms step_avg:75.27ms +[2025-09-02 04:28:37] [Rank 0] step:3721/10000 train_time:280068ms step_avg:75.27ms +[2025-09-02 
04:28:39] [Rank 0] step:3741/10000 train_time:281644ms step_avg:75.29ms +[2025-09-02 04:28:39] [Rank 0] step:3741/10000 train_time:281644ms step_avg:75.29ms +[2025-09-02 04:28:40] [Rank 0] step:3761/10000 train_time:283219ms step_avg:75.30ms +[2025-09-02 04:28:40] [Rank 0] step:3761/10000 train_time:283219ms step_avg:75.30ms +[2025-09-02 04:28:42] [Rank 0] step:3781/10000 train_time:284796ms step_avg:75.32ms +[2025-09-02 04:28:42] [Rank 0] step:3781/10000 train_time:284796ms step_avg:75.32ms +[2025-09-02 04:28:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:28:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:28:55] [Rank 0] PRINT: step:3800/10000 val_loss:4.0203 svd_entropy: attn_qk:H=0.7553,top10E=0.28,eRank=156.8,q75/q25=60.42 attn_vo:H=0.8291,top10E=0.16,eRank=273.9,q75/q25=48.71 mlp_w1:H=0.8851,top10E=0.17,eRank=362.6,q75/q25=5.28 mlp_w2:H=0.9690,top10E=0.04,eRank=625.1,q75/q25=3.02 vo_prod:H=0.7169,top10E=0.28,eRank=121.8,q75/q25=2551.99 train_time:286452ms step_avg:75.38ms +[2025-09-02 04:28:55] [Rank 0] PRINT: step:3800/10000 val_loss:4.0203 svd_entropy: attn_qk:H=0.7553,top10E=0.28,eRank=156.8,q75/q25=60.42 attn_vo:H=0.8291,top10E=0.16,eRank=273.9,q75/q25=48.71 mlp_w1:H=0.8851,top10E=0.17,eRank=362.6,q75/q25=5.28 mlp_w2:H=0.9690,top10E=0.04,eRank=625.1,q75/q25=3.02 vo_prod:H=0.7169,top10E=0.28,eRank=121.8,q75/q25=2551.99 train_time:286452ms step_avg:75.38ms +[2025-09-02 04:28:55] [Rank 0] step:3801/10000 train_time:286466ms step_avg:75.37ms +[2025-09-02 04:28:55] [Rank 0] step:3801/10000 train_time:286466ms step_avg:75.37ms +[2025-09-02 04:28:57] [Rank 0] step:3821/10000 train_time:287970ms step_avg:75.37ms +[2025-09-02 04:28:57] [Rank 0] step:3821/10000 train_time:287970ms step_avg:75.37ms +[2025-09-02 04:28:58] [Rank 0] step:3841/10000 train_time:289548ms 
step_avg:75.38ms +[2025-09-02 04:28:58] [Rank 0] step:3841/10000 train_time:289548ms step_avg:75.38ms +[2025-09-02 04:29:00] [Rank 0] step:3861/10000 train_time:291122ms step_avg:75.40ms +[2025-09-02 04:29:00] [Rank 0] step:3861/10000 train_time:291122ms step_avg:75.40ms +[2025-09-02 04:29:02] [Rank 0] step:3881/10000 train_time:292698ms step_avg:75.42ms +[2025-09-02 04:29:02] [Rank 0] step:3881/10000 train_time:292698ms step_avg:75.42ms +[2025-09-02 04:29:03] [Rank 0] step:3901/10000 train_time:294273ms step_avg:75.44ms +[2025-09-02 04:29:03] [Rank 0] step:3901/10000 train_time:294273ms step_avg:75.44ms +[2025-09-02 04:29:05] [Rank 0] step:3921/10000 train_time:295847ms step_avg:75.45ms +[2025-09-02 04:29:05] [Rank 0] step:3921/10000 train_time:295847ms step_avg:75.45ms +[2025-09-02 04:29:06] [Rank 0] step:3941/10000 train_time:297421ms step_avg:75.47ms +[2025-09-02 04:29:06] [Rank 0] step:3941/10000 train_time:297421ms step_avg:75.47ms +[2025-09-02 04:29:08] [Rank 0] step:3961/10000 train_time:298995ms step_avg:75.48ms +[2025-09-02 04:29:08] [Rank 0] step:3961/10000 train_time:298995ms step_avg:75.48ms +[2025-09-02 04:29:09] [Rank 0] step:3981/10000 train_time:300571ms step_avg:75.50ms +[2025-09-02 04:29:09] [Rank 0] step:3981/10000 train_time:300571ms step_avg:75.50ms +[2025-09-02 04:29:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:29:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:29:23] [Rank 0] PRINT: step:4000/10000 val_loss:3.9939 svd_entropy: attn_qk:H=0.7572,top10E=0.28,eRank=158.6,q75/q25=60.45 attn_vo:H=0.8311,top10E=0.16,eRank=276.8,q75/q25=47.81 mlp_w1:H=0.8875,top10E=0.16,eRank=368.0,q75/q25=5.23 mlp_w2:H=0.9691,top10E=0.04,eRank=625.4,q75/q25=3.01 vo_prod:H=0.7195,top10E=0.28,eRank=124.0,q75/q25=2363.17 train_time:302226ms step_avg:75.56ms +[2025-09-02 04:29:23] [Rank 0] PRINT: step:4000/10000 val_loss:3.9939 svd_entropy: attn_qk:H=0.7572,top10E=0.28,eRank=158.6,q75/q25=60.45 attn_vo:H=0.8311,top10E=0.16,eRank=276.8,q75/q25=47.81 mlp_w1:H=0.8875,top10E=0.16,eRank=368.0,q75/q25=5.23 mlp_w2:H=0.9691,top10E=0.04,eRank=625.4,q75/q25=3.01 vo_prod:H=0.7195,top10E=0.28,eRank=124.0,q75/q25=2363.17 train_time:302226ms step_avg:75.56ms +[2025-09-02 04:29:23] [Rank 0] step:4001/10000 train_time:302240ms step_avg:75.54ms +[2025-09-02 04:29:23] [Rank 0] step:4001/10000 train_time:302240ms step_avg:75.54ms +[2025-09-02 04:29:25] [Rank 0] step:4021/10000 train_time:303738ms step_avg:75.54ms +[2025-09-02 04:29:25] [Rank 0] step:4021/10000 train_time:303738ms step_avg:75.54ms +[2025-09-02 04:29:26] [Rank 0] step:4041/10000 train_time:305313ms step_avg:75.55ms +[2025-09-02 04:29:26] [Rank 0] step:4041/10000 train_time:305313ms step_avg:75.55ms +[2025-09-02 04:29:28] [Rank 0] step:4061/10000 train_time:306888ms step_avg:75.57ms +[2025-09-02 04:29:28] [Rank 0] step:4061/10000 train_time:306888ms step_avg:75.57ms +[2025-09-02 04:29:30] [Rank 0] step:4081/10000 train_time:309045ms step_avg:75.73ms +[2025-09-02 04:29:30] [Rank 0] step:4081/10000 train_time:309045ms step_avg:75.73ms +[2025-09-02 04:29:31] [Rank 0] step:4101/10000 train_time:310623ms step_avg:75.74ms +[2025-09-02 04:29:31] [Rank 0] step:4101/10000 train_time:310623ms step_avg:75.74ms +[2025-09-02 04:29:33] [Rank 0] step:4121/10000 train_time:312195ms step_avg:75.76ms +[2025-09-02 04:29:33] [Rank 0] step:4121/10000 train_time:312195ms step_avg:75.76ms +[2025-09-02 
04:29:35] [Rank 0] step:4141/10000 train_time:313771ms step_avg:75.77ms +[2025-09-02 04:29:35] [Rank 0] step:4141/10000 train_time:313771ms step_avg:75.77ms +[2025-09-02 04:29:36] [Rank 0] step:4161/10000 train_time:315345ms step_avg:75.79ms +[2025-09-02 04:29:36] [Rank 0] step:4161/10000 train_time:315345ms step_avg:75.79ms +[2025-09-02 04:29:38] [Rank 0] step:4181/10000 train_time:316924ms step_avg:75.80ms +[2025-09-02 04:29:38] [Rank 0] step:4181/10000 train_time:316924ms step_avg:75.80ms +[2025-09-02 04:29:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:29:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:29:51] [Rank 0] PRINT: step:4200/10000 val_loss:3.9755 svd_entropy: attn_qk:H=0.7592,top10E=0.27,eRank=160.5,q75/q25=60.45 attn_vo:H=0.8330,top10E=0.15,eRank=279.5,q75/q25=47.08 mlp_w1:H=0.8896,top10E=0.16,eRank=373.0,q75/q25=5.15 mlp_w2:H=0.9691,top10E=0.04,eRank=625.6,q75/q25=3.01 vo_prod:H=0.7219,top10E=0.27,eRank=125.9,q75/q25=2247.54 train_time:318581ms step_avg:75.85ms +[2025-09-02 04:29:51] [Rank 0] PRINT: step:4200/10000 val_loss:3.9755 svd_entropy: attn_qk:H=0.7592,top10E=0.27,eRank=160.5,q75/q25=60.45 attn_vo:H=0.8330,top10E=0.15,eRank=279.5,q75/q25=47.08 mlp_w1:H=0.8896,top10E=0.16,eRank=373.0,q75/q25=5.15 mlp_w2:H=0.9691,top10E=0.04,eRank=625.6,q75/q25=3.01 vo_prod:H=0.7219,top10E=0.27,eRank=125.9,q75/q25=2247.54 train_time:318581ms step_avg:75.85ms +[2025-09-02 04:29:51] [Rank 0] step:4201/10000 train_time:318595ms step_avg:75.84ms +[2025-09-02 04:29:51] [Rank 0] step:4201/10000 train_time:318595ms step_avg:75.84ms +[2025-09-02 04:29:53] [Rank 0] step:4221/10000 train_time:320124ms step_avg:75.84ms +[2025-09-02 04:29:53] [Rank 0] step:4221/10000 train_time:320124ms step_avg:75.84ms +[2025-09-02 04:29:54] [Rank 0] step:4241/10000 train_time:321701ms 
step_avg:75.86ms +[2025-09-02 04:29:54] [Rank 0] step:4241/10000 train_time:321701ms step_avg:75.86ms +[2025-09-02 04:29:56] [Rank 0] step:4261/10000 train_time:323277ms step_avg:75.87ms +[2025-09-02 04:29:56] [Rank 0] step:4261/10000 train_time:323277ms step_avg:75.87ms +[2025-09-02 04:29:58] [Rank 0] step:4281/10000 train_time:324852ms step_avg:75.88ms +[2025-09-02 04:29:58] [Rank 0] step:4281/10000 train_time:324852ms step_avg:75.88ms +[2025-09-02 04:29:59] [Rank 0] step:4301/10000 train_time:326429ms step_avg:75.90ms +[2025-09-02 04:29:59] [Rank 0] step:4301/10000 train_time:326429ms step_avg:75.90ms +[2025-09-02 04:30:01] [Rank 0] step:4321/10000 train_time:328008ms step_avg:75.91ms +[2025-09-02 04:30:01] [Rank 0] step:4321/10000 train_time:328008ms step_avg:75.91ms +[2025-09-02 04:30:02] [Rank 0] step:4341/10000 train_time:329584ms step_avg:75.92ms +[2025-09-02 04:30:02] [Rank 0] step:4341/10000 train_time:329584ms step_avg:75.92ms +[2025-09-02 04:30:04] [Rank 0] step:4361/10000 train_time:331163ms step_avg:75.94ms +[2025-09-02 04:30:04] [Rank 0] step:4361/10000 train_time:331163ms step_avg:75.94ms +[2025-09-02 04:30:06] [Rank 0] step:4381/10000 train_time:332740ms step_avg:75.95ms +[2025-09-02 04:30:06] [Rank 0] step:4381/10000 train_time:332740ms step_avg:75.95ms +[2025-09-02 04:30:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:30:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:30:19] [Rank 0] PRINT: step:4400/10000 val_loss:3.9506 svd_entropy: attn_qk:H=0.7607,top10E=0.27,eRank=161.9,q75/q25=60.26 attn_vo:H=0.8345,top10E=0.15,eRank=282.0,q75/q25=46.21 mlp_w1:H=0.8915,top10E=0.16,eRank=377.6,q75/q25=5.09 mlp_w2:H=0.9692,top10E=0.04,eRank=625.8,q75/q25=3.00 vo_prod:H=0.7240,top10E=0.27,eRank=127.8,q75/q25=2099.40 train_time:334399ms step_avg:76.00ms +[2025-09-02 04:30:19] [Rank 0] PRINT: step:4400/10000 val_loss:3.9506 svd_entropy: attn_qk:H=0.7607,top10E=0.27,eRank=161.9,q75/q25=60.26 attn_vo:H=0.8345,top10E=0.15,eRank=282.0,q75/q25=46.21 mlp_w1:H=0.8915,top10E=0.16,eRank=377.6,q75/q25=5.09 mlp_w2:H=0.9692,top10E=0.04,eRank=625.8,q75/q25=3.00 vo_prod:H=0.7240,top10E=0.27,eRank=127.8,q75/q25=2099.40 train_time:334399ms step_avg:76.00ms +[2025-09-02 04:30:19] [Rank 0] step:4401/10000 train_time:334412ms step_avg:75.99ms +[2025-09-02 04:30:19] [Rank 0] step:4401/10000 train_time:334412ms step_avg:75.99ms +[2025-09-02 04:30:21] [Rank 0] step:4421/10000 train_time:335924ms step_avg:75.98ms +[2025-09-02 04:30:21] [Rank 0] step:4421/10000 train_time:335924ms step_avg:75.98ms +[2025-09-02 04:30:22] [Rank 0] step:4441/10000 train_time:337498ms step_avg:76.00ms +[2025-09-02 04:30:22] [Rank 0] step:4441/10000 train_time:337498ms step_avg:76.00ms +[2025-09-02 04:30:24] [Rank 0] step:4461/10000 train_time:339077ms step_avg:76.01ms +[2025-09-02 04:30:24] [Rank 0] step:4461/10000 train_time:339077ms step_avg:76.01ms +[2025-09-02 04:30:25] [Rank 0] step:4481/10000 train_time:340660ms step_avg:76.02ms +[2025-09-02 04:30:25] [Rank 0] step:4481/10000 train_time:340660ms step_avg:76.02ms +[2025-09-02 04:30:27] [Rank 0] step:4501/10000 train_time:342241ms step_avg:76.04ms +[2025-09-02 04:30:27] [Rank 0] step:4501/10000 train_time:342241ms step_avg:76.04ms +[2025-09-02 04:30:29] [Rank 0] step:4521/10000 train_time:343820ms step_avg:76.05ms +[2025-09-02 04:30:29] [Rank 0] step:4521/10000 train_time:343820ms step_avg:76.05ms +[2025-09-02 
04:30:30] [Rank 0] step:4541/10000 train_time:345402ms step_avg:76.06ms +[2025-09-02 04:30:30] [Rank 0] step:4541/10000 train_time:345402ms step_avg:76.06ms +[2025-09-02 04:30:32] [Rank 0] step:4561/10000 train_time:346988ms step_avg:76.08ms +[2025-09-02 04:30:32] [Rank 0] step:4561/10000 train_time:346988ms step_avg:76.08ms +[2025-09-02 04:30:33] [Rank 0] step:4581/10000 train_time:348569ms step_avg:76.09ms +[2025-09-02 04:30:33] [Rank 0] step:4581/10000 train_time:348569ms step_avg:76.09ms +[2025-09-02 04:30:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:30:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:30:47] [Rank 0] PRINT: step:4600/10000 val_loss:3.9252 svd_entropy: attn_qk:H=0.7622,top10E=0.27,eRank=163.4,q75/q25=60.16 attn_vo:H=0.8360,top10E=0.15,eRank=284.2,q75/q25=45.28 mlp_w1:H=0.8934,top10E=0.16,eRank=382.2,q75/q25=5.04 mlp_w2:H=0.9692,top10E=0.04,eRank=625.9,q75/q25=3.00 vo_prod:H=0.7256,top10E=0.27,eRank=129.2,q75/q25=1985.49 train_time:350231ms step_avg:76.14ms +[2025-09-02 04:30:47] [Rank 0] PRINT: step:4600/10000 val_loss:3.9252 svd_entropy: attn_qk:H=0.7622,top10E=0.27,eRank=163.4,q75/q25=60.16 attn_vo:H=0.8360,top10E=0.15,eRank=284.2,q75/q25=45.28 mlp_w1:H=0.8934,top10E=0.16,eRank=382.2,q75/q25=5.04 mlp_w2:H=0.9692,top10E=0.04,eRank=625.9,q75/q25=3.00 vo_prod:H=0.7256,top10E=0.27,eRank=129.2,q75/q25=1985.49 train_time:350231ms step_avg:76.14ms +[2025-09-02 04:30:47] [Rank 0] step:4601/10000 train_time:350245ms step_avg:76.12ms +[2025-09-02 04:30:47] [Rank 0] step:4601/10000 train_time:350245ms step_avg:76.12ms +[2025-09-02 04:30:48] [Rank 0] step:4621/10000 train_time:351747ms step_avg:76.12ms +[2025-09-02 04:30:48] [Rank 0] step:4621/10000 train_time:351747ms step_avg:76.12ms +[2025-09-02 04:30:50] [Rank 0] step:4641/10000 train_time:353326ms 
step_avg:76.13ms +[2025-09-02 04:30:50] [Rank 0] step:4641/10000 train_time:353326ms step_avg:76.13ms +[2025-09-02 04:30:52] [Rank 0] step:4661/10000 train_time:354906ms step_avg:76.14ms +[2025-09-02 04:30:52] [Rank 0] step:4661/10000 train_time:354906ms step_avg:76.14ms +[2025-09-02 04:30:53] [Rank 0] step:4681/10000 train_time:356490ms step_avg:76.16ms +[2025-09-02 04:30:53] [Rank 0] step:4681/10000 train_time:356490ms step_avg:76.16ms +[2025-09-02 04:30:55] [Rank 0] step:4701/10000 train_time:358070ms step_avg:76.17ms +[2025-09-02 04:30:55] [Rank 0] step:4701/10000 train_time:358070ms step_avg:76.17ms +[2025-09-02 04:30:56] [Rank 0] step:4721/10000 train_time:359688ms step_avg:76.19ms +[2025-09-02 04:30:56] [Rank 0] step:4721/10000 train_time:359688ms step_avg:76.19ms +[2025-09-02 04:30:58] [Rank 0] step:4741/10000 train_time:361270ms step_avg:76.20ms +[2025-09-02 04:30:58] [Rank 0] step:4741/10000 train_time:361270ms step_avg:76.20ms +[2025-09-02 04:30:59] [Rank 0] step:4761/10000 train_time:362852ms step_avg:76.21ms +[2025-09-02 04:30:59] [Rank 0] step:4761/10000 train_time:362852ms step_avg:76.21ms +[2025-09-02 04:31:01] [Rank 0] step:4781/10000 train_time:364432ms step_avg:76.23ms +[2025-09-02 04:31:01] [Rank 0] step:4781/10000 train_time:364432ms step_avg:76.23ms +[2025-09-02 04:31:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:31:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:31:14] [Rank 0] PRINT: step:4800/10000 val_loss:3.9108 svd_entropy: attn_qk:H=0.7636,top10E=0.27,eRank=164.8,q75/q25=59.80 attn_vo:H=0.8375,top10E=0.15,eRank=286.5,q75/q25=44.18 mlp_w1:H=0.8950,top10E=0.15,eRank=386.2,q75/q25=4.98 mlp_w2:H=0.9692,top10E=0.04,eRank=626.1,q75/q25=3.00 vo_prod:H=0.7278,top10E=0.27,eRank=130.9,q75/q25=1842.54 train_time:366096ms step_avg:76.27ms +[2025-09-02 04:31:14] [Rank 0] PRINT: step:4800/10000 val_loss:3.9108 svd_entropy: attn_qk:H=0.7636,top10E=0.27,eRank=164.8,q75/q25=59.80 attn_vo:H=0.8375,top10E=0.15,eRank=286.5,q75/q25=44.18 mlp_w1:H=0.8950,top10E=0.15,eRank=386.2,q75/q25=4.98 mlp_w2:H=0.9692,top10E=0.04,eRank=626.1,q75/q25=3.00 vo_prod:H=0.7278,top10E=0.27,eRank=130.9,q75/q25=1842.54 train_time:366096ms step_avg:76.27ms +[2025-09-02 04:31:14] [Rank 0] step:4801/10000 train_time:366110ms step_avg:76.26ms +[2025-09-02 04:31:14] [Rank 0] step:4801/10000 train_time:366110ms step_avg:76.26ms +[2025-09-02 04:31:16] [Rank 0] step:4821/10000 train_time:367620ms step_avg:76.25ms +[2025-09-02 04:31:16] [Rank 0] step:4821/10000 train_time:367620ms step_avg:76.25ms +[2025-09-02 04:31:18] [Rank 0] step:4841/10000 train_time:369200ms step_avg:76.27ms +[2025-09-02 04:31:18] [Rank 0] step:4841/10000 train_time:369200ms step_avg:76.27ms +[2025-09-02 04:31:19] [Rank 0] step:4861/10000 train_time:370782ms step_avg:76.28ms +[2025-09-02 04:31:19] [Rank 0] step:4861/10000 train_time:370782ms step_avg:76.28ms +[2025-09-02 04:31:21] [Rank 0] step:4881/10000 train_time:372361ms step_avg:76.29ms +[2025-09-02 04:31:21] [Rank 0] step:4881/10000 train_time:372361ms step_avg:76.29ms +[2025-09-02 04:31:22] [Rank 0] step:4901/10000 train_time:373941ms step_avg:76.30ms +[2025-09-02 04:31:22] [Rank 0] step:4901/10000 train_time:373941ms step_avg:76.30ms +[2025-09-02 04:31:24] [Rank 0] step:4921/10000 train_time:375523ms step_avg:76.31ms +[2025-09-02 04:31:24] [Rank 0] step:4921/10000 train_time:375523ms step_avg:76.31ms +[2025-09-02 
04:31:25] [Rank 0] step:4941/10000 train_time:377107ms step_avg:76.32ms +[2025-09-02 04:31:25] [Rank 0] step:4941/10000 train_time:377107ms step_avg:76.32ms +[2025-09-02 04:31:27] [Rank 0] step:4961/10000 train_time:378689ms step_avg:76.33ms +[2025-09-02 04:31:27] [Rank 0] step:4961/10000 train_time:378689ms step_avg:76.33ms +[2025-09-02 04:31:29] [Rank 0] step:4981/10000 train_time:380270ms step_avg:76.34ms +[2025-09-02 04:31:29] [Rank 0] step:4981/10000 train_time:380270ms step_avg:76.34ms +[2025-09-02 04:31:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:31:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:31:42] [Rank 0] PRINT: step:5000/10000 val_loss:3.8918 svd_entropy: attn_qk:H=0.7650,top10E=0.27,eRank=166.2,q75/q25=59.48 attn_vo:H=0.8388,top10E=0.15,eRank=288.6,q75/q25=43.22 mlp_w1:H=0.8965,top10E=0.15,eRank=389.9,q75/q25=4.94 mlp_w2:H=0.9693,top10E=0.04,eRank=626.2,q75/q25=2.99 vo_prod:H=0.7291,top10E=0.27,eRank=132.2,q75/q25=1695.73 train_time:381933ms step_avg:76.39ms +[2025-09-02 04:31:42] [Rank 0] PRINT: step:5000/10000 val_loss:3.8918 svd_entropy: attn_qk:H=0.7650,top10E=0.27,eRank=166.2,q75/q25=59.48 attn_vo:H=0.8388,top10E=0.15,eRank=288.6,q75/q25=43.22 mlp_w1:H=0.8965,top10E=0.15,eRank=389.9,q75/q25=4.94 mlp_w2:H=0.9693,top10E=0.04,eRank=626.2,q75/q25=2.99 vo_prod:H=0.7291,top10E=0.27,eRank=132.2,q75/q25=1695.73 train_time:381933ms step_avg:76.39ms +[2025-09-02 04:31:42] [Rank 0] step:5001/10000 train_time:381947ms step_avg:76.37ms +[2025-09-02 04:31:42] [Rank 0] step:5001/10000 train_time:381947ms step_avg:76.37ms +[2025-09-02 04:31:44] [Rank 0] step:5021/10000 train_time:383461ms step_avg:76.37ms +[2025-09-02 04:31:44] [Rank 0] step:5021/10000 train_time:383461ms step_avg:76.37ms +[2025-09-02 04:31:45] [Rank 0] step:5041/10000 train_time:385042ms 
step_avg:76.38ms +[2025-09-02 04:31:45] [Rank 0] step:5041/10000 train_time:385042ms step_avg:76.38ms +[2025-09-02 04:31:47] [Rank 0] step:5061/10000 train_time:386623ms step_avg:76.39ms +[2025-09-02 04:31:47] [Rank 0] step:5061/10000 train_time:386623ms step_avg:76.39ms +[2025-09-02 04:31:49] [Rank 0] step:5081/10000 train_time:388207ms step_avg:76.40ms +[2025-09-02 04:31:49] [Rank 0] step:5081/10000 train_time:388207ms step_avg:76.40ms +[2025-09-02 04:31:50] [Rank 0] step:5101/10000 train_time:389789ms step_avg:76.41ms +[2025-09-02 04:31:50] [Rank 0] step:5101/10000 train_time:389789ms step_avg:76.41ms +[2025-09-02 04:31:52] [Rank 0] step:5121/10000 train_time:391371ms step_avg:76.42ms +[2025-09-02 04:31:52] [Rank 0] step:5121/10000 train_time:391371ms step_avg:76.42ms +[2025-09-02 04:31:53] [Rank 0] step:5141/10000 train_time:392959ms step_avg:76.44ms +[2025-09-02 04:31:53] [Rank 0] step:5141/10000 train_time:392959ms step_avg:76.44ms +[2025-09-02 04:31:55] [Rank 0] step:5161/10000 train_time:394541ms step_avg:76.45ms +[2025-09-02 04:31:55] [Rank 0] step:5161/10000 train_time:394541ms step_avg:76.45ms +[2025-09-02 04:31:56] [Rank 0] step:5181/10000 train_time:396126ms step_avg:76.46ms +[2025-09-02 04:31:56] [Rank 0] step:5181/10000 train_time:396126ms step_avg:76.46ms +[2025-09-02 04:31:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:31:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:32:10] [Rank 0] PRINT: step:5200/10000 val_loss:3.8740 svd_entropy: attn_qk:H=0.7662,top10E=0.27,eRank=167.4,q75/q25=59.13 attn_vo:H=0.8400,top10E=0.15,eRank=290.5,q75/q25=42.63 mlp_w1:H=0.8979,top10E=0.15,eRank=393.4,q75/q25=4.89 mlp_w2:H=0.9693,top10E=0.04,eRank=626.4,q75/q25=2.99 vo_prod:H=0.7306,top10E=0.26,eRank=133.6,q75/q25=1636.86 train_time:397816ms step_avg:76.50ms +[2025-09-02 04:32:10] [Rank 0] PRINT: step:5200/10000 val_loss:3.8740 svd_entropy: attn_qk:H=0.7662,top10E=0.27,eRank=167.4,q75/q25=59.13 attn_vo:H=0.8400,top10E=0.15,eRank=290.5,q75/q25=42.63 mlp_w1:H=0.8979,top10E=0.15,eRank=393.4,q75/q25=4.89 mlp_w2:H=0.9693,top10E=0.04,eRank=626.4,q75/q25=2.99 vo_prod:H=0.7306,top10E=0.26,eRank=133.6,q75/q25=1636.86 train_time:397816ms step_avg:76.50ms +[2025-09-02 04:32:10] [Rank 0] step:5201/10000 train_time:397830ms step_avg:76.49ms +[2025-09-02 04:32:10] [Rank 0] step:5201/10000 train_time:397830ms step_avg:76.49ms +[2025-09-02 04:32:12] [Rank 0] step:5221/10000 train_time:399372ms step_avg:76.49ms +[2025-09-02 04:32:12] [Rank 0] step:5221/10000 train_time:399372ms step_avg:76.49ms +[2025-09-02 04:32:13] [Rank 0] step:5241/10000 train_time:400984ms step_avg:76.51ms +[2025-09-02 04:32:13] [Rank 0] step:5241/10000 train_time:400984ms step_avg:76.51ms +[2025-09-02 04:32:15] [Rank 0] step:5261/10000 train_time:402599ms step_avg:76.53ms +[2025-09-02 04:32:15] [Rank 0] step:5261/10000 train_time:402599ms step_avg:76.53ms +[2025-09-02 04:32:16] [Rank 0] step:5281/10000 train_time:404210ms step_avg:76.54ms +[2025-09-02 04:32:16] [Rank 0] step:5281/10000 train_time:404210ms step_avg:76.54ms +[2025-09-02 04:32:18] [Rank 0] step:5301/10000 train_time:405834ms step_avg:76.56ms +[2025-09-02 04:32:18] [Rank 0] step:5301/10000 train_time:405834ms step_avg:76.56ms +[2025-09-02 04:32:20] [Rank 0] step:5321/10000 train_time:407447ms step_avg:76.57ms +[2025-09-02 04:32:20] [Rank 0] step:5321/10000 train_time:407447ms step_avg:76.57ms +[2025-09-02 
04:32:21] [Rank 0] step:5341/10000 train_time:409060ms step_avg:76.59ms +[2025-09-02 04:32:21] [Rank 0] step:5341/10000 train_time:409060ms step_avg:76.59ms +[2025-09-02 04:32:23] [Rank 0] step:5361/10000 train_time:410678ms step_avg:76.60ms +[2025-09-02 04:32:23] [Rank 0] step:5361/10000 train_time:410678ms step_avg:76.60ms +[2025-09-02 04:32:24] [Rank 0] step:5381/10000 train_time:412295ms step_avg:76.62ms +[2025-09-02 04:32:24] [Rank 0] step:5381/10000 train_time:412295ms step_avg:76.62ms +[2025-09-02 04:32:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:32:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:32:38] [Rank 0] PRINT: step:5400/10000 val_loss:3.8565 svd_entropy: attn_qk:H=0.7672,top10E=0.27,eRank=168.4,q75/q25=58.85 attn_vo:H=0.8413,top10E=0.14,eRank=292.5,q75/q25=41.68 mlp_w1:H=0.8993,top10E=0.15,eRank=396.8,q75/q25=4.85 mlp_w2:H=0.9693,top10E=0.04,eRank=626.4,q75/q25=2.99 vo_prod:H=0.7326,top10E=0.26,eRank=135.2,q75/q25=1562.61 train_time:413993ms step_avg:76.67ms +[2025-09-02 04:32:38] [Rank 0] PRINT: step:5400/10000 val_loss:3.8565 svd_entropy: attn_qk:H=0.7672,top10E=0.27,eRank=168.4,q75/q25=58.85 attn_vo:H=0.8413,top10E=0.14,eRank=292.5,q75/q25=41.68 mlp_w1:H=0.8993,top10E=0.15,eRank=396.8,q75/q25=4.85 mlp_w2:H=0.9693,top10E=0.04,eRank=626.4,q75/q25=2.99 vo_prod:H=0.7326,top10E=0.26,eRank=135.2,q75/q25=1562.61 train_time:413993ms step_avg:76.67ms +[2025-09-02 04:32:38] [Rank 0] step:5401/10000 train_time:414006ms step_avg:76.65ms +[2025-09-02 04:32:38] [Rank 0] step:5401/10000 train_time:414006ms step_avg:76.65ms +[2025-09-02 04:32:40] [Rank 0] step:5421/10000 train_time:415541ms step_avg:76.65ms +[2025-09-02 04:32:40] [Rank 0] step:5421/10000 train_time:415541ms step_avg:76.65ms +[2025-09-02 04:32:41] [Rank 0] step:5441/10000 train_time:417152ms 
step_avg:76.67ms +[2025-09-02 04:32:41] [Rank 0] step:5441/10000 train_time:417152ms step_avg:76.67ms +[2025-09-02 04:32:43] [Rank 0] step:5461/10000 train_time:418775ms step_avg:76.68ms +[2025-09-02 04:32:43] [Rank 0] step:5461/10000 train_time:418775ms step_avg:76.68ms +[2025-09-02 04:32:44] [Rank 0] step:5481/10000 train_time:420392ms step_avg:76.70ms +[2025-09-02 04:32:44] [Rank 0] step:5481/10000 train_time:420392ms step_avg:76.70ms +[2025-09-02 04:32:46] [Rank 0] step:5501/10000 train_time:422011ms step_avg:76.72ms +[2025-09-02 04:32:46] [Rank 0] step:5501/10000 train_time:422011ms step_avg:76.72ms +[2025-09-02 04:32:48] [Rank 0] step:5521/10000 train_time:423634ms step_avg:76.73ms +[2025-09-02 04:32:48] [Rank 0] step:5521/10000 train_time:423634ms step_avg:76.73ms +[2025-09-02 04:32:49] [Rank 0] step:5541/10000 train_time:425250ms step_avg:76.75ms +[2025-09-02 04:32:49] [Rank 0] step:5541/10000 train_time:425250ms step_avg:76.75ms +[2025-09-02 04:32:51] [Rank 0] step:5561/10000 train_time:426870ms step_avg:76.76ms +[2025-09-02 04:32:51] [Rank 0] step:5561/10000 train_time:426870ms step_avg:76.76ms +[2025-09-02 04:32:53] [Rank 0] step:5581/10000 train_time:428487ms step_avg:76.78ms +[2025-09-02 04:32:53] [Rank 0] step:5581/10000 train_time:428487ms step_avg:76.78ms +[2025-09-02 04:32:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:32:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:33:06] [Rank 0] PRINT: step:5600/10000 val_loss:3.8437 svd_entropy: attn_qk:H=0.7682,top10E=0.27,eRank=169.4,q75/q25=58.43 attn_vo:H=0.8423,top10E=0.14,eRank=294.1,q75/q25=41.00 mlp_w1:H=0.9004,top10E=0.15,eRank=399.8,q75/q25=4.82 mlp_w2:H=0.9693,top10E=0.04,eRank=626.5,q75/q25=2.99 vo_prod:H=0.7337,top10E=0.26,eRank=136.2,q75/q25=1468.60 train_time:430188ms step_avg:76.82ms +[2025-09-02 04:33:06] [Rank 0] PRINT: step:5600/10000 val_loss:3.8437 svd_entropy: attn_qk:H=0.7682,top10E=0.27,eRank=169.4,q75/q25=58.43 attn_vo:H=0.8423,top10E=0.14,eRank=294.1,q75/q25=41.00 mlp_w1:H=0.9004,top10E=0.15,eRank=399.8,q75/q25=4.82 mlp_w2:H=0.9693,top10E=0.04,eRank=626.5,q75/q25=2.99 vo_prod:H=0.7337,top10E=0.26,eRank=136.2,q75/q25=1468.60 train_time:430188ms step_avg:76.82ms +[2025-09-02 04:33:06] [Rank 0] step:5601/10000 train_time:430201ms step_avg:76.81ms +[2025-09-02 04:33:06] [Rank 0] step:5601/10000 train_time:430201ms step_avg:76.81ms +[2025-09-02 04:33:08] [Rank 0] step:5621/10000 train_time:431751ms step_avg:76.81ms +[2025-09-02 04:33:08] [Rank 0] step:5621/10000 train_time:431751ms step_avg:76.81ms +[2025-09-02 04:33:09] [Rank 0] step:5641/10000 train_time:433362ms step_avg:76.82ms +[2025-09-02 04:33:09] [Rank 0] step:5641/10000 train_time:433362ms step_avg:76.82ms +[2025-09-02 04:33:11] [Rank 0] step:5661/10000 train_time:434972ms step_avg:76.84ms +[2025-09-02 04:33:11] [Rank 0] step:5661/10000 train_time:434972ms step_avg:76.84ms +[2025-09-02 04:33:13] [Rank 0] step:5681/10000 train_time:436587ms step_avg:76.85ms +[2025-09-02 04:33:13] [Rank 0] step:5681/10000 train_time:436587ms step_avg:76.85ms +[2025-09-02 04:33:14] [Rank 0] step:5701/10000 train_time:438199ms step_avg:76.86ms +[2025-09-02 04:33:14] [Rank 0] step:5701/10000 train_time:438199ms step_avg:76.86ms +[2025-09-02 04:33:16] [Rank 0] step:5721/10000 train_time:439817ms step_avg:76.88ms +[2025-09-02 04:33:16] [Rank 0] step:5721/10000 train_time:439817ms step_avg:76.88ms +[2025-09-02 
04:33:17] [Rank 0] step:5741/10000 train_time:441432ms step_avg:76.89ms +[2025-09-02 04:33:17] [Rank 0] step:5741/10000 train_time:441432ms step_avg:76.89ms +[2025-09-02 04:33:19] [Rank 0] step:5761/10000 train_time:443047ms step_avg:76.90ms +[2025-09-02 04:33:19] [Rank 0] step:5761/10000 train_time:443047ms step_avg:76.90ms +[2025-09-02 04:33:21] [Rank 0] step:5781/10000 train_time:444662ms step_avg:76.92ms +[2025-09-02 04:33:21] [Rank 0] step:5781/10000 train_time:444662ms step_avg:76.92ms +[2025-09-02 04:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:33:34] [Rank 0] PRINT: step:5800/10000 val_loss:3.8357 svd_entropy: attn_qk:H=0.7693,top10E=0.26,eRank=170.6,q75/q25=58.37 attn_vo:H=0.8434,top10E=0.14,eRank=295.9,q75/q25=40.41 mlp_w1:H=0.9016,top10E=0.15,eRank=402.7,q75/q25=4.77 mlp_w2:H=0.9693,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7351,top10E=0.26,eRank=137.5,q75/q25=1385.86 train_time:446356ms step_avg:76.96ms +[2025-09-02 04:33:34] [Rank 0] PRINT: step:5800/10000 val_loss:3.8357 svd_entropy: attn_qk:H=0.7693,top10E=0.26,eRank=170.6,q75/q25=58.37 attn_vo:H=0.8434,top10E=0.14,eRank=295.9,q75/q25=40.41 mlp_w1:H=0.9016,top10E=0.15,eRank=402.7,q75/q25=4.77 mlp_w2:H=0.9693,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7351,top10E=0.26,eRank=137.5,q75/q25=1385.86 train_time:446356ms step_avg:76.96ms +[2025-09-02 04:33:34] [Rank 0] step:5801/10000 train_time:446369ms step_avg:76.95ms +[2025-09-02 04:33:34] [Rank 0] step:5801/10000 train_time:446369ms step_avg:76.95ms +[2025-09-02 04:33:36] [Rank 0] step:5821/10000 train_time:447905ms step_avg:76.95ms +[2025-09-02 04:33:36] [Rank 0] step:5821/10000 train_time:447905ms step_avg:76.95ms +[2025-09-02 04:33:38] [Rank 0] step:5841/10000 train_time:449517ms 
step_avg:76.96ms +[2025-09-02 04:33:38] [Rank 0] step:5841/10000 train_time:449517ms step_avg:76.96ms +[2025-09-02 04:33:39] [Rank 0] step:5861/10000 train_time:451133ms step_avg:76.97ms +[2025-09-02 04:33:39] [Rank 0] step:5861/10000 train_time:451133ms step_avg:76.97ms +[2025-09-02 04:33:41] [Rank 0] step:5881/10000 train_time:452746ms step_avg:76.98ms +[2025-09-02 04:33:41] [Rank 0] step:5881/10000 train_time:452746ms step_avg:76.98ms +[2025-09-02 04:33:42] [Rank 0] step:5901/10000 train_time:454358ms step_avg:77.00ms +[2025-09-02 04:33:42] [Rank 0] step:5901/10000 train_time:454358ms step_avg:77.00ms +[2025-09-02 04:33:44] [Rank 0] step:5921/10000 train_time:455975ms step_avg:77.01ms +[2025-09-02 04:33:44] [Rank 0] step:5921/10000 train_time:455975ms step_avg:77.01ms +[2025-09-02 04:33:46] [Rank 0] step:5941/10000 train_time:457593ms step_avg:77.02ms +[2025-09-02 04:33:46] [Rank 0] step:5941/10000 train_time:457593ms step_avg:77.02ms +[2025-09-02 04:33:47] [Rank 0] step:5961/10000 train_time:459214ms step_avg:77.04ms +[2025-09-02 04:33:47] [Rank 0] step:5961/10000 train_time:459214ms step_avg:77.04ms +[2025-09-02 04:33:49] [Rank 0] step:5981/10000 train_time:460828ms step_avg:77.05ms +[2025-09-02 04:33:49] [Rank 0] step:5981/10000 train_time:460828ms step_avg:77.05ms +[2025-09-02 04:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:34:02] [Rank 0] PRINT: step:6000/10000 val_loss:3.8118 svd_entropy: attn_qk:H=0.7703,top10E=0.26,eRank=171.6,q75/q25=57.81 attn_vo:H=0.8444,top10E=0.14,eRank=297.5,q75/q25=39.74 mlp_w1:H=0.9027,top10E=0.15,eRank=405.5,q75/q25=4.74 mlp_w2:H=0.9694,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7362,top10E=0.26,eRank=138.5,q75/q25=1286.91 train_time:462523ms step_avg:77.09ms +[2025-09-02 04:34:02] [Rank 0] PRINT: step:6000/10000 val_loss:3.8118 svd_entropy: attn_qk:H=0.7703,top10E=0.26,eRank=171.6,q75/q25=57.81 attn_vo:H=0.8444,top10E=0.14,eRank=297.5,q75/q25=39.74 mlp_w1:H=0.9027,top10E=0.15,eRank=405.5,q75/q25=4.74 mlp_w2:H=0.9694,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7362,top10E=0.26,eRank=138.5,q75/q25=1286.91 train_time:462523ms step_avg:77.09ms +[2025-09-02 04:34:02] [Rank 0] step:6001/10000 train_time:462537ms step_avg:77.08ms +[2025-09-02 04:34:02] [Rank 0] step:6001/10000 train_time:462537ms step_avg:77.08ms +[2025-09-02 04:34:04] [Rank 0] step:6021/10000 train_time:464096ms step_avg:77.08ms +[2025-09-02 04:34:04] [Rank 0] step:6021/10000 train_time:464096ms step_avg:77.08ms +[2025-09-02 04:34:06] [Rank 0] step:6041/10000 train_time:465715ms step_avg:77.09ms +[2025-09-02 04:34:06] [Rank 0] step:6041/10000 train_time:465715ms step_avg:77.09ms +[2025-09-02 04:34:07] [Rank 0] step:6061/10000 train_time:467342ms step_avg:77.11ms +[2025-09-02 04:34:07] [Rank 0] step:6061/10000 train_time:467342ms step_avg:77.11ms +[2025-09-02 04:34:09] [Rank 0] step:6081/10000 train_time:468964ms step_avg:77.12ms +[2025-09-02 04:34:09] [Rank 0] step:6081/10000 train_time:468964ms step_avg:77.12ms +[2025-09-02 04:34:11] [Rank 0] step:6101/10000 train_time:470587ms step_avg:77.13ms +[2025-09-02 04:34:11] [Rank 0] step:6101/10000 train_time:470587ms step_avg:77.13ms +[2025-09-02 04:34:13] [Rank 0] step:6121/10000 train_time:473007ms step_avg:77.28ms +[2025-09-02 04:34:13] [Rank 0] step:6121/10000 train_time:473007ms step_avg:77.28ms +[2025-09-02 
04:34:15] [Rank 0] step:6141/10000 train_time:474634ms step_avg:77.29ms +[2025-09-02 04:34:15] [Rank 0] step:6141/10000 train_time:474634ms step_avg:77.29ms +[2025-09-02 04:34:16] [Rank 0] step:6161/10000 train_time:476257ms step_avg:77.30ms +[2025-09-02 04:34:16] [Rank 0] step:6161/10000 train_time:476257ms step_avg:77.30ms +[2025-09-02 04:34:18] [Rank 0] step:6181/10000 train_time:477873ms step_avg:77.31ms +[2025-09-02 04:34:18] [Rank 0] step:6181/10000 train_time:477873ms step_avg:77.31ms +[2025-09-02 04:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:34:31] [Rank 0] PRINT: step:6200/10000 val_loss:3.7977 svd_entropy: attn_qk:H=0.7713,top10E=0.26,eRank=172.6,q75/q25=57.61 attn_vo:H=0.8453,top10E=0.14,eRank=298.9,q75/q25=38.85 mlp_w1:H=0.9037,top10E=0.14,eRank=408.1,q75/q25=4.70 mlp_w2:H=0.9694,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7372,top10E=0.26,eRank=139.4,q75/q25=1236.14 train_time:479577ms step_avg:77.35ms +[2025-09-02 04:34:31] [Rank 0] PRINT: step:6200/10000 val_loss:3.7977 svd_entropy: attn_qk:H=0.7713,top10E=0.26,eRank=172.6,q75/q25=57.61 attn_vo:H=0.8453,top10E=0.14,eRank=298.9,q75/q25=38.85 mlp_w1:H=0.9037,top10E=0.14,eRank=408.1,q75/q25=4.70 mlp_w2:H=0.9694,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7372,top10E=0.26,eRank=139.4,q75/q25=1236.14 train_time:479577ms step_avg:77.35ms +[2025-09-02 04:34:32] [Rank 0] step:6201/10000 train_time:479590ms step_avg:77.34ms +[2025-09-02 04:34:32] [Rank 0] step:6201/10000 train_time:479590ms step_avg:77.34ms +[2025-09-02 04:34:33] [Rank 0] step:6221/10000 train_time:481135ms step_avg:77.34ms +[2025-09-02 04:34:33] [Rank 0] step:6221/10000 train_time:481135ms step_avg:77.34ms +[2025-09-02 04:34:35] [Rank 0] step:6241/10000 train_time:482748ms 
step_avg:77.35ms +[2025-09-02 04:34:35] [Rank 0] step:6241/10000 train_time:482748ms step_avg:77.35ms +[2025-09-02 04:34:36] [Rank 0] step:6261/10000 train_time:484365ms step_avg:77.36ms +[2025-09-02 04:34:36] [Rank 0] step:6261/10000 train_time:484365ms step_avg:77.36ms +[2025-09-02 04:34:38] [Rank 0] step:6281/10000 train_time:485985ms step_avg:77.37ms +[2025-09-02 04:34:38] [Rank 0] step:6281/10000 train_time:485985ms step_avg:77.37ms +[2025-09-02 04:34:40] [Rank 0] step:6301/10000 train_time:487607ms step_avg:77.39ms +[2025-09-02 04:34:40] [Rank 0] step:6301/10000 train_time:487607ms step_avg:77.39ms +[2025-09-02 04:34:41] [Rank 0] step:6321/10000 train_time:489226ms step_avg:77.40ms +[2025-09-02 04:34:41] [Rank 0] step:6321/10000 train_time:489226ms step_avg:77.40ms +[2025-09-02 04:34:43] [Rank 0] step:6341/10000 train_time:490850ms step_avg:77.41ms +[2025-09-02 04:34:43] [Rank 0] step:6341/10000 train_time:490850ms step_avg:77.41ms +[2025-09-02 04:34:44] [Rank 0] step:6361/10000 train_time:492478ms step_avg:77.42ms +[2025-09-02 04:34:44] [Rank 0] step:6361/10000 train_time:492478ms step_avg:77.42ms +[2025-09-02 04:34:46] [Rank 0] step:6381/10000 train_time:494104ms step_avg:77.43ms +[2025-09-02 04:34:46] [Rank 0] step:6381/10000 train_time:494104ms step_avg:77.43ms +[2025-09-02 04:34:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:34:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:35:00] [Rank 0] PRINT: step:6400/10000 val_loss:3.7812 svd_entropy: attn_qk:H=0.7721,top10E=0.26,eRank=173.5,q75/q25=57.35 attn_vo:H=0.8461,top10E=0.14,eRank=300.3,q75/q25=38.25 mlp_w1:H=0.9046,top10E=0.14,eRank=410.5,q75/q25=4.67 mlp_w2:H=0.9694,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7381,top10E=0.25,eRank=140.4,q75/q25=1161.68 train_time:495810ms step_avg:77.47ms +[2025-09-02 04:35:00] [Rank 0] PRINT: step:6400/10000 val_loss:3.7812 svd_entropy: attn_qk:H=0.7721,top10E=0.26,eRank=173.5,q75/q25=57.35 attn_vo:H=0.8461,top10E=0.14,eRank=300.3,q75/q25=38.25 mlp_w1:H=0.9046,top10E=0.14,eRank=410.5,q75/q25=4.67 mlp_w2:H=0.9694,top10E=0.04,eRank=626.6,q75/q25=2.98 vo_prod:H=0.7381,top10E=0.25,eRank=140.4,q75/q25=1161.68 train_time:495810ms step_avg:77.47ms +[2025-09-02 04:35:00] [Rank 0] step:6401/10000 train_time:495823ms step_avg:77.46ms +[2025-09-02 04:35:00] [Rank 0] step:6401/10000 train_time:495823ms step_avg:77.46ms +[2025-09-02 04:35:01] [Rank 0] step:6421/10000 train_time:497374ms step_avg:77.46ms +[2025-09-02 04:35:01] [Rank 0] step:6421/10000 train_time:497374ms step_avg:77.46ms +[2025-09-02 04:35:03] [Rank 0] step:6441/10000 train_time:498998ms step_avg:77.47ms +[2025-09-02 04:35:03] [Rank 0] step:6441/10000 train_time:498998ms step_avg:77.47ms +[2025-09-02 04:35:05] [Rank 0] step:6461/10000 train_time:500621ms step_avg:77.48ms +[2025-09-02 04:35:05] [Rank 0] step:6461/10000 train_time:500621ms step_avg:77.48ms +[2025-09-02 04:35:06] [Rank 0] step:6481/10000 train_time:502249ms step_avg:77.50ms +[2025-09-02 04:35:06] [Rank 0] step:6481/10000 train_time:502249ms step_avg:77.50ms +[2025-09-02 04:35:08] [Rank 0] step:6501/10000 train_time:503868ms step_avg:77.51ms +[2025-09-02 04:35:08] [Rank 0] step:6501/10000 train_time:503868ms step_avg:77.51ms +[2025-09-02 04:35:09] [Rank 0] step:6521/10000 train_time:505487ms step_avg:77.52ms +[2025-09-02 04:35:09] [Rank 0] step:6521/10000 train_time:505487ms step_avg:77.52ms +[2025-09-02 
04:35:11] [Rank 0] step:6541/10000 train_time:507111ms step_avg:77.53ms +[2025-09-02 04:35:11] [Rank 0] step:6541/10000 train_time:507111ms step_avg:77.53ms +[2025-09-02 04:35:13] [Rank 0] step:6561/10000 train_time:508738ms step_avg:77.54ms +[2025-09-02 04:35:13] [Rank 0] step:6561/10000 train_time:508738ms step_avg:77.54ms +[2025-09-02 04:35:14] [Rank 0] step:6581/10000 train_time:510355ms step_avg:77.55ms +[2025-09-02 04:35:14] [Rank 0] step:6581/10000 train_time:510355ms step_avg:77.55ms +[2025-09-02 04:35:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:35:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:35:28] [Rank 0] PRINT: step:6600/10000 val_loss:3.7692 svd_entropy: attn_qk:H=0.7729,top10E=0.26,eRank=174.4,q75/q25=56.71 attn_vo:H=0.8469,top10E=0.14,eRank=301.5,q75/q25=37.69 mlp_w1:H=0.9054,top10E=0.14,eRank=412.7,q75/q25=4.63 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.98 vo_prod:H=0.7390,top10E=0.25,eRank=141.3,q75/q25=1105.40 train_time:512058ms step_avg:77.58ms +[2025-09-02 04:35:28] [Rank 0] PRINT: step:6600/10000 val_loss:3.7692 svd_entropy: attn_qk:H=0.7729,top10E=0.26,eRank=174.4,q75/q25=56.71 attn_vo:H=0.8469,top10E=0.14,eRank=301.5,q75/q25=37.69 mlp_w1:H=0.9054,top10E=0.14,eRank=412.7,q75/q25=4.63 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.98 vo_prod:H=0.7390,top10E=0.25,eRank=141.3,q75/q25=1105.40 train_time:512058ms step_avg:77.58ms +[2025-09-02 04:35:28] [Rank 0] step:6601/10000 train_time:512071ms step_avg:77.57ms +[2025-09-02 04:35:28] [Rank 0] step:6601/10000 train_time:512071ms step_avg:77.57ms +[2025-09-02 04:35:30] [Rank 0] step:6621/10000 train_time:513627ms step_avg:77.58ms +[2025-09-02 04:35:30] [Rank 0] step:6621/10000 train_time:513627ms step_avg:77.58ms +[2025-09-02 04:35:31] [Rank 0] step:6641/10000 train_time:515251ms 
step_avg:77.59ms +[2025-09-02 04:35:31] [Rank 0] step:6641/10000 train_time:515251ms step_avg:77.59ms +[2025-09-02 04:35:33] [Rank 0] step:6661/10000 train_time:516872ms step_avg:77.60ms +[2025-09-02 04:35:33] [Rank 0] step:6661/10000 train_time:516872ms step_avg:77.60ms +[2025-09-02 04:35:35] [Rank 0] step:6681/10000 train_time:518510ms step_avg:77.61ms +[2025-09-02 04:35:35] [Rank 0] step:6681/10000 train_time:518510ms step_avg:77.61ms +[2025-09-02 04:35:36] [Rank 0] step:6701/10000 train_time:520162ms step_avg:77.62ms +[2025-09-02 04:35:36] [Rank 0] step:6701/10000 train_time:520162ms step_avg:77.62ms +[2025-09-02 04:35:38] [Rank 0] step:6721/10000 train_time:521816ms step_avg:77.64ms +[2025-09-02 04:35:38] [Rank 0] step:6721/10000 train_time:521816ms step_avg:77.64ms +[2025-09-02 04:35:39] [Rank 0] step:6741/10000 train_time:523464ms step_avg:77.65ms +[2025-09-02 04:35:39] [Rank 0] step:6741/10000 train_time:523464ms step_avg:77.65ms +[2025-09-02 04:35:41] [Rank 0] step:6761/10000 train_time:525114ms step_avg:77.67ms +[2025-09-02 04:35:41] [Rank 0] step:6761/10000 train_time:525114ms step_avg:77.67ms +[2025-09-02 04:35:43] [Rank 0] step:6781/10000 train_time:526763ms step_avg:77.68ms +[2025-09-02 04:35:43] [Rank 0] step:6781/10000 train_time:526763ms step_avg:77.68ms +[2025-09-02 04:35:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:35:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:35:57] [Rank 0] PRINT: step:6800/10000 val_loss:3.7520 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=175.1,q75/q25=56.64 attn_vo:H=0.8476,top10E=0.14,eRank=302.8,q75/q25=37.34 mlp_w1:H=0.9061,top10E=0.14,eRank=414.7,q75/q25=4.62 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7400,top10E=0.25,eRank=142.3,q75/q25=1064.37 train_time:528499ms step_avg:77.72ms +[2025-09-02 04:35:57] [Rank 0] PRINT: step:6800/10000 val_loss:3.7520 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=175.1,q75/q25=56.64 attn_vo:H=0.8476,top10E=0.14,eRank=302.8,q75/q25=37.34 mlp_w1:H=0.9061,top10E=0.14,eRank=414.7,q75/q25=4.62 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7400,top10E=0.25,eRank=142.3,q75/q25=1064.37 train_time:528499ms step_avg:77.72ms +[2025-09-02 04:35:57] [Rank 0] step:6801/10000 train_time:528513ms step_avg:77.71ms +[2025-09-02 04:35:57] [Rank 0] step:6801/10000 train_time:528513ms step_avg:77.71ms +[2025-09-02 04:35:58] [Rank 0] step:6821/10000 train_time:530072ms step_avg:77.71ms +[2025-09-02 04:35:58] [Rank 0] step:6821/10000 train_time:530072ms step_avg:77.71ms +[2025-09-02 04:36:00] [Rank 0] step:6841/10000 train_time:531713ms step_avg:77.72ms +[2025-09-02 04:36:00] [Rank 0] step:6841/10000 train_time:531713ms step_avg:77.72ms +[2025-09-02 04:36:02] [Rank 0] step:6861/10000 train_time:533357ms step_avg:77.74ms +[2025-09-02 04:36:02] [Rank 0] step:6861/10000 train_time:533357ms step_avg:77.74ms +[2025-09-02 04:36:03] [Rank 0] step:6881/10000 train_time:535002ms step_avg:77.75ms +[2025-09-02 04:36:03] [Rank 0] step:6881/10000 train_time:535002ms step_avg:77.75ms +[2025-09-02 04:36:05] [Rank 0] step:6901/10000 train_time:536650ms step_avg:77.76ms +[2025-09-02 04:36:05] [Rank 0] step:6901/10000 train_time:536650ms step_avg:77.76ms +[2025-09-02 04:36:07] [Rank 0] step:6921/10000 train_time:538294ms step_avg:77.78ms +[2025-09-02 04:36:07] [Rank 0] step:6921/10000 train_time:538294ms step_avg:77.78ms +[2025-09-02 
04:36:08] [Rank 0] step:6941/10000 train_time:539945ms step_avg:77.79ms +[2025-09-02 04:36:08] [Rank 0] step:6941/10000 train_time:539945ms step_avg:77.79ms +[2025-09-02 04:36:10] [Rank 0] step:6961/10000 train_time:541605ms step_avg:77.81ms +[2025-09-02 04:36:10] [Rank 0] step:6961/10000 train_time:541605ms step_avg:77.81ms +[2025-09-02 04:36:12] [Rank 0] step:6981/10000 train_time:543257ms step_avg:77.82ms +[2025-09-02 04:36:12] [Rank 0] step:6981/10000 train_time:543257ms step_avg:77.82ms +[2025-09-02 04:36:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:36:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:36:25] [Rank 0] PRINT: step:7000/10000 val_loss:3.7367 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.8,q75/q25=56.33 attn_vo:H=0.8483,top10E=0.14,eRank=303.9,q75/q25=36.67 mlp_w1:H=0.9068,top10E=0.14,eRank=416.4,q75/q25=4.59 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.98 vo_prod:H=0.7408,top10E=0.25,eRank=143.1,q75/q25=1020.34 train_time:544993ms step_avg:77.86ms +[2025-09-02 04:36:25] [Rank 0] PRINT: step:7000/10000 val_loss:3.7367 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.8,q75/q25=56.33 attn_vo:H=0.8483,top10E=0.14,eRank=303.9,q75/q25=36.67 mlp_w1:H=0.9068,top10E=0.14,eRank=416.4,q75/q25=4.59 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.98 vo_prod:H=0.7408,top10E=0.25,eRank=143.1,q75/q25=1020.34 train_time:544993ms step_avg:77.86ms +[2025-09-02 04:36:25] [Rank 0] step:7001/10000 train_time:545007ms step_avg:77.85ms +[2025-09-02 04:36:25] [Rank 0] step:7001/10000 train_time:545007ms step_avg:77.85ms +[2025-09-02 04:36:27] [Rank 0] step:7021/10000 train_time:546570ms step_avg:77.85ms +[2025-09-02 04:36:27] [Rank 0] step:7021/10000 train_time:546570ms step_avg:77.85ms +[2025-09-02 04:36:29] [Rank 0] step:7041/10000 train_time:548218ms 
step_avg:77.86ms +[2025-09-02 04:36:29] [Rank 0] step:7041/10000 train_time:548218ms step_avg:77.86ms +[2025-09-02 04:36:30] [Rank 0] step:7061/10000 train_time:549867ms step_avg:77.87ms +[2025-09-02 04:36:30] [Rank 0] step:7061/10000 train_time:549867ms step_avg:77.87ms +[2025-09-02 04:36:32] [Rank 0] step:7081/10000 train_time:551512ms step_avg:77.89ms +[2025-09-02 04:36:32] [Rank 0] step:7081/10000 train_time:551512ms step_avg:77.89ms +[2025-09-02 04:36:33] [Rank 0] step:7101/10000 train_time:553162ms step_avg:77.90ms +[2025-09-02 04:36:33] [Rank 0] step:7101/10000 train_time:553162ms step_avg:77.90ms +[2025-09-02 04:36:35] [Rank 0] step:7121/10000 train_time:554807ms step_avg:77.91ms +[2025-09-02 04:36:35] [Rank 0] step:7121/10000 train_time:554807ms step_avg:77.91ms +[2025-09-02 04:36:37] [Rank 0] step:7141/10000 train_time:556455ms step_avg:77.92ms +[2025-09-02 04:36:37] [Rank 0] step:7141/10000 train_time:556455ms step_avg:77.92ms +[2025-09-02 04:36:38] [Rank 0] step:7161/10000 train_time:558104ms step_avg:77.94ms +[2025-09-02 04:36:38] [Rank 0] step:7161/10000 train_time:558104ms step_avg:77.94ms +[2025-09-02 04:36:40] [Rank 0] step:7181/10000 train_time:559755ms step_avg:77.95ms +[2025-09-02 04:36:40] [Rank 0] step:7181/10000 train_time:559755ms step_avg:77.95ms +[2025-09-02 04:36:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:36:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:36:54] [Rank 0] PRINT: step:7200/10000 val_loss:3.7259 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.4,q75/q25=55.98 attn_vo:H=0.8490,top10E=0.14,eRank=305.0,q75/q25=36.61 mlp_w1:H=0.9074,top10E=0.14,eRank=418.1,q75/q25=4.57 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.98 vo_prod:H=0.7419,top10E=0.25,eRank=144.0,q75/q25=991.91 train_time:561491ms step_avg:77.98ms +[2025-09-02 04:36:54] [Rank 0] PRINT: step:7200/10000 val_loss:3.7259 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.4,q75/q25=55.98 attn_vo:H=0.8490,top10E=0.14,eRank=305.0,q75/q25=36.61 mlp_w1:H=0.9074,top10E=0.14,eRank=418.1,q75/q25=4.57 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.98 vo_prod:H=0.7419,top10E=0.25,eRank=144.0,q75/q25=991.91 train_time:561491ms step_avg:77.98ms +[2025-09-02 04:36:54] [Rank 0] step:7201/10000 train_time:561505ms step_avg:77.98ms +[2025-09-02 04:36:54] [Rank 0] step:7201/10000 train_time:561505ms step_avg:77.98ms +[2025-09-02 04:36:55] [Rank 0] step:7221/10000 train_time:563084ms step_avg:77.98ms +[2025-09-02 04:36:55] [Rank 0] step:7221/10000 train_time:563084ms step_avg:77.98ms +[2025-09-02 04:36:57] [Rank 0] step:7241/10000 train_time:564729ms step_avg:77.99ms +[2025-09-02 04:36:57] [Rank 0] step:7241/10000 train_time:564729ms step_avg:77.99ms +[2025-09-02 04:36:59] [Rank 0] step:7261/10000 train_time:566372ms step_avg:78.00ms +[2025-09-02 04:36:59] [Rank 0] step:7261/10000 train_time:566372ms step_avg:78.00ms +[2025-09-02 04:37:00] [Rank 0] step:7281/10000 train_time:568029ms step_avg:78.02ms +[2025-09-02 04:37:00] [Rank 0] step:7281/10000 train_time:568029ms step_avg:78.02ms +[2025-09-02 04:37:02] [Rank 0] step:7301/10000 train_time:569677ms step_avg:78.03ms +[2025-09-02 04:37:02] [Rank 0] step:7301/10000 train_time:569677ms step_avg:78.03ms +[2025-09-02 04:37:04] [Rank 0] step:7321/10000 train_time:571336ms step_avg:78.04ms +[2025-09-02 04:37:04] [Rank 0] step:7321/10000 train_time:571336ms step_avg:78.04ms +[2025-09-02 04:37:05] 
[Rank 0] step:7341/10000 train_time:572989ms step_avg:78.05ms +[2025-09-02 04:37:05] [Rank 0] step:7341/10000 train_time:572989ms step_avg:78.05ms +[2025-09-02 04:37:07] [Rank 0] step:7361/10000 train_time:574644ms step_avg:78.07ms +[2025-09-02 04:37:07] [Rank 0] step:7361/10000 train_time:574644ms step_avg:78.07ms +[2025-09-02 04:37:09] [Rank 0] step:7381/10000 train_time:576298ms step_avg:78.08ms +[2025-09-02 04:37:09] [Rank 0] step:7381/10000 train_time:576298ms step_avg:78.08ms +[2025-09-02 04:37:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:37:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:37:22] [Rank 0] PRINT: step:7400/10000 val_loss:3.7094 svd_entropy: attn_qk:H=0.7754,top10E=0.26,eRank=177.0,q75/q25=55.81 attn_vo:H=0.8495,top10E=0.14,eRank=305.9,q75/q25=36.15 mlp_w1:H=0.9079,top10E=0.14,eRank=419.5,q75/q25=4.55 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7426,top10E=0.25,eRank=144.8,q75/q25=945.46 train_time:578019ms step_avg:78.11ms +[2025-09-02 04:37:22] [Rank 0] PRINT: step:7400/10000 val_loss:3.7094 svd_entropy: attn_qk:H=0.7754,top10E=0.26,eRank=177.0,q75/q25=55.81 attn_vo:H=0.8495,top10E=0.14,eRank=305.9,q75/q25=36.15 mlp_w1:H=0.9079,top10E=0.14,eRank=419.5,q75/q25=4.55 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7426,top10E=0.25,eRank=144.8,q75/q25=945.46 train_time:578019ms step_avg:78.11ms +[2025-09-02 04:37:22] [Rank 0] step:7401/10000 train_time:578032ms step_avg:78.10ms +[2025-09-02 04:37:22] [Rank 0] step:7401/10000 train_time:578032ms step_avg:78.10ms +[2025-09-02 04:37:24] [Rank 0] step:7421/10000 train_time:579601ms step_avg:78.10ms +[2025-09-02 04:37:24] [Rank 0] step:7421/10000 train_time:579601ms step_avg:78.10ms +[2025-09-02 04:37:25] [Rank 0] step:7441/10000 train_time:581251ms step_avg:78.11ms 
+[2025-09-02 04:37:25] [Rank 0] step:7441/10000 train_time:581251ms step_avg:78.11ms +[2025-09-02 04:37:27] [Rank 0] step:7461/10000 train_time:582902ms step_avg:78.13ms +[2025-09-02 04:37:27] [Rank 0] step:7461/10000 train_time:582902ms step_avg:78.13ms +[2025-09-02 04:37:29] [Rank 0] step:7481/10000 train_time:584558ms step_avg:78.14ms +[2025-09-02 04:37:29] [Rank 0] step:7481/10000 train_time:584558ms step_avg:78.14ms +[2025-09-02 04:37:30] [Rank 0] step:7501/10000 train_time:586213ms step_avg:78.15ms +[2025-09-02 04:37:30] [Rank 0] step:7501/10000 train_time:586213ms step_avg:78.15ms +[2025-09-02 04:37:32] [Rank 0] step:7521/10000 train_time:587869ms step_avg:78.16ms +[2025-09-02 04:37:32] [Rank 0] step:7521/10000 train_time:587869ms step_avg:78.16ms +[2025-09-02 04:37:34] [Rank 0] step:7541/10000 train_time:589537ms step_avg:78.18ms +[2025-09-02 04:37:34] [Rank 0] step:7541/10000 train_time:589537ms step_avg:78.18ms +[2025-09-02 04:37:35] [Rank 0] step:7561/10000 train_time:591183ms step_avg:78.19ms +[2025-09-02 04:37:35] [Rank 0] step:7561/10000 train_time:591183ms step_avg:78.19ms +[2025-09-02 04:37:37] [Rank 0] step:7581/10000 train_time:592848ms step_avg:78.20ms +[2025-09-02 04:37:37] [Rank 0] step:7581/10000 train_time:592848ms step_avg:78.20ms +[2025-09-02 04:37:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:37:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:37:51] [Rank 0] PRINT: step:7600/10000 val_loss:3.7041 svd_entropy: attn_qk:H=0.7759,top10E=0.26,eRank=177.5,q75/q25=55.55 attn_vo:H=0.8500,top10E=0.14,eRank=306.7,q75/q25=35.61 mlp_w1:H=0.9084,top10E=0.14,eRank=420.8,q75/q25=4.53 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7432,top10E=0.25,eRank=145.5,q75/q25=921.14 train_time:594598ms step_avg:78.24ms +[2025-09-02 04:37:51] [Rank 0] PRINT: step:7600/10000 val_loss:3.7041 svd_entropy: attn_qk:H=0.7759,top10E=0.26,eRank=177.5,q75/q25=55.55 attn_vo:H=0.8500,top10E=0.14,eRank=306.7,q75/q25=35.61 mlp_w1:H=0.9084,top10E=0.14,eRank=420.8,q75/q25=4.53 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7432,top10E=0.25,eRank=145.5,q75/q25=921.14 train_time:594598ms step_avg:78.24ms +[2025-09-02 04:37:51] [Rank 0] step:7601/10000 train_time:594612ms step_avg:78.23ms +[2025-09-02 04:37:51] [Rank 0] step:7601/10000 train_time:594612ms step_avg:78.23ms +[2025-09-02 04:37:52] [Rank 0] step:7621/10000 train_time:596191ms step_avg:78.23ms +[2025-09-02 04:37:52] [Rank 0] step:7621/10000 train_time:596191ms step_avg:78.23ms +[2025-09-02 04:37:54] [Rank 0] step:7641/10000 train_time:597843ms step_avg:78.24ms +[2025-09-02 04:37:54] [Rank 0] step:7641/10000 train_time:597843ms step_avg:78.24ms +[2025-09-02 04:37:56] [Rank 0] step:7661/10000 train_time:599496ms step_avg:78.25ms +[2025-09-02 04:37:56] [Rank 0] step:7661/10000 train_time:599496ms step_avg:78.25ms +[2025-09-02 04:37:57] [Rank 0] step:7681/10000 train_time:601146ms step_avg:78.26ms +[2025-09-02 04:37:57] [Rank 0] step:7681/10000 train_time:601146ms step_avg:78.26ms +[2025-09-02 04:37:59] [Rank 0] step:7701/10000 train_time:602800ms step_avg:78.28ms +[2025-09-02 04:37:59] [Rank 0] step:7701/10000 train_time:602800ms step_avg:78.28ms +[2025-09-02 04:38:01] [Rank 0] step:7721/10000 train_time:604465ms step_avg:78.29ms +[2025-09-02 04:38:01] [Rank 0] step:7721/10000 train_time:604465ms step_avg:78.29ms +[2025-09-02 04:38:02] 
[Rank 0] step:7741/10000 train_time:606122ms step_avg:78.30ms +[2025-09-02 04:38:02] [Rank 0] step:7741/10000 train_time:606122ms step_avg:78.30ms +[2025-09-02 04:38:04] [Rank 0] step:7761/10000 train_time:607784ms step_avg:78.31ms +[2025-09-02 04:38:04] [Rank 0] step:7761/10000 train_time:607784ms step_avg:78.31ms +[2025-09-02 04:38:06] [Rank 0] step:7781/10000 train_time:609442ms step_avg:78.32ms +[2025-09-02 04:38:06] [Rank 0] step:7781/10000 train_time:609442ms step_avg:78.32ms +[2025-09-02 04:38:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:38:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:38:19] [Rank 0] PRINT: step:7800/10000 val_loss:3.6903 svd_entropy: attn_qk:H=0.7763,top10E=0.26,eRank=178.0,q75/q25=55.41 attn_vo:H=0.8505,top10E=0.14,eRank=307.6,q75/q25=35.20 mlp_w1:H=0.9089,top10E=0.14,eRank=422.1,q75/q25=4.51 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7439,top10E=0.25,eRank=146.3,q75/q25=899.41 train_time:611191ms step_avg:78.36ms +[2025-09-02 04:38:19] [Rank 0] PRINT: step:7800/10000 val_loss:3.6903 svd_entropy: attn_qk:H=0.7763,top10E=0.26,eRank=178.0,q75/q25=55.41 attn_vo:H=0.8505,top10E=0.14,eRank=307.6,q75/q25=35.20 mlp_w1:H=0.9089,top10E=0.14,eRank=422.1,q75/q25=4.51 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7439,top10E=0.25,eRank=146.3,q75/q25=899.41 train_time:611191ms step_avg:78.36ms +[2025-09-02 04:38:19] [Rank 0] step:7801/10000 train_time:611205ms step_avg:78.35ms +[2025-09-02 04:38:19] [Rank 0] step:7801/10000 train_time:611205ms step_avg:78.35ms +[2025-09-02 04:38:21] [Rank 0] step:7821/10000 train_time:612772ms step_avg:78.35ms +[2025-09-02 04:38:21] [Rank 0] step:7821/10000 train_time:612772ms step_avg:78.35ms +[2025-09-02 04:38:23] [Rank 0] step:7841/10000 train_time:614417ms step_avg:78.36ms 
+[2025-09-02 04:38:23] [Rank 0] step:7841/10000 train_time:614417ms step_avg:78.36ms +[2025-09-02 04:38:24] [Rank 0] step:7861/10000 train_time:616075ms step_avg:78.37ms +[2025-09-02 04:38:24] [Rank 0] step:7861/10000 train_time:616075ms step_avg:78.37ms +[2025-09-02 04:38:26] [Rank 0] step:7881/10000 train_time:617735ms step_avg:78.38ms +[2025-09-02 04:38:26] [Rank 0] step:7881/10000 train_time:617735ms step_avg:78.38ms +[2025-09-02 04:38:28] [Rank 0] step:7901/10000 train_time:619387ms step_avg:78.39ms +[2025-09-02 04:38:28] [Rank 0] step:7901/10000 train_time:619387ms step_avg:78.39ms +[2025-09-02 04:38:29] [Rank 0] step:7921/10000 train_time:621041ms step_avg:78.40ms +[2025-09-02 04:38:29] [Rank 0] step:7921/10000 train_time:621041ms step_avg:78.40ms +[2025-09-02 04:38:31] [Rank 0] step:7941/10000 train_time:622702ms step_avg:78.42ms +[2025-09-02 04:38:31] [Rank 0] step:7941/10000 train_time:622702ms step_avg:78.42ms +[2025-09-02 04:38:33] [Rank 0] step:7961/10000 train_time:624361ms step_avg:78.43ms +[2025-09-02 04:38:33] [Rank 0] step:7961/10000 train_time:624361ms step_avg:78.43ms +[2025-09-02 04:38:34] [Rank 0] step:7981/10000 train_time:626010ms step_avg:78.44ms +[2025-09-02 04:38:34] [Rank 0] step:7981/10000 train_time:626010ms step_avg:78.44ms +[2025-09-02 04:38:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:38:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:38:48] [Rank 0] PRINT: step:8000/10000 val_loss:3.6746 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.4,q75/q25=55.02 attn_vo:H=0.8510,top10E=0.13,eRank=308.4,q75/q25=34.79 mlp_w1:H=0.9093,top10E=0.14,eRank=423.1,q75/q25=4.49 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7448,top10E=0.25,eRank=147.1,q75/q25=868.31 train_time:627751ms step_avg:78.47ms +[2025-09-02 04:38:48] [Rank 0] PRINT: step:8000/10000 val_loss:3.6746 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.4,q75/q25=55.02 attn_vo:H=0.8510,top10E=0.13,eRank=308.4,q75/q25=34.79 mlp_w1:H=0.9093,top10E=0.14,eRank=423.1,q75/q25=4.49 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7448,top10E=0.25,eRank=147.1,q75/q25=868.31 train_time:627751ms step_avg:78.47ms +[2025-09-02 04:38:48] [Rank 0] step:8001/10000 train_time:627765ms step_avg:78.46ms +[2025-09-02 04:38:48] [Rank 0] step:8001/10000 train_time:627765ms step_avg:78.46ms +[2025-09-02 04:38:50] [Rank 0] step:8021/10000 train_time:629356ms step_avg:78.46ms +[2025-09-02 04:38:50] [Rank 0] step:8021/10000 train_time:629356ms step_avg:78.46ms +[2025-09-02 04:38:51] [Rank 0] step:8041/10000 train_time:631014ms step_avg:78.47ms +[2025-09-02 04:38:51] [Rank 0] step:8041/10000 train_time:631014ms step_avg:78.47ms +[2025-09-02 04:38:53] [Rank 0] step:8061/10000 train_time:632664ms step_avg:78.48ms +[2025-09-02 04:38:53] [Rank 0] step:8061/10000 train_time:632664ms step_avg:78.48ms +[2025-09-02 04:38:54] [Rank 0] step:8081/10000 train_time:634309ms step_avg:78.49ms +[2025-09-02 04:38:54] [Rank 0] step:8081/10000 train_time:634309ms step_avg:78.49ms +[2025-09-02 04:38:56] [Rank 0] step:8101/10000 train_time:635970ms step_avg:78.51ms +[2025-09-02 04:38:56] [Rank 0] step:8101/10000 train_time:635970ms step_avg:78.51ms +[2025-09-02 04:38:58] [Rank 0] step:8121/10000 train_time:637622ms step_avg:78.52ms +[2025-09-02 04:38:58] [Rank 0] step:8121/10000 train_time:637622ms step_avg:78.52ms +[2025-09-02 04:39:00] 
[Rank 0] step:8141/10000 train_time:639562ms step_avg:78.56ms +[2025-09-02 04:39:00] [Rank 0] step:8141/10000 train_time:639562ms step_avg:78.56ms +[2025-09-02 04:39:01] [Rank 0] step:8161/10000 train_time:641232ms step_avg:78.57ms +[2025-09-02 04:39:01] [Rank 0] step:8161/10000 train_time:641232ms step_avg:78.57ms +[2025-09-02 04:39:03] [Rank 0] step:8181/10000 train_time:642913ms step_avg:78.59ms +[2025-09-02 04:39:03] [Rank 0] step:8181/10000 train_time:642913ms step_avg:78.59ms +[2025-09-02 04:39:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:39:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:39:17] [Rank 0] PRINT: step:8200/10000 val_loss:3.6654 svd_entropy: attn_qk:H=0.7770,top10E=0.26,eRank=178.8,q75/q25=55.00 attn_vo:H=0.8514,top10E=0.13,eRank=309.1,q75/q25=34.41 mlp_w1:H=0.9096,top10E=0.14,eRank=424.2,q75/q25=4.47 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7455,top10E=0.25,eRank=147.8,q75/q25=840.97 train_time:644704ms step_avg:78.62ms +[2025-09-02 04:39:17] [Rank 0] PRINT: step:8200/10000 val_loss:3.6654 svd_entropy: attn_qk:H=0.7770,top10E=0.26,eRank=178.8,q75/q25=55.00 attn_vo:H=0.8514,top10E=0.13,eRank=309.1,q75/q25=34.41 mlp_w1:H=0.9096,top10E=0.14,eRank=424.2,q75/q25=4.47 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7455,top10E=0.25,eRank=147.8,q75/q25=840.97 train_time:644704ms step_avg:78.62ms +[2025-09-02 04:39:17] [Rank 0] step:8201/10000 train_time:644718ms step_avg:78.61ms +[2025-09-02 04:39:17] [Rank 0] step:8201/10000 train_time:644718ms step_avg:78.61ms +[2025-09-02 04:39:18] [Rank 0] step:8221/10000 train_time:646334ms step_avg:78.62ms +[2025-09-02 04:39:18] [Rank 0] step:8221/10000 train_time:646334ms step_avg:78.62ms +[2025-09-02 04:39:20] [Rank 0] step:8241/10000 train_time:648020ms step_avg:78.63ms 
+[2025-09-02 04:39:20] [Rank 0] step:8241/10000 train_time:648020ms step_avg:78.63ms +[2025-09-02 04:39:22] [Rank 0] step:8261/10000 train_time:649704ms step_avg:78.65ms +[2025-09-02 04:39:22] [Rank 0] step:8261/10000 train_time:649704ms step_avg:78.65ms +[2025-09-02 04:39:24] [Rank 0] step:8281/10000 train_time:651385ms step_avg:78.66ms +[2025-09-02 04:39:24] [Rank 0] step:8281/10000 train_time:651385ms step_avg:78.66ms +[2025-09-02 04:39:25] [Rank 0] step:8301/10000 train_time:653067ms step_avg:78.67ms +[2025-09-02 04:39:25] [Rank 0] step:8301/10000 train_time:653067ms step_avg:78.67ms +[2025-09-02 04:39:27] [Rank 0] step:8321/10000 train_time:654740ms step_avg:78.69ms +[2025-09-02 04:39:27] [Rank 0] step:8321/10000 train_time:654740ms step_avg:78.69ms +[2025-09-02 04:39:29] [Rank 0] step:8341/10000 train_time:656424ms step_avg:78.70ms +[2025-09-02 04:39:29] [Rank 0] step:8341/10000 train_time:656424ms step_avg:78.70ms +[2025-09-02 04:39:30] [Rank 0] step:8361/10000 train_time:658111ms step_avg:78.71ms +[2025-09-02 04:39:30] [Rank 0] step:8361/10000 train_time:658111ms step_avg:78.71ms +[2025-09-02 04:39:32] [Rank 0] step:8381/10000 train_time:659789ms step_avg:78.72ms +[2025-09-02 04:39:32] [Rank 0] step:8381/10000 train_time:659789ms step_avg:78.72ms +[2025-09-02 04:39:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:39:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:39:45] [Rank 0] PRINT: step:8400/10000 val_loss:3.6542 svd_entropy: attn_qk:H=0.7773,top10E=0.26,eRank=179.0,q75/q25=54.72 attn_vo:H=0.8517,top10E=0.13,eRank=309.7,q75/q25=34.29 mlp_w1:H=0.9100,top10E=0.14,eRank=425.1,q75/q25=4.46 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7461,top10E=0.25,eRank=148.5,q75/q25=824.45 train_time:661557ms step_avg:78.76ms +[2025-09-02 04:39:45] [Rank 0] PRINT: step:8400/10000 val_loss:3.6542 svd_entropy: attn_qk:H=0.7773,top10E=0.26,eRank=179.0,q75/q25=54.72 attn_vo:H=0.8517,top10E=0.13,eRank=309.7,q75/q25=34.29 mlp_w1:H=0.9100,top10E=0.14,eRank=425.1,q75/q25=4.46 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7461,top10E=0.25,eRank=148.5,q75/q25=824.45 train_time:661557ms step_avg:78.76ms +[2025-09-02 04:39:46] [Rank 0] step:8401/10000 train_time:661571ms step_avg:78.75ms +[2025-09-02 04:39:46] [Rank 0] step:8401/10000 train_time:661571ms step_avg:78.75ms +[2025-09-02 04:39:47] [Rank 0] step:8421/10000 train_time:663181ms step_avg:78.75ms +[2025-09-02 04:39:47] [Rank 0] step:8421/10000 train_time:663181ms step_avg:78.75ms +[2025-09-02 04:39:49] [Rank 0] step:8441/10000 train_time:664858ms step_avg:78.77ms +[2025-09-02 04:39:49] [Rank 0] step:8441/10000 train_time:664858ms step_avg:78.77ms +[2025-09-02 04:39:51] [Rank 0] step:8461/10000 train_time:666538ms step_avg:78.78ms +[2025-09-02 04:39:51] [Rank 0] step:8461/10000 train_time:666538ms step_avg:78.78ms +[2025-09-02 04:39:52] [Rank 0] step:8481/10000 train_time:668224ms step_avg:78.79ms +[2025-09-02 04:39:52] [Rank 0] step:8481/10000 train_time:668224ms step_avg:78.79ms +[2025-09-02 04:39:54] [Rank 0] step:8501/10000 train_time:669928ms step_avg:78.81ms +[2025-09-02 04:39:54] [Rank 0] step:8501/10000 train_time:669928ms step_avg:78.81ms +[2025-09-02 04:39:56] [Rank 0] step:8521/10000 train_time:671616ms step_avg:78.82ms +[2025-09-02 04:39:56] [Rank 0] step:8521/10000 train_time:671616ms step_avg:78.82ms +[2025-09-02 04:39:57] 
[Rank 0] step:8541/10000 train_time:673310ms step_avg:78.83ms +[2025-09-02 04:39:57] [Rank 0] step:8541/10000 train_time:673310ms step_avg:78.83ms +[2025-09-02 04:39:59] [Rank 0] step:8561/10000 train_time:674994ms step_avg:78.85ms +[2025-09-02 04:39:59] [Rank 0] step:8561/10000 train_time:674994ms step_avg:78.85ms +[2025-09-02 04:40:01] [Rank 0] step:8581/10000 train_time:676679ms step_avg:78.86ms +[2025-09-02 04:40:01] [Rank 0] step:8581/10000 train_time:676679ms step_avg:78.86ms +[2025-09-02 04:40:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:40:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:40:14] [Rank 0] PRINT: step:8600/10000 val_loss:3.6452 svd_entropy: attn_qk:H=0.7776,top10E=0.26,eRank=179.3,q75/q25=54.54 attn_vo:H=0.8521,top10E=0.13,eRank=310.2,q75/q25=33.95 mlp_w1:H=0.9103,top10E=0.14,eRank=425.9,q75/q25=4.45 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7465,top10E=0.25,eRank=148.9,q75/q25=806.64 train_time:678440ms step_avg:78.89ms +[2025-09-02 04:40:14] [Rank 0] PRINT: step:8600/10000 val_loss:3.6452 svd_entropy: attn_qk:H=0.7776,top10E=0.26,eRank=179.3,q75/q25=54.54 attn_vo:H=0.8521,top10E=0.13,eRank=310.2,q75/q25=33.95 mlp_w1:H=0.9103,top10E=0.14,eRank=425.9,q75/q25=4.45 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7465,top10E=0.25,eRank=148.9,q75/q25=806.64 train_time:678440ms step_avg:78.89ms +[2025-09-02 04:40:14] [Rank 0] step:8601/10000 train_time:678454ms step_avg:78.88ms +[2025-09-02 04:40:14] [Rank 0] step:8601/10000 train_time:678454ms step_avg:78.88ms +[2025-09-02 04:40:16] [Rank 0] step:8621/10000 train_time:680060ms step_avg:78.88ms +[2025-09-02 04:40:16] [Rank 0] step:8621/10000 train_time:680060ms step_avg:78.88ms +[2025-09-02 04:40:18] [Rank 0] step:8641/10000 train_time:681741ms step_avg:78.90ms 
+[2025-09-02 04:40:18] [Rank 0] step:8641/10000 train_time:681741ms step_avg:78.90ms +[2025-09-02 04:40:19] [Rank 0] step:8661/10000 train_time:683424ms step_avg:78.91ms +[2025-09-02 04:40:19] [Rank 0] step:8661/10000 train_time:683424ms step_avg:78.91ms +[2025-09-02 04:40:21] [Rank 0] step:8681/10000 train_time:685105ms step_avg:78.92ms +[2025-09-02 04:40:21] [Rank 0] step:8681/10000 train_time:685105ms step_avg:78.92ms +[2025-09-02 04:40:23] [Rank 0] step:8701/10000 train_time:686784ms step_avg:78.93ms +[2025-09-02 04:40:23] [Rank 0] step:8701/10000 train_time:686784ms step_avg:78.93ms +[2025-09-02 04:40:24] [Rank 0] step:8721/10000 train_time:688469ms step_avg:78.94ms +[2025-09-02 04:40:24] [Rank 0] step:8721/10000 train_time:688469ms step_avg:78.94ms +[2025-09-02 04:40:26] [Rank 0] step:8741/10000 train_time:690141ms step_avg:78.95ms +[2025-09-02 04:40:26] [Rank 0] step:8741/10000 train_time:690141ms step_avg:78.95ms +[2025-09-02 04:40:28] [Rank 0] step:8761/10000 train_time:691823ms step_avg:78.97ms +[2025-09-02 04:40:28] [Rank 0] step:8761/10000 train_time:691823ms step_avg:78.97ms +[2025-09-02 04:40:29] [Rank 0] step:8781/10000 train_time:693512ms step_avg:78.98ms +[2025-09-02 04:40:29] [Rank 0] step:8781/10000 train_time:693512ms step_avg:78.98ms +[2025-09-02 04:40:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:40:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:40:43] [Rank 0] PRINT: step:8800/10000 val_loss:3.6358 svd_entropy: attn_qk:H=0.7778,top10E=0.26,eRank=179.5,q75/q25=54.33 attn_vo:H=0.8524,top10E=0.13,eRank=310.8,q75/q25=33.81 mlp_w1:H=0.9105,top10E=0.14,eRank=426.6,q75/q25=4.44 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7471,top10E=0.25,eRank=149.5,q75/q25=785.33 train_time:695283ms step_avg:79.01ms +[2025-09-02 04:40:43] [Rank 0] PRINT: step:8800/10000 val_loss:3.6358 svd_entropy: attn_qk:H=0.7778,top10E=0.26,eRank=179.5,q75/q25=54.33 attn_vo:H=0.8524,top10E=0.13,eRank=310.8,q75/q25=33.81 mlp_w1:H=0.9105,top10E=0.14,eRank=426.6,q75/q25=4.44 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.97 vo_prod:H=0.7471,top10E=0.25,eRank=149.5,q75/q25=785.33 train_time:695283ms step_avg:79.01ms +[2025-09-02 04:40:43] [Rank 0] step:8801/10000 train_time:695297ms step_avg:79.00ms +[2025-09-02 04:40:43] [Rank 0] step:8801/10000 train_time:695297ms step_avg:79.00ms +[2025-09-02 04:40:45] [Rank 0] step:8821/10000 train_time:696902ms step_avg:79.00ms +[2025-09-02 04:40:45] [Rank 0] step:8821/10000 train_time:696902ms step_avg:79.00ms +[2025-09-02 04:40:47] [Rank 0] step:8841/10000 train_time:698600ms step_avg:79.02ms +[2025-09-02 04:40:47] [Rank 0] step:8841/10000 train_time:698600ms step_avg:79.02ms +[2025-09-02 04:40:48] [Rank 0] step:8861/10000 train_time:700280ms step_avg:79.03ms +[2025-09-02 04:40:48] [Rank 0] step:8861/10000 train_time:700280ms step_avg:79.03ms +[2025-09-02 04:40:50] [Rank 0] step:8881/10000 train_time:701965ms step_avg:79.04ms +[2025-09-02 04:40:50] [Rank 0] step:8881/10000 train_time:701965ms step_avg:79.04ms +[2025-09-02 04:40:52] [Rank 0] step:8901/10000 train_time:703650ms step_avg:79.05ms +[2025-09-02 04:40:52] [Rank 0] step:8901/10000 train_time:703650ms step_avg:79.05ms +[2025-09-02 04:40:53] [Rank 0] step:8921/10000 train_time:705348ms step_avg:79.07ms +[2025-09-02 04:40:53] [Rank 0] step:8921/10000 train_time:705348ms step_avg:79.07ms +[2025-09-02 04:40:55] 
[Rank 0] step:8941/10000 train_time:707041ms step_avg:79.08ms +[2025-09-02 04:40:55] [Rank 0] step:8941/10000 train_time:707041ms step_avg:79.08ms +[2025-09-02 04:40:57] [Rank 0] step:8961/10000 train_time:708721ms step_avg:79.09ms +[2025-09-02 04:40:57] [Rank 0] step:8961/10000 train_time:708721ms step_avg:79.09ms +[2025-09-02 04:40:58] [Rank 0] step:8981/10000 train_time:710405ms step_avg:79.10ms +[2025-09-02 04:40:58] [Rank 0] step:8981/10000 train_time:710405ms step_avg:79.10ms +[2025-09-02 04:41:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:41:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:41:12] [Rank 0] PRINT: step:9000/10000 val_loss:3.6263 svd_entropy: attn_qk:H=0.7780,top10E=0.25,eRank=179.7,q75/q25=54.04 attn_vo:H=0.8527,top10E=0.13,eRank=311.3,q75/q25=33.56 mlp_w1:H=0.9107,top10E=0.14,eRank=427.2,q75/q25=4.43 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7476,top10E=0.25,eRank=150.0,q75/q25=772.51 train_time:712174ms step_avg:79.13ms +[2025-09-02 04:41:12] [Rank 0] PRINT: step:9000/10000 val_loss:3.6263 svd_entropy: attn_qk:H=0.7780,top10E=0.25,eRank=179.7,q75/q25=54.04 attn_vo:H=0.8527,top10E=0.13,eRank=311.3,q75/q25=33.56 mlp_w1:H=0.9107,top10E=0.14,eRank=427.2,q75/q25=4.43 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7476,top10E=0.25,eRank=150.0,q75/q25=772.51 train_time:712174ms step_avg:79.13ms +[2025-09-02 04:41:12] [Rank 0] step:9001/10000 train_time:712188ms step_avg:79.12ms +[2025-09-02 04:41:12] [Rank 0] step:9001/10000 train_time:712188ms step_avg:79.12ms +[2025-09-02 04:41:14] [Rank 0] step:9021/10000 train_time:713805ms step_avg:79.13ms +[2025-09-02 04:41:14] [Rank 0] step:9021/10000 train_time:713805ms step_avg:79.13ms +[2025-09-02 04:41:15] [Rank 0] step:9041/10000 train_time:715495ms step_avg:79.14ms 
+[2025-09-02 04:41:15] [Rank 0] step:9041/10000 train_time:715495ms step_avg:79.14ms +[2025-09-02 04:41:17] [Rank 0] step:9061/10000 train_time:717189ms step_avg:79.15ms +[2025-09-02 04:41:17] [Rank 0] step:9061/10000 train_time:717189ms step_avg:79.15ms +[2025-09-02 04:41:19] [Rank 0] step:9081/10000 train_time:718882ms step_avg:79.16ms +[2025-09-02 04:41:19] [Rank 0] step:9081/10000 train_time:718882ms step_avg:79.16ms +[2025-09-02 04:41:21] [Rank 0] step:9101/10000 train_time:720585ms step_avg:79.18ms +[2025-09-02 04:41:21] [Rank 0] step:9101/10000 train_time:720585ms step_avg:79.18ms +[2025-09-02 04:41:22] [Rank 0] step:9121/10000 train_time:722274ms step_avg:79.19ms +[2025-09-02 04:41:22] [Rank 0] step:9121/10000 train_time:722274ms step_avg:79.19ms +[2025-09-02 04:41:24] [Rank 0] step:9141/10000 train_time:723952ms step_avg:79.20ms +[2025-09-02 04:41:24] [Rank 0] step:9141/10000 train_time:723952ms step_avg:79.20ms +[2025-09-02 04:41:26] [Rank 0] step:9161/10000 train_time:725632ms step_avg:79.21ms +[2025-09-02 04:41:26] [Rank 0] step:9161/10000 train_time:725632ms step_avg:79.21ms +[2025-09-02 04:41:27] [Rank 0] step:9181/10000 train_time:727354ms step_avg:79.22ms +[2025-09-02 04:41:27] [Rank 0] step:9181/10000 train_time:727354ms step_avg:79.22ms +[2025-09-02 04:41:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:41:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:41:41] [Rank 0] PRINT: step:9200/10000 val_loss:3.6191 svd_entropy: attn_qk:H=0.7782,top10E=0.25,eRank=179.9,q75/q25=54.11 attn_vo:H=0.8529,top10E=0.13,eRank=311.7,q75/q25=33.30 mlp_w1:H=0.9109,top10E=0.14,eRank=427.8,q75/q25=4.42 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7480,top10E=0.24,eRank=150.4,q75/q25=761.74 train_time:729121ms step_avg:79.25ms +[2025-09-02 04:41:41] [Rank 0] PRINT: step:9200/10000 val_loss:3.6191 svd_entropy: attn_qk:H=0.7782,top10E=0.25,eRank=179.9,q75/q25=54.11 attn_vo:H=0.8529,top10E=0.13,eRank=311.7,q75/q25=33.30 mlp_w1:H=0.9109,top10E=0.14,eRank=427.8,q75/q25=4.42 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7480,top10E=0.24,eRank=150.4,q75/q25=761.74 train_time:729121ms step_avg:79.25ms +[2025-09-02 04:41:41] [Rank 0] step:9201/10000 train_time:729135ms step_avg:79.25ms +[2025-09-02 04:41:41] [Rank 0] step:9201/10000 train_time:729135ms step_avg:79.25ms +[2025-09-02 04:41:43] [Rank 0] step:9221/10000 train_time:730752ms step_avg:79.25ms +[2025-09-02 04:41:43] [Rank 0] step:9221/10000 train_time:730752ms step_avg:79.25ms +[2025-09-02 04:41:44] [Rank 0] step:9241/10000 train_time:732445ms step_avg:79.26ms +[2025-09-02 04:41:44] [Rank 0] step:9241/10000 train_time:732445ms step_avg:79.26ms +[2025-09-02 04:41:46] [Rank 0] step:9261/10000 train_time:734139ms step_avg:79.27ms +[2025-09-02 04:41:46] [Rank 0] step:9261/10000 train_time:734139ms step_avg:79.27ms +[2025-09-02 04:41:48] [Rank 0] step:9281/10000 train_time:735814ms step_avg:79.28ms +[2025-09-02 04:41:48] [Rank 0] step:9281/10000 train_time:735814ms step_avg:79.28ms +[2025-09-02 04:41:49] [Rank 0] step:9301/10000 train_time:737495ms step_avg:79.29ms +[2025-09-02 04:41:49] [Rank 0] step:9301/10000 train_time:737495ms step_avg:79.29ms +[2025-09-02 04:41:51] [Rank 0] step:9321/10000 train_time:739185ms step_avg:79.30ms +[2025-09-02 04:41:51] [Rank 0] step:9321/10000 train_time:739185ms step_avg:79.30ms +[2025-09-02 04:41:53] 
[Rank 0] step:9341/10000 train_time:740870ms step_avg:79.31ms +[2025-09-02 04:41:53] [Rank 0] step:9341/10000 train_time:740870ms step_avg:79.31ms +[2025-09-02 04:41:54] [Rank 0] step:9361/10000 train_time:742559ms step_avg:79.32ms +[2025-09-02 04:41:54] [Rank 0] step:9361/10000 train_time:742559ms step_avg:79.32ms +[2025-09-02 04:41:56] [Rank 0] step:9381/10000 train_time:744258ms step_avg:79.34ms +[2025-09-02 04:41:56] [Rank 0] step:9381/10000 train_time:744258ms step_avg:79.34ms +[2025-09-02 04:41:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:41:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:42:10] [Rank 0] PRINT: step:9400/10000 val_loss:3.6115 svd_entropy: attn_qk:H=0.7783,top10E=0.25,eRank=180.1,q75/q25=53.97 attn_vo:H=0.8531,top10E=0.13,eRank=312.0,q75/q25=33.19 mlp_w1:H=0.9111,top10E=0.14,eRank=428.2,q75/q25=4.41 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7483,top10E=0.24,eRank=150.9,q75/q25=750.67 train_time:746035ms step_avg:79.37ms +[2025-09-02 04:42:10] [Rank 0] PRINT: step:9400/10000 val_loss:3.6115 svd_entropy: attn_qk:H=0.7783,top10E=0.25,eRank=180.1,q75/q25=53.97 attn_vo:H=0.8531,top10E=0.13,eRank=312.0,q75/q25=33.19 mlp_w1:H=0.9111,top10E=0.14,eRank=428.2,q75/q25=4.41 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7483,top10E=0.24,eRank=150.9,q75/q25=750.67 train_time:746035ms step_avg:79.37ms +[2025-09-02 04:42:10] [Rank 0] step:9401/10000 train_time:746048ms step_avg:79.36ms +[2025-09-02 04:42:10] [Rank 0] step:9401/10000 train_time:746048ms step_avg:79.36ms +[2025-09-02 04:42:11] [Rank 0] step:9421/10000 train_time:747647ms step_avg:79.36ms +[2025-09-02 04:42:11] [Rank 0] step:9421/10000 train_time:747647ms step_avg:79.36ms +[2025-09-02 04:42:13] [Rank 0] step:9441/10000 train_time:749331ms step_avg:79.37ms 
+[2025-09-02 04:42:13] [Rank 0] step:9441/10000 train_time:749331ms step_avg:79.37ms +[2025-09-02 04:42:15] [Rank 0] step:9461/10000 train_time:751022ms step_avg:79.38ms +[2025-09-02 04:42:15] [Rank 0] step:9461/10000 train_time:751022ms step_avg:79.38ms +[2025-09-02 04:42:16] [Rank 0] step:9481/10000 train_time:752709ms step_avg:79.39ms +[2025-09-02 04:42:16] [Rank 0] step:9481/10000 train_time:752709ms step_avg:79.39ms +[2025-09-02 04:42:18] [Rank 0] step:9501/10000 train_time:754410ms step_avg:79.40ms +[2025-09-02 04:42:18] [Rank 0] step:9501/10000 train_time:754410ms step_avg:79.40ms +[2025-09-02 04:42:20] [Rank 0] step:9521/10000 train_time:756089ms step_avg:79.41ms +[2025-09-02 04:42:20] [Rank 0] step:9521/10000 train_time:756089ms step_avg:79.41ms +[2025-09-02 04:42:21] [Rank 0] step:9541/10000 train_time:757776ms step_avg:79.42ms +[2025-09-02 04:42:21] [Rank 0] step:9541/10000 train_time:757776ms step_avg:79.42ms +[2025-09-02 04:42:23] [Rank 0] step:9561/10000 train_time:759455ms step_avg:79.43ms +[2025-09-02 04:42:23] [Rank 0] step:9561/10000 train_time:759455ms step_avg:79.43ms +[2025-09-02 04:42:25] [Rank 0] step:9581/10000 train_time:761140ms step_avg:79.44ms +[2025-09-02 04:42:25] [Rank 0] step:9581/10000 train_time:761140ms step_avg:79.44ms +[2025-09-02 04:42:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:42:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:42:38] [Rank 0] PRINT: step:9600/10000 val_loss:3.6053 svd_entropy: attn_qk:H=0.7784,top10E=0.25,eRank=180.2,q75/q25=53.80 attn_vo:H=0.8533,top10E=0.13,eRank=312.3,q75/q25=33.04 mlp_w1:H=0.9112,top10E=0.14,eRank=428.6,q75/q25=4.41 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7487,top10E=0.24,eRank=151.2,q75/q25=752.73 train_time:762926ms step_avg:79.47ms +[2025-09-02 04:42:38] [Rank 0] PRINT: step:9600/10000 val_loss:3.6053 svd_entropy: attn_qk:H=0.7784,top10E=0.25,eRank=180.2,q75/q25=53.80 attn_vo:H=0.8533,top10E=0.13,eRank=312.3,q75/q25=33.04 mlp_w1:H=0.9112,top10E=0.14,eRank=428.6,q75/q25=4.41 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7487,top10E=0.24,eRank=151.2,q75/q25=752.73 train_time:762926ms step_avg:79.47ms +[2025-09-02 04:42:39] [Rank 0] step:9601/10000 train_time:762940ms step_avg:79.46ms +[2025-09-02 04:42:39] [Rank 0] step:9601/10000 train_time:762940ms step_avg:79.46ms +[2025-09-02 04:42:40] [Rank 0] step:9621/10000 train_time:764552ms step_avg:79.47ms +[2025-09-02 04:42:40] [Rank 0] step:9621/10000 train_time:764552ms step_avg:79.47ms +[2025-09-02 04:42:42] [Rank 0] step:9641/10000 train_time:766243ms step_avg:79.48ms +[2025-09-02 04:42:42] [Rank 0] step:9641/10000 train_time:766243ms step_avg:79.48ms +[2025-09-02 04:42:44] [Rank 0] step:9661/10000 train_time:767958ms step_avg:79.49ms +[2025-09-02 04:42:44] [Rank 0] step:9661/10000 train_time:767958ms step_avg:79.49ms +[2025-09-02 04:42:45] [Rank 0] step:9681/10000 train_time:769667ms step_avg:79.50ms +[2025-09-02 04:42:45] [Rank 0] step:9681/10000 train_time:769667ms step_avg:79.50ms +[2025-09-02 04:42:47] [Rank 0] step:9701/10000 train_time:771388ms step_avg:79.52ms +[2025-09-02 04:42:47] [Rank 0] step:9701/10000 train_time:771388ms step_avg:79.52ms +[2025-09-02 04:42:49] [Rank 0] step:9721/10000 train_time:773093ms step_avg:79.53ms +[2025-09-02 04:42:49] [Rank 0] step:9721/10000 train_time:773093ms step_avg:79.53ms +[2025-09-02 04:42:51] 
[Rank 0] step:9741/10000 train_time:774823ms step_avg:79.54ms +[2025-09-02 04:42:51] [Rank 0] step:9741/10000 train_time:774823ms step_avg:79.54ms +[2025-09-02 04:42:52] [Rank 0] step:9761/10000 train_time:776542ms step_avg:79.56ms +[2025-09-02 04:42:52] [Rank 0] step:9761/10000 train_time:776542ms step_avg:79.56ms +[2025-09-02 04:42:54] [Rank 0] step:9781/10000 train_time:778261ms step_avg:79.57ms +[2025-09-02 04:42:54] [Rank 0] step:9781/10000 train_time:778261ms step_avg:79.57ms +[2025-09-02 04:42:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:42:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:43:08] [Rank 0] PRINT: step:9800/10000 val_loss:3.5984 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=180.2,q75/q25=53.74 attn_vo:H=0.8534,top10E=0.13,eRank=312.5,q75/q25=32.94 mlp_w1:H=0.9114,top10E=0.14,eRank=428.9,q75/q25=4.40 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7490,top10E=0.24,eRank=151.5,q75/q25=749.67 train_time:780078ms step_avg:79.60ms +[2025-09-02 04:43:08] [Rank 0] PRINT: step:9800/10000 val_loss:3.5984 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=180.2,q75/q25=53.74 attn_vo:H=0.8534,top10E=0.13,eRank=312.5,q75/q25=32.94 mlp_w1:H=0.9114,top10E=0.14,eRank=428.9,q75/q25=4.40 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7490,top10E=0.24,eRank=151.5,q75/q25=749.67 train_time:780078ms step_avg:79.60ms +[2025-09-02 04:43:08] [Rank 0] step:9801/10000 train_time:780092ms step_avg:79.59ms +[2025-09-02 04:43:08] [Rank 0] step:9801/10000 train_time:780092ms step_avg:79.59ms +[2025-09-02 04:43:09] [Rank 0] step:9821/10000 train_time:781732ms step_avg:79.60ms +[2025-09-02 04:43:09] [Rank 0] step:9821/10000 train_time:781732ms step_avg:79.60ms +[2025-09-02 04:43:11] [Rank 0] step:9841/10000 train_time:783455ms step_avg:79.61ms 
+[2025-09-02 04:43:11] [Rank 0] step:9841/10000 train_time:783455ms step_avg:79.61ms +[2025-09-02 04:43:13] [Rank 0] step:9861/10000 train_time:785155ms step_avg:79.62ms +[2025-09-02 04:43:13] [Rank 0] step:9861/10000 train_time:785155ms step_avg:79.62ms +[2025-09-02 04:43:15] [Rank 0] step:9881/10000 train_time:786860ms step_avg:79.63ms +[2025-09-02 04:43:15] [Rank 0] step:9881/10000 train_time:786860ms step_avg:79.63ms +[2025-09-02 04:43:16] [Rank 0] step:9901/10000 train_time:788575ms step_avg:79.65ms +[2025-09-02 04:43:16] [Rank 0] step:9901/10000 train_time:788575ms step_avg:79.65ms +[2025-09-02 04:43:18] [Rank 0] step:9921/10000 train_time:790287ms step_avg:79.66ms +[2025-09-02 04:43:18] [Rank 0] step:9921/10000 train_time:790287ms step_avg:79.66ms +[2025-09-02 04:43:20] [Rank 0] step:9941/10000 train_time:792003ms step_avg:79.67ms +[2025-09-02 04:43:20] [Rank 0] step:9941/10000 train_time:792003ms step_avg:79.67ms +[2025-09-02 04:43:21] [Rank 0] step:9961/10000 train_time:793716ms step_avg:79.68ms +[2025-09-02 04:43:21] [Rank 0] step:9961/10000 train_time:793716ms step_avg:79.68ms +[2025-09-02 04:43:23] [Rank 0] step:9981/10000 train_time:795428ms step_avg:79.69ms +[2025-09-02 04:43:23] [Rank 0] step:9981/10000 train_time:795428ms step_avg:79.69ms +[2025-09-02 04:43:25] [Rank 0] step:10000/10000 train_time:797059ms step_avg:79.71ms +[2025-09-02 04:43:25] [Rank 0] step:10000/10000 train_time:797059ms step_avg:79.71ms +[2025-09-02 04:43:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:43:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:43:37] [Rank 0] PRINT: step:10000/10000 val_loss:3.5931 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=180.3,q75/q25=53.70 attn_vo:H=0.8535,top10E=0.13,eRank=312.7,q75/q25=32.93 mlp_w1:H=0.9114,top10E=0.14,eRank=429.1,q75/q25=4.40 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7492,top10E=0.24,eRank=151.7,q75/q25=746.23 train_time:797241ms step_avg:79.72ms +[2025-09-02 04:43:37] [Rank 0] PRINT: step:10000/10000 val_loss:3.5931 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=180.3,q75/q25=53.70 attn_vo:H=0.8535,top10E=0.13,eRank=312.7,q75/q25=32.93 mlp_w1:H=0.9114,top10E=0.14,eRank=429.1,q75/q25=4.40 mlp_w2:H=0.9694,top10E=0.04,eRank=626.7,q75/q25=2.96 vo_prod:H=0.7492,top10E=0.24,eRank=151.7,q75/q25=746.23 train_time:797241ms step_avg:79.72ms +[2025-09-02 04:43:37] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 04:43:37 2025 --- +[2025-09-02 04:43:37] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 04:43:37 2025 --- +[2025-09-02 04:43:37] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 14938 MiB +[2025-09-02 04:43:37] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 14938 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_42/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f5db71a19709d1abbdf84045856264587fa04864 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "4282b1e8-549e-4bc9-9616-95ff8f0474a1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_42/training_log_4282b1e8-549e-4bc9-9616-95ff8f0474a1.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_42/training_log_4282b1e8-549e-4bc9-9616-95ff8f0474a1.txt new file mode 100644 index 0000000000000000000000000000000000000000..6b68fc990fd35856cd4402aed2b9527552cb144a --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_42/training_log_4282b1e8-549e-4bc9-9616-95ff8f0474a1.txt @@ -0,0 +1,2984 @@ +[2025-09-02 05:38:11] [Rank 0] PRINT: --- Script Start: Tue Sep 2 05:38:11 2025 --- +[2025-09-02 05:38:11] [Rank 0] PRINT: --- Script Start: Tue Sep 2 05:38:11 2025 --- +[2025-09-02 05:38:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 05:38:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 05:38:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 05:38:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 05:38:11] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-02 05:38:11] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-02 05:38:11] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_42 +[2025-09-02 05:38:11] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_42 +[2025-09-02 05:38:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 05:38:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 05:38:11] [Rank 0] PRINT: Constructing model... +[2025-09-02 05:38:11] [Rank 0] PRINT: Constructing model... +[2025-09-02 05:38:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 05:38:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 05:38:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 05:38:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 05:38:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 05:38:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 05:38:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 05:38:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 05:38:13] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 05:38:13] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 05:38:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 05:38:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 05:38:13] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 05:38:13] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 05:38:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 05:38:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 05:38:13] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 05:38:13] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 05:38:13] [Rank 0] PRINT: Starting warmup... +[2025-09-02 05:38:13] [Rank 0] PRINT: Starting warmup... +[2025-09-02 05:38:58] [Rank 0] PRINT: Warmup complete. +[2025-09-02 05:38:58] [Rank 0] PRINT: Warmup complete. +[2025-09-02 05:38:58] [Rank 0] PRINT: Starting training... +[2025-09-02 05:38:58] [Rank 0] PRINT: Starting training... 
+[2025-09-02 05:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:39:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 05:39:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 05:39:18] [Rank 0] step:21/10000 train_time:1441ms step_avg:68.60ms +[2025-09-02 05:39:18] [Rank 0] step:21/10000 train_time:1441ms step_avg:68.60ms +[2025-09-02 05:39:19] [Rank 0] step:41/10000 train_time:2899ms step_avg:70.71ms +[2025-09-02 05:39:19] [Rank 0] step:41/10000 train_time:2899ms step_avg:70.71ms +[2025-09-02 05:39:21] [Rank 0] step:61/10000 train_time:4359ms step_avg:71.46ms +[2025-09-02 05:39:21] [Rank 0] step:61/10000 train_time:4359ms step_avg:71.46ms +[2025-09-02 05:39:22] [Rank 0] step:81/10000 train_time:5820ms step_avg:71.86ms +[2025-09-02 05:39:22] [Rank 0] step:81/10000 train_time:5820ms step_avg:71.86ms +[2025-09-02 05:39:24] [Rank 0] step:101/10000 train_time:7282ms step_avg:72.10ms +[2025-09-02 05:39:24] [Rank 0] step:101/10000 train_time:7282ms step_avg:72.10ms +[2025-09-02 05:39:25] [Rank 0] step:121/10000 train_time:8744ms step_avg:72.27ms +[2025-09-02 05:39:25] [Rank 0] step:121/10000 
train_time:8744ms step_avg:72.27ms +[2025-09-02 05:39:27] [Rank 0] step:141/10000 train_time:10249ms step_avg:72.68ms +[2025-09-02 05:39:27] [Rank 0] step:141/10000 train_time:10249ms step_avg:72.68ms +[2025-09-02 05:39:28] [Rank 0] step:161/10000 train_time:11718ms step_avg:72.79ms +[2025-09-02 05:39:28] [Rank 0] step:161/10000 train_time:11718ms step_avg:72.79ms +[2025-09-02 05:39:29] [Rank 0] step:181/10000 train_time:13181ms step_avg:72.82ms +[2025-09-02 05:39:29] [Rank 0] step:181/10000 train_time:13181ms step_avg:72.82ms +[2025-09-02 05:39:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:39:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:39:43] [Rank 0] PRINT: step:200/10000 val_loss:6.2152 svd_entropy: attn_qk:H=0.6079,top10E=0.54,eRank=97.0,q75/q25=13.09 attn_vo:H=0.5225,top10E=0.57,eRank=77.6,q75/q25=inf mlp_w1:H=0.6579,top10E=0.52,eRank=94.8,q75/q25=2.96 mlp_w2:H=0.8074,top10E=0.17,eRank=218.0,q75/q25=17.10 vo_prod:H=0.3315,top10E=0.81,eRank=14.7,q75/q25=inf train_time:14716ms step_avg:73.58ms +[2025-09-02 05:39:43] [Rank 0] PRINT: step:200/10000 val_loss:6.2152 svd_entropy: attn_qk:H=0.6079,top10E=0.54,eRank=97.0,q75/q25=13.09 attn_vo:H=0.5225,top10E=0.57,eRank=77.6,q75/q25=inf mlp_w1:H=0.6579,top10E=0.52,eRank=94.8,q75/q25=2.96 mlp_w2:H=0.8074,top10E=0.17,eRank=218.0,q75/q25=17.10 vo_prod:H=0.3315,top10E=0.81,eRank=14.7,q75/q25=inf train_time:14716ms step_avg:73.58ms +[2025-09-02 05:39:43] [Rank 0] step:201/10000 train_time:14731ms step_avg:73.29ms +[2025-09-02 05:39:43] [Rank 0] step:201/10000 train_time:14731ms step_avg:73.29ms +[2025-09-02 05:39:44] [Rank 0] step:221/10000 train_time:16136ms step_avg:73.01ms +[2025-09-02 05:39:44] [Rank 0] step:221/10000 train_time:16136ms step_avg:73.01ms +[2025-09-02 05:39:46] [Rank 0] step:241/10000 train_time:17594ms 
step_avg:73.00ms +[2025-09-02 05:39:46] [Rank 0] step:241/10000 train_time:17594ms step_avg:73.00ms +[2025-09-02 05:39:47] [Rank 0] step:261/10000 train_time:19051ms step_avg:72.99ms +[2025-09-02 05:39:47] [Rank 0] step:261/10000 train_time:19051ms step_avg:72.99ms +[2025-09-02 05:39:49] [Rank 0] step:281/10000 train_time:20508ms step_avg:72.98ms +[2025-09-02 05:39:49] [Rank 0] step:281/10000 train_time:20508ms step_avg:72.98ms +[2025-09-02 05:39:50] [Rank 0] step:301/10000 train_time:21965ms step_avg:72.97ms +[2025-09-02 05:39:50] [Rank 0] step:301/10000 train_time:21965ms step_avg:72.97ms +[2025-09-02 05:39:52] [Rank 0] step:321/10000 train_time:23423ms step_avg:72.97ms +[2025-09-02 05:39:52] [Rank 0] step:321/10000 train_time:23423ms step_avg:72.97ms +[2025-09-02 05:39:53] [Rank 0] step:341/10000 train_time:24881ms step_avg:72.96ms +[2025-09-02 05:39:53] [Rank 0] step:341/10000 train_time:24881ms step_avg:72.96ms +[2025-09-02 05:39:55] [Rank 0] step:361/10000 train_time:26339ms step_avg:72.96ms +[2025-09-02 05:39:55] [Rank 0] step:361/10000 train_time:26339ms step_avg:72.96ms +[2025-09-02 05:39:56] [Rank 0] step:381/10000 train_time:27797ms step_avg:72.96ms +[2025-09-02 05:39:56] [Rank 0] step:381/10000 train_time:27797ms step_avg:72.96ms +[2025-09-02 05:39:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:39:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:40:09] [Rank 0] PRINT: step:400/10000 val_loss:5.7091 svd_entropy: attn_qk:H=0.6459,top10E=0.45,eRank=110.4,q75/q25=15.74 attn_vo:H=0.6062,top10E=0.41,eRank=107.0,q75/q25=inf mlp_w1:H=0.6865,top10E=0.41,eRank=116.2,q75/q25=4.54 mlp_w2:H=0.9301,top10E=0.06,eRank=484.0,q75/q25=6.33 vo_prod:H=0.4335,top10E=0.64,eRank=25.5,q75/q25=inf train_time:29329ms step_avg:73.32ms +[2025-09-02 05:40:09] [Rank 0] PRINT: step:400/10000 val_loss:5.7091 svd_entropy: attn_qk:H=0.6459,top10E=0.45,eRank=110.4,q75/q25=15.74 attn_vo:H=0.6062,top10E=0.41,eRank=107.0,q75/q25=inf mlp_w1:H=0.6865,top10E=0.41,eRank=116.2,q75/q25=4.54 mlp_w2:H=0.9301,top10E=0.06,eRank=484.0,q75/q25=6.33 vo_prod:H=0.4335,top10E=0.64,eRank=25.5,q75/q25=inf train_time:29329ms step_avg:73.32ms +[2025-09-02 05:40:09] [Rank 0] step:401/10000 train_time:29344ms step_avg:73.18ms +[2025-09-02 05:40:09] [Rank 0] step:401/10000 train_time:29344ms step_avg:73.18ms +[2025-09-02 05:40:11] [Rank 0] step:421/10000 train_time:30739ms step_avg:73.01ms +[2025-09-02 05:40:11] [Rank 0] step:421/10000 train_time:30739ms step_avg:73.01ms +[2025-09-02 05:40:12] [Rank 0] step:441/10000 train_time:32194ms step_avg:73.00ms +[2025-09-02 05:40:12] [Rank 0] step:441/10000 train_time:32194ms step_avg:73.00ms +[2025-09-02 05:40:14] [Rank 0] step:461/10000 train_time:33650ms step_avg:72.99ms +[2025-09-02 05:40:14] [Rank 0] step:461/10000 train_time:33650ms step_avg:72.99ms +[2025-09-02 05:40:15] [Rank 0] step:481/10000 train_time:35106ms step_avg:72.99ms +[2025-09-02 05:40:15] [Rank 0] step:481/10000 train_time:35106ms step_avg:72.99ms +[2025-09-02 05:40:17] [Rank 0] step:501/10000 train_time:36563ms step_avg:72.98ms +[2025-09-02 05:40:17] [Rank 0] step:501/10000 train_time:36563ms step_avg:72.98ms +[2025-09-02 05:40:18] [Rank 0] step:521/10000 train_time:38022ms step_avg:72.98ms +[2025-09-02 05:40:18] [Rank 0] step:521/10000 train_time:38022ms step_avg:72.98ms +[2025-09-02 05:40:20] [Rank 0] step:541/10000 train_time:39478ms 
step_avg:72.97ms +[2025-09-02 05:40:20] [Rank 0] step:541/10000 train_time:39478ms step_avg:72.97ms +[2025-09-02 05:40:21] [Rank 0] step:561/10000 train_time:40936ms step_avg:72.97ms +[2025-09-02 05:40:21] [Rank 0] step:561/10000 train_time:40936ms step_avg:72.97ms +[2025-09-02 05:40:23] [Rank 0] step:581/10000 train_time:42394ms step_avg:72.97ms +[2025-09-02 05:40:23] [Rank 0] step:581/10000 train_time:42394ms step_avg:72.97ms +[2025-09-02 05:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:40:36] [Rank 0] PRINT: step:600/10000 val_loss:5.4195 svd_entropy: attn_qk:H=0.6735,top10E=0.40,eRank=121.9,q75/q25=20.48 attn_vo:H=0.6525,top10E=0.33,eRank=131.6,q75/q25=inf mlp_w1:H=0.7293,top10E=0.35,eRank=146.2,q75/q25=6.31 mlp_w2:H=0.9482,top10E=0.05,eRank=545.2,q75/q25=4.55 vo_prod:H=0.4877,top10E=0.53,eRank=35.3,q75/q25=inf train_time:43926ms step_avg:73.21ms +[2025-09-02 05:40:36] [Rank 0] PRINT: step:600/10000 val_loss:5.4195 svd_entropy: attn_qk:H=0.6735,top10E=0.40,eRank=121.9,q75/q25=20.48 attn_vo:H=0.6525,top10E=0.33,eRank=131.6,q75/q25=inf mlp_w1:H=0.7293,top10E=0.35,eRank=146.2,q75/q25=6.31 mlp_w2:H=0.9482,top10E=0.05,eRank=545.2,q75/q25=4.55 vo_prod:H=0.4877,top10E=0.53,eRank=35.3,q75/q25=inf train_time:43926ms step_avg:73.21ms +[2025-09-02 05:40:36] [Rank 0] step:601/10000 train_time:43940ms step_avg:73.11ms +[2025-09-02 05:40:36] [Rank 0] step:601/10000 train_time:43940ms step_avg:73.11ms +[2025-09-02 05:40:37] [Rank 0] step:621/10000 train_time:45330ms step_avg:73.00ms +[2025-09-02 05:40:37] [Rank 0] step:621/10000 train_time:45330ms step_avg:73.00ms +[2025-09-02 05:40:39] [Rank 0] step:641/10000 train_time:46785ms step_avg:72.99ms +[2025-09-02 05:40:39] [Rank 0] step:641/10000 train_time:46785ms step_avg:72.99ms 
+[2025-09-02 05:40:40] [Rank 0] step:661/10000 train_time:48240ms step_avg:72.98ms +[2025-09-02 05:40:40] [Rank 0] step:661/10000 train_time:48240ms step_avg:72.98ms +[2025-09-02 05:40:42] [Rank 0] step:681/10000 train_time:49699ms step_avg:72.98ms +[2025-09-02 05:40:42] [Rank 0] step:681/10000 train_time:49699ms step_avg:72.98ms +[2025-09-02 05:40:43] [Rank 0] step:701/10000 train_time:51156ms step_avg:72.98ms +[2025-09-02 05:40:43] [Rank 0] step:701/10000 train_time:51156ms step_avg:72.98ms +[2025-09-02 05:40:44] [Rank 0] step:721/10000 train_time:52613ms step_avg:72.97ms +[2025-09-02 05:40:44] [Rank 0] step:721/10000 train_time:52613ms step_avg:72.97ms +[2025-09-02 05:40:46] [Rank 0] step:741/10000 train_time:54072ms step_avg:72.97ms +[2025-09-02 05:40:46] [Rank 0] step:741/10000 train_time:54072ms step_avg:72.97ms +[2025-09-02 05:40:47] [Rank 0] step:761/10000 train_time:55540ms step_avg:72.98ms +[2025-09-02 05:40:47] [Rank 0] step:761/10000 train_time:55540ms step_avg:72.98ms +[2025-09-02 05:40:49] [Rank 0] step:781/10000 train_time:57012ms step_avg:73.00ms +[2025-09-02 05:40:49] [Rank 0] step:781/10000 train_time:57012ms step_avg:73.00ms +[2025-09-02 05:40:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:40:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:41:02] [Rank 0] PRINT: step:800/10000 val_loss:5.1904 svd_entropy: attn_qk:H=0.6924,top10E=0.37,eRank=130.7,q75/q25=26.88 attn_vo:H=0.6838,top10E=0.29,eRank=152.3,q75/q25=inf mlp_w1:H=0.7607,top10E=0.31,eRank=173.3,q75/q25=7.26 mlp_w2:H=0.9544,top10E=0.05,eRank=567.9,q75/q25=4.04 vo_prod:H=0.5256,top10E=0.45,eRank=45.1,q75/q25=inf train_time:58560ms step_avg:73.20ms +[2025-09-02 05:41:02] [Rank 0] PRINT: step:800/10000 val_loss:5.1904 svd_entropy: attn_qk:H=0.6924,top10E=0.37,eRank=130.7,q75/q25=26.88 attn_vo:H=0.6838,top10E=0.29,eRank=152.3,q75/q25=inf mlp_w1:H=0.7607,top10E=0.31,eRank=173.3,q75/q25=7.26 mlp_w2:H=0.9544,top10E=0.05,eRank=567.9,q75/q25=4.04 vo_prod:H=0.5256,top10E=0.45,eRank=45.1,q75/q25=inf train_time:58560ms step_avg:73.20ms +[2025-09-02 05:41:02] [Rank 0] step:801/10000 train_time:58574ms step_avg:73.13ms +[2025-09-02 05:41:02] [Rank 0] step:801/10000 train_time:58574ms step_avg:73.13ms +[2025-09-02 05:41:04] [Rank 0] step:821/10000 train_time:59992ms step_avg:73.07ms +[2025-09-02 05:41:04] [Rank 0] step:821/10000 train_time:59992ms step_avg:73.07ms +[2025-09-02 05:41:05] [Rank 0] step:841/10000 train_time:61462ms step_avg:73.08ms +[2025-09-02 05:41:05] [Rank 0] step:841/10000 train_time:61462ms step_avg:73.08ms +[2025-09-02 05:41:07] [Rank 0] step:861/10000 train_time:62933ms step_avg:73.09ms +[2025-09-02 05:41:07] [Rank 0] step:861/10000 train_time:62933ms step_avg:73.09ms +[2025-09-02 05:41:08] [Rank 0] step:881/10000 train_time:64405ms step_avg:73.10ms +[2025-09-02 05:41:08] [Rank 0] step:881/10000 train_time:64405ms step_avg:73.10ms +[2025-09-02 05:41:10] [Rank 0] step:901/10000 train_time:65876ms step_avg:73.11ms +[2025-09-02 05:41:10] [Rank 0] step:901/10000 train_time:65876ms step_avg:73.11ms +[2025-09-02 05:41:11] [Rank 0] step:921/10000 train_time:67348ms step_avg:73.12ms +[2025-09-02 05:41:11] [Rank 0] step:921/10000 train_time:67348ms step_avg:73.12ms +[2025-09-02 05:41:13] [Rank 0] step:941/10000 train_time:68819ms 
step_avg:73.13ms +[2025-09-02 05:41:13] [Rank 0] step:941/10000 train_time:68819ms step_avg:73.13ms +[2025-09-02 05:41:14] [Rank 0] step:961/10000 train_time:70293ms step_avg:73.15ms +[2025-09-02 05:41:14] [Rank 0] step:961/10000 train_time:70293ms step_avg:73.15ms +[2025-09-02 05:41:15] [Rank 0] step:981/10000 train_time:71766ms step_avg:73.16ms +[2025-09-02 05:41:15] [Rank 0] step:981/10000 train_time:71766ms step_avg:73.16ms +[2025-09-02 05:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:41:29] [Rank 0] PRINT: step:1000/10000 val_loss:5.0120 svd_entropy: attn_qk:H=0.7069,top10E=0.34,eRank=138.4,q75/q25=33.26 attn_vo:H=0.7058,top10E=0.26,eRank=169.5,q75/q25=inf mlp_w1:H=0.7855,top10E=0.28,eRank=199.0,q75/q25=7.56 mlp_w2:H=0.9591,top10E=0.05,eRank=585.8,q75/q25=3.69 vo_prod:H=0.5504,top10E=0.40,eRank=53.1,q75/q25=inf train_time:73333ms step_avg:73.33ms +[2025-09-02 05:41:29] [Rank 0] PRINT: step:1000/10000 val_loss:5.0120 svd_entropy: attn_qk:H=0.7069,top10E=0.34,eRank=138.4,q75/q25=33.26 attn_vo:H=0.7058,top10E=0.26,eRank=169.5,q75/q25=inf mlp_w1:H=0.7855,top10E=0.28,eRank=199.0,q75/q25=7.56 mlp_w2:H=0.9591,top10E=0.05,eRank=585.8,q75/q25=3.69 vo_prod:H=0.5504,top10E=0.40,eRank=53.1,q75/q25=inf train_time:73333ms step_avg:73.33ms +[2025-09-02 05:41:29] [Rank 0] step:1001/10000 train_time:73348ms step_avg:73.27ms +[2025-09-02 05:41:29] [Rank 0] step:1001/10000 train_time:73348ms step_avg:73.27ms +[2025-09-02 05:41:30] [Rank 0] step:1021/10000 train_time:74761ms step_avg:73.22ms +[2025-09-02 05:41:30] [Rank 0] step:1021/10000 train_time:74761ms step_avg:73.22ms +[2025-09-02 05:41:32] [Rank 0] step:1041/10000 train_time:76233ms step_avg:73.23ms +[2025-09-02 05:41:32] [Rank 0] step:1041/10000 train_time:76233ms 
step_avg:73.23ms +[2025-09-02 05:41:33] [Rank 0] step:1061/10000 train_time:77757ms step_avg:73.29ms +[2025-09-02 05:41:33] [Rank 0] step:1061/10000 train_time:77757ms step_avg:73.29ms +[2025-09-02 05:41:35] [Rank 0] step:1081/10000 train_time:79229ms step_avg:73.29ms +[2025-09-02 05:41:35] [Rank 0] step:1081/10000 train_time:79229ms step_avg:73.29ms +[2025-09-02 05:41:36] [Rank 0] step:1101/10000 train_time:80701ms step_avg:73.30ms +[2025-09-02 05:41:36] [Rank 0] step:1101/10000 train_time:80701ms step_avg:73.30ms +[2025-09-02 05:41:38] [Rank 0] step:1121/10000 train_time:82174ms step_avg:73.30ms +[2025-09-02 05:41:38] [Rank 0] step:1121/10000 train_time:82174ms step_avg:73.30ms +[2025-09-02 05:41:39] [Rank 0] step:1141/10000 train_time:83645ms step_avg:73.31ms +[2025-09-02 05:41:39] [Rank 0] step:1141/10000 train_time:83645ms step_avg:73.31ms +[2025-09-02 05:41:41] [Rank 0] step:1161/10000 train_time:85116ms step_avg:73.31ms +[2025-09-02 05:41:41] [Rank 0] step:1161/10000 train_time:85116ms step_avg:73.31ms +[2025-09-02 05:41:42] [Rank 0] step:1181/10000 train_time:86589ms step_avg:73.32ms +[2025-09-02 05:41:42] [Rank 0] step:1181/10000 train_time:86589ms step_avg:73.32ms +[2025-09-02 05:41:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:41:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:41:55] [Rank 0] PRINT: step:1200/10000 val_loss:4.8331 svd_entropy: attn_qk:H=0.7179,top10E=0.32,eRank=145.0,q75/q25=39.38 attn_vo:H=0.7236,top10E=0.23,eRank=185.5,q75/q25=inf mlp_w1:H=0.8058,top10E=0.25,eRank=223.5,q75/q25=7.51 mlp_w2:H=0.9622,top10E=0.04,eRank=597.9,q75/q25=3.46 vo_prod:H=0.5694,top10E=0.37,eRank=60.4,q75/q25=inf train_time:88138ms step_avg:73.45ms +[2025-09-02 05:41:55] [Rank 0] PRINT: step:1200/10000 val_loss:4.8331 svd_entropy: attn_qk:H=0.7179,top10E=0.32,eRank=145.0,q75/q25=39.38 attn_vo:H=0.7236,top10E=0.23,eRank=185.5,q75/q25=inf mlp_w1:H=0.8058,top10E=0.25,eRank=223.5,q75/q25=7.51 mlp_w2:H=0.9622,top10E=0.04,eRank=597.9,q75/q25=3.46 vo_prod:H=0.5694,top10E=0.37,eRank=60.4,q75/q25=inf train_time:88138ms step_avg:73.45ms +[2025-09-02 05:41:56] [Rank 0] step:1201/10000 train_time:88152ms step_avg:73.40ms +[2025-09-02 05:41:56] [Rank 0] step:1201/10000 train_time:88152ms step_avg:73.40ms +[2025-09-02 05:41:57] [Rank 0] step:1221/10000 train_time:89558ms step_avg:73.35ms +[2025-09-02 05:41:57] [Rank 0] step:1221/10000 train_time:89558ms step_avg:73.35ms +[2025-09-02 05:41:59] [Rank 0] step:1241/10000 train_time:91029ms step_avg:73.35ms +[2025-09-02 05:41:59] [Rank 0] step:1241/10000 train_time:91029ms step_avg:73.35ms +[2025-09-02 05:42:00] [Rank 0] step:1261/10000 train_time:92499ms step_avg:73.35ms +[2025-09-02 05:42:00] [Rank 0] step:1261/10000 train_time:92499ms step_avg:73.35ms +[2025-09-02 05:42:01] [Rank 0] step:1281/10000 train_time:93969ms step_avg:73.36ms +[2025-09-02 05:42:01] [Rank 0] step:1281/10000 train_time:93969ms step_avg:73.36ms +[2025-09-02 05:42:03] [Rank 0] step:1301/10000 train_time:95442ms step_avg:73.36ms +[2025-09-02 05:42:03] [Rank 0] step:1301/10000 train_time:95442ms step_avg:73.36ms +[2025-09-02 05:42:04] [Rank 0] step:1321/10000 train_time:96914ms step_avg:73.36ms +[2025-09-02 05:42:04] [Rank 0] step:1321/10000 train_time:96914ms step_avg:73.36ms +[2025-09-02 05:42:06] [Rank 0] step:1341/10000 
train_time:98386ms step_avg:73.37ms +[2025-09-02 05:42:06] [Rank 0] step:1341/10000 train_time:98386ms step_avg:73.37ms +[2025-09-02 05:42:07] [Rank 0] step:1361/10000 train_time:99859ms step_avg:73.37ms +[2025-09-02 05:42:07] [Rank 0] step:1361/10000 train_time:99859ms step_avg:73.37ms +[2025-09-02 05:42:09] [Rank 0] step:1381/10000 train_time:101333ms step_avg:73.38ms +[2025-09-02 05:42:09] [Rank 0] step:1381/10000 train_time:101333ms step_avg:73.38ms +[2025-09-02 05:42:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:42:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:42:22] [Rank 0] PRINT: step:1400/10000 val_loss:4.7122 svd_entropy: attn_qk:H=0.7266,top10E=0.31,eRank=150.8,q75/q25=44.31 attn_vo:H=0.7375,top10E=0.21,eRank=199.4,q75/q25=inf mlp_w1:H=0.8214,top10E=0.23,eRank=245.0,q75/q25=7.33 mlp_w2:H=0.9644,top10E=0.04,eRank=606.5,q75/q25=3.32 vo_prod:H=0.5843,top10E=0.34,eRank=67.0,q75/q25=inf train_time:102879ms step_avg:73.48ms +[2025-09-02 05:42:22] [Rank 0] PRINT: step:1400/10000 val_loss:4.7122 svd_entropy: attn_qk:H=0.7266,top10E=0.31,eRank=150.8,q75/q25=44.31 attn_vo:H=0.7375,top10E=0.21,eRank=199.4,q75/q25=inf mlp_w1:H=0.8214,top10E=0.23,eRank=245.0,q75/q25=7.33 mlp_w2:H=0.9644,top10E=0.04,eRank=606.5,q75/q25=3.32 vo_prod:H=0.5843,top10E=0.34,eRank=67.0,q75/q25=inf train_time:102879ms step_avg:73.48ms +[2025-09-02 05:42:22] [Rank 0] step:1401/10000 train_time:102893ms step_avg:73.44ms +[2025-09-02 05:42:22] [Rank 0] step:1401/10000 train_time:102893ms step_avg:73.44ms +[2025-09-02 05:42:24] [Rank 0] step:1421/10000 train_time:104302ms step_avg:73.40ms +[2025-09-02 05:42:24] [Rank 0] step:1421/10000 train_time:104302ms step_avg:73.40ms +[2025-09-02 05:42:25] [Rank 0] step:1441/10000 train_time:105771ms step_avg:73.40ms +[2025-09-02 05:42:25] [Rank 0] 
step:1441/10000 train_time:105771ms step_avg:73.40ms +[2025-09-02 05:42:27] [Rank 0] step:1461/10000 train_time:107242ms step_avg:73.40ms +[2025-09-02 05:42:27] [Rank 0] step:1461/10000 train_time:107242ms step_avg:73.40ms +[2025-09-02 05:42:28] [Rank 0] step:1481/10000 train_time:108715ms step_avg:73.41ms +[2025-09-02 05:42:28] [Rank 0] step:1481/10000 train_time:108715ms step_avg:73.41ms +[2025-09-02 05:42:30] [Rank 0] step:1501/10000 train_time:110196ms step_avg:73.42ms +[2025-09-02 05:42:30] [Rank 0] step:1501/10000 train_time:110196ms step_avg:73.42ms +[2025-09-02 05:42:31] [Rank 0] step:1521/10000 train_time:111680ms step_avg:73.43ms +[2025-09-02 05:42:31] [Rank 0] step:1521/10000 train_time:111680ms step_avg:73.43ms +[2025-09-02 05:42:33] [Rank 0] step:1541/10000 train_time:113164ms step_avg:73.44ms +[2025-09-02 05:42:33] [Rank 0] step:1541/10000 train_time:113164ms step_avg:73.44ms +[2025-09-02 05:42:34] [Rank 0] step:1561/10000 train_time:114648ms step_avg:73.45ms +[2025-09-02 05:42:34] [Rank 0] step:1561/10000 train_time:114648ms step_avg:73.45ms +[2025-09-02 05:42:36] [Rank 0] step:1581/10000 train_time:116129ms step_avg:73.45ms +[2025-09-02 05:42:36] [Rank 0] step:1581/10000 train_time:116129ms step_avg:73.45ms +[2025-09-02 05:42:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:42:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:42:49] [Rank 0] PRINT: step:1600/10000 val_loss:4.5890 svd_entropy: attn_qk:H=0.7334,top10E=0.30,eRank=155.4,q75/q25=48.58 attn_vo:H=0.7487,top10E=0.20,eRank=211.5,q75/q25=inf mlp_w1:H=0.8338,top10E=0.22,eRank=263.9,q75/q25=7.10 mlp_w2:H=0.9660,top10E=0.04,eRank=612.8,q75/q25=3.22 vo_prod:H=0.5972,top10E=0.32,eRank=73.2,q75/q25=inf train_time:117734ms step_avg:73.58ms +[2025-09-02 05:42:49] [Rank 0] PRINT: step:1600/10000 val_loss:4.5890 svd_entropy: attn_qk:H=0.7334,top10E=0.30,eRank=155.4,q75/q25=48.58 attn_vo:H=0.7487,top10E=0.20,eRank=211.5,q75/q25=inf mlp_w1:H=0.8338,top10E=0.22,eRank=263.9,q75/q25=7.10 mlp_w2:H=0.9660,top10E=0.04,eRank=612.8,q75/q25=3.22 vo_prod:H=0.5972,top10E=0.32,eRank=73.2,q75/q25=inf train_time:117734ms step_avg:73.58ms +[2025-09-02 05:42:49] [Rank 0] step:1601/10000 train_time:117749ms step_avg:73.55ms +[2025-09-02 05:42:49] [Rank 0] step:1601/10000 train_time:117749ms step_avg:73.55ms +[2025-09-02 05:42:50] [Rank 0] step:1621/10000 train_time:119155ms step_avg:73.51ms +[2025-09-02 05:42:50] [Rank 0] step:1621/10000 train_time:119155ms step_avg:73.51ms +[2025-09-02 05:42:52] [Rank 0] step:1641/10000 train_time:120642ms step_avg:73.52ms +[2025-09-02 05:42:52] [Rank 0] step:1641/10000 train_time:120642ms step_avg:73.52ms +[2025-09-02 05:42:53] [Rank 0] step:1661/10000 train_time:122124ms step_avg:73.52ms +[2025-09-02 05:42:53] [Rank 0] step:1661/10000 train_time:122124ms step_avg:73.52ms +[2025-09-02 05:42:55] [Rank 0] step:1681/10000 train_time:123607ms step_avg:73.53ms +[2025-09-02 05:42:55] [Rank 0] step:1681/10000 train_time:123607ms step_avg:73.53ms +[2025-09-02 05:42:56] [Rank 0] step:1701/10000 train_time:125091ms step_avg:73.54ms +[2025-09-02 05:42:56] [Rank 0] step:1701/10000 train_time:125091ms step_avg:73.54ms +[2025-09-02 05:42:58] [Rank 0] step:1721/10000 train_time:126575ms step_avg:73.55ms +[2025-09-02 05:42:58] [Rank 0] step:1721/10000 train_time:126575ms step_avg:73.55ms +[2025-09-02 05:42:59] [Rank 0] 
step:1741/10000 train_time:128059ms step_avg:73.55ms +[2025-09-02 05:42:59] [Rank 0] step:1741/10000 train_time:128059ms step_avg:73.55ms +[2025-09-02 05:43:01] [Rank 0] step:1761/10000 train_time:129547ms step_avg:73.56ms +[2025-09-02 05:43:01] [Rank 0] step:1761/10000 train_time:129547ms step_avg:73.56ms +[2025-09-02 05:43:02] [Rank 0] step:1781/10000 train_time:131033ms step_avg:73.57ms +[2025-09-02 05:43:02] [Rank 0] step:1781/10000 train_time:131033ms step_avg:73.57ms +[2025-09-02 05:43:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:43:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:43:16] [Rank 0] PRINT: step:1800/10000 val_loss:4.4941 svd_entropy: attn_qk:H=0.7393,top10E=0.30,eRank=159.5,q75/q25=51.73 attn_vo:H=0.7579,top10E=0.19,eRank=222.1,q75/q25=inf mlp_w1:H=0.8437,top10E=0.21,eRank=280.2,q75/q25=6.82 mlp_w2:H=0.9671,top10E=0.04,eRank=617.5,q75/q25=3.14 vo_prod:H=0.6079,top10E=0.30,eRank=79.0,q75/q25=inf train_time:132594ms step_avg:73.66ms +[2025-09-02 05:43:16] [Rank 0] PRINT: step:1800/10000 val_loss:4.4941 svd_entropy: attn_qk:H=0.7393,top10E=0.30,eRank=159.5,q75/q25=51.73 attn_vo:H=0.7579,top10E=0.19,eRank=222.1,q75/q25=inf mlp_w1:H=0.8437,top10E=0.21,eRank=280.2,q75/q25=6.82 mlp_w2:H=0.9671,top10E=0.04,eRank=617.5,q75/q25=3.14 vo_prod:H=0.6079,top10E=0.30,eRank=79.0,q75/q25=inf train_time:132594ms step_avg:73.66ms +[2025-09-02 05:43:16] [Rank 0] step:1801/10000 train_time:132608ms step_avg:73.63ms +[2025-09-02 05:43:16] [Rank 0] step:1801/10000 train_time:132608ms step_avg:73.63ms +[2025-09-02 05:43:17] [Rank 0] step:1821/10000 train_time:134026ms step_avg:73.60ms +[2025-09-02 05:43:17] [Rank 0] step:1821/10000 train_time:134026ms step_avg:73.60ms +[2025-09-02 05:43:19] [Rank 0] step:1841/10000 train_time:135507ms step_avg:73.61ms +[2025-09-02 05:43:19] 
[Rank 0] step:1841/10000 train_time:135507ms step_avg:73.61ms +[2025-09-02 05:43:20] [Rank 0] step:1861/10000 train_time:136990ms step_avg:73.61ms +[2025-09-02 05:43:20] [Rank 0] step:1861/10000 train_time:136990ms step_avg:73.61ms +[2025-09-02 05:43:22] [Rank 0] step:1881/10000 train_time:138473ms step_avg:73.62ms +[2025-09-02 05:43:22] [Rank 0] step:1881/10000 train_time:138473ms step_avg:73.62ms +[2025-09-02 05:43:23] [Rank 0] step:1901/10000 train_time:139957ms step_avg:73.62ms +[2025-09-02 05:43:23] [Rank 0] step:1901/10000 train_time:139957ms step_avg:73.62ms +[2025-09-02 05:43:25] [Rank 0] step:1921/10000 train_time:141442ms step_avg:73.63ms +[2025-09-02 05:43:25] [Rank 0] step:1921/10000 train_time:141442ms step_avg:73.63ms +[2025-09-02 05:43:26] [Rank 0] step:1941/10000 train_time:142926ms step_avg:73.64ms +[2025-09-02 05:43:26] [Rank 0] step:1941/10000 train_time:142926ms step_avg:73.64ms +[2025-09-02 05:43:28] [Rank 0] step:1961/10000 train_time:144411ms step_avg:73.64ms +[2025-09-02 05:43:28] [Rank 0] step:1961/10000 train_time:144411ms step_avg:73.64ms +[2025-09-02 05:43:29] [Rank 0] step:1981/10000 train_time:145896ms step_avg:73.65ms +[2025-09-02 05:43:29] [Rank 0] step:1981/10000 train_time:145896ms step_avg:73.65ms +[2025-09-02 05:43:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:43:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:43:42] [Rank 0] PRINT: step:2000/10000 val_loss:4.4380 svd_entropy: attn_qk:H=0.7442,top10E=0.29,eRank=163.2,q75/q25=54.40 attn_vo:H=0.7655,top10E=0.18,eRank=231.2,q75/q25=inf mlp_w1:H=0.8517,top10E=0.20,eRank=294.2,q75/q25=6.57 mlp_w2:H=0.9680,top10E=0.04,eRank=620.9,q75/q25=3.09 vo_prod:H=0.6172,top10E=0.29,eRank=84.3,q75/q25=inf train_time:147457ms step_avg:73.73ms +[2025-09-02 05:43:42] [Rank 0] PRINT: step:2000/10000 val_loss:4.4380 svd_entropy: attn_qk:H=0.7442,top10E=0.29,eRank=163.2,q75/q25=54.40 attn_vo:H=0.7655,top10E=0.18,eRank=231.2,q75/q25=inf mlp_w1:H=0.8517,top10E=0.20,eRank=294.2,q75/q25=6.57 mlp_w2:H=0.9680,top10E=0.04,eRank=620.9,q75/q25=3.09 vo_prod:H=0.6172,top10E=0.29,eRank=84.3,q75/q25=inf train_time:147457ms step_avg:73.73ms +[2025-09-02 05:43:42] [Rank 0] step:2001/10000 train_time:147472ms step_avg:73.70ms +[2025-09-02 05:43:42] [Rank 0] step:2001/10000 train_time:147472ms step_avg:73.70ms +[2025-09-02 05:43:44] [Rank 0] step:2021/10000 train_time:148905ms step_avg:73.68ms +[2025-09-02 05:43:44] [Rank 0] step:2021/10000 train_time:148905ms step_avg:73.68ms +[2025-09-02 05:43:45] [Rank 0] step:2041/10000 train_time:150565ms step_avg:73.77ms +[2025-09-02 05:43:45] [Rank 0] step:2041/10000 train_time:150565ms step_avg:73.77ms +[2025-09-02 05:43:47] [Rank 0] step:2061/10000 train_time:152048ms step_avg:73.77ms +[2025-09-02 05:43:47] [Rank 0] step:2061/10000 train_time:152048ms step_avg:73.77ms +[2025-09-02 05:43:48] [Rank 0] step:2081/10000 train_time:153531ms step_avg:73.78ms +[2025-09-02 05:43:48] [Rank 0] step:2081/10000 train_time:153531ms step_avg:73.78ms +[2025-09-02 05:43:50] [Rank 0] step:2101/10000 train_time:155024ms step_avg:73.79ms +[2025-09-02 05:43:50] [Rank 0] step:2101/10000 train_time:155024ms step_avg:73.79ms +[2025-09-02 05:43:51] [Rank 0] step:2121/10000 train_time:156510ms step_avg:73.79ms +[2025-09-02 05:43:51] [Rank 0] step:2121/10000 train_time:156510ms step_avg:73.79ms +[2025-09-02 05:43:53] [Rank 0] 
step:2141/10000 train_time:157993ms step_avg:73.79ms +[2025-09-02 05:43:53] [Rank 0] step:2141/10000 train_time:157993ms step_avg:73.79ms +[2025-09-02 05:43:54] [Rank 0] step:2161/10000 train_time:159477ms step_avg:73.80ms +[2025-09-02 05:43:54] [Rank 0] step:2161/10000 train_time:159477ms step_avg:73.80ms +[2025-09-02 05:43:56] [Rank 0] step:2181/10000 train_time:160961ms step_avg:73.80ms +[2025-09-02 05:43:56] [Rank 0] step:2181/10000 train_time:160961ms step_avg:73.80ms +[2025-09-02 05:43:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:43:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:44:09] [Rank 0] PRINT: step:2200/10000 val_loss:4.3719 svd_entropy: attn_qk:H=0.7484,top10E=0.28,eRank=166.4,q75/q25=55.82 attn_vo:H=0.7718,top10E=0.17,eRank=238.9,q75/q25=inf mlp_w1:H=0.8582,top10E=0.19,eRank=306.2,q75/q25=6.36 mlp_w2:H=0.9686,top10E=0.04,eRank=623.5,q75/q25=3.05 vo_prod:H=0.6247,top10E=0.27,eRank=89.1,q75/q25=inf train_time:162521ms step_avg:73.87ms +[2025-09-02 05:44:09] [Rank 0] PRINT: step:2200/10000 val_loss:4.3719 svd_entropy: attn_qk:H=0.7484,top10E=0.28,eRank=166.4,q75/q25=55.82 attn_vo:H=0.7718,top10E=0.17,eRank=238.9,q75/q25=inf mlp_w1:H=0.8582,top10E=0.19,eRank=306.2,q75/q25=6.36 mlp_w2:H=0.9686,top10E=0.04,eRank=623.5,q75/q25=3.05 vo_prod:H=0.6247,top10E=0.27,eRank=89.1,q75/q25=inf train_time:162521ms step_avg:73.87ms +[2025-09-02 05:44:09] [Rank 0] step:2201/10000 train_time:162535ms step_avg:73.85ms +[2025-09-02 05:44:09] [Rank 0] step:2201/10000 train_time:162535ms step_avg:73.85ms +[2025-09-02 05:44:11] [Rank 0] step:2221/10000 train_time:163965ms step_avg:73.82ms +[2025-09-02 05:44:11] [Rank 0] step:2221/10000 train_time:163965ms step_avg:73.82ms +[2025-09-02 05:44:12] [Rank 0] step:2241/10000 train_time:165482ms step_avg:73.84ms +[2025-09-02 05:44:12] 
[Rank 0] step:2241/10000 train_time:165482ms step_avg:73.84ms +[2025-09-02 05:44:14] [Rank 0] step:2261/10000 train_time:167008ms step_avg:73.86ms +[2025-09-02 05:44:14] [Rank 0] step:2261/10000 train_time:167008ms step_avg:73.86ms +[2025-09-02 05:44:15] [Rank 0] step:2281/10000 train_time:168534ms step_avg:73.89ms +[2025-09-02 05:44:15] [Rank 0] step:2281/10000 train_time:168534ms step_avg:73.89ms +[2025-09-02 05:44:17] [Rank 0] step:2301/10000 train_time:170060ms step_avg:73.91ms +[2025-09-02 05:44:17] [Rank 0] step:2301/10000 train_time:170060ms step_avg:73.91ms +[2025-09-02 05:44:18] [Rank 0] step:2321/10000 train_time:171588ms step_avg:73.93ms +[2025-09-02 05:44:18] [Rank 0] step:2321/10000 train_time:171588ms step_avg:73.93ms +[2025-09-02 05:44:20] [Rank 0] step:2341/10000 train_time:173116ms step_avg:73.95ms +[2025-09-02 05:44:20] [Rank 0] step:2341/10000 train_time:173116ms step_avg:73.95ms +[2025-09-02 05:44:21] [Rank 0] step:2361/10000 train_time:174644ms step_avg:73.97ms +[2025-09-02 05:44:21] [Rank 0] step:2361/10000 train_time:174644ms step_avg:73.97ms +[2025-09-02 05:44:23] [Rank 0] step:2381/10000 train_time:176172ms step_avg:73.99ms +[2025-09-02 05:44:23] [Rank 0] step:2381/10000 train_time:176172ms step_avg:73.99ms +[2025-09-02 05:44:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:44:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:44:36] [Rank 0] PRINT: step:2400/10000 val_loss:4.2961 svd_entropy: attn_qk:H=0.7514,top10E=0.28,eRank=168.7,q75/q25=56.49 attn_vo:H=0.7771,top10E=0.16,eRank=245.7,q75/q25=inf mlp_w1:H=0.8638,top10E=0.19,eRank=317.2,q75/q25=6.14 mlp_w2:H=0.9691,top10E=0.04,eRank=625.4,q75/q25=3.02 vo_prod:H=0.6316,top10E=0.26,eRank=93.7,q75/q25=inf train_time:177778ms step_avg:74.07ms +[2025-09-02 05:44:36] [Rank 0] PRINT: step:2400/10000 val_loss:4.2961 svd_entropy: attn_qk:H=0.7514,top10E=0.28,eRank=168.7,q75/q25=56.49 attn_vo:H=0.7771,top10E=0.16,eRank=245.7,q75/q25=inf mlp_w1:H=0.8638,top10E=0.19,eRank=317.2,q75/q25=6.14 mlp_w2:H=0.9691,top10E=0.04,eRank=625.4,q75/q25=3.02 vo_prod:H=0.6316,top10E=0.26,eRank=93.7,q75/q25=inf train_time:177778ms step_avg:74.07ms +[2025-09-02 05:44:36] [Rank 0] step:2401/10000 train_time:177792ms step_avg:74.05ms +[2025-09-02 05:44:36] [Rank 0] step:2401/10000 train_time:177792ms step_avg:74.05ms +[2025-09-02 05:44:38] [Rank 0] step:2421/10000 train_time:179258ms step_avg:74.04ms +[2025-09-02 05:44:38] [Rank 0] step:2421/10000 train_time:179258ms step_avg:74.04ms +[2025-09-02 05:44:39] [Rank 0] step:2441/10000 train_time:180785ms step_avg:74.06ms +[2025-09-02 05:44:39] [Rank 0] step:2441/10000 train_time:180785ms step_avg:74.06ms +[2025-09-02 05:44:41] [Rank 0] step:2461/10000 train_time:182314ms step_avg:74.08ms +[2025-09-02 05:44:41] [Rank 0] step:2461/10000 train_time:182314ms step_avg:74.08ms +[2025-09-02 05:44:42] [Rank 0] step:2481/10000 train_time:183840ms step_avg:74.10ms +[2025-09-02 05:44:42] [Rank 0] step:2481/10000 train_time:183840ms step_avg:74.10ms +[2025-09-02 05:44:44] [Rank 0] step:2501/10000 train_time:185419ms step_avg:74.14ms +[2025-09-02 05:44:44] [Rank 0] step:2501/10000 train_time:185419ms step_avg:74.14ms +[2025-09-02 05:44:45] [Rank 0] step:2521/10000 train_time:186947ms step_avg:74.16ms +[2025-09-02 05:44:45] [Rank 0] step:2521/10000 train_time:186947ms step_avg:74.16ms +[2025-09-02 05:44:47] [Rank 0] 
step:2541/10000 train_time:188474ms step_avg:74.17ms +[2025-09-02 05:44:47] [Rank 0] step:2541/10000 train_time:188474ms step_avg:74.17ms +[2025-09-02 05:44:48] [Rank 0] step:2561/10000 train_time:190002ms step_avg:74.19ms +[2025-09-02 05:44:48] [Rank 0] step:2561/10000 train_time:190002ms step_avg:74.19ms +[2025-09-02 05:44:50] [Rank 0] step:2581/10000 train_time:191532ms step_avg:74.21ms +[2025-09-02 05:44:50] [Rank 0] step:2581/10000 train_time:191532ms step_avg:74.21ms +[2025-09-02 05:44:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:44:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:45:03] [Rank 0] PRINT: step:2600/10000 val_loss:4.2468 svd_entropy: attn_qk:H=0.7546,top10E=0.28,eRank=171.3,q75/q25=57.46 attn_vo:H=0.7818,top10E=0.16,eRank=251.8,q75/q25=inf mlp_w1:H=0.8687,top10E=0.18,eRank=327.0,q75/q25=5.97 mlp_w2:H=0.9694,top10E=0.04,eRank=626.8,q75/q25=3.00 vo_prod:H=0.6378,top10E=0.25,eRank=98.0,q75/q25=inf train_time:193139ms step_avg:74.28ms +[2025-09-02 05:45:03] [Rank 0] PRINT: step:2600/10000 val_loss:4.2468 svd_entropy: attn_qk:H=0.7546,top10E=0.28,eRank=171.3,q75/q25=57.46 attn_vo:H=0.7818,top10E=0.16,eRank=251.8,q75/q25=inf mlp_w1:H=0.8687,top10E=0.18,eRank=327.0,q75/q25=5.97 mlp_w2:H=0.9694,top10E=0.04,eRank=626.8,q75/q25=3.00 vo_prod:H=0.6378,top10E=0.25,eRank=98.0,q75/q25=inf train_time:193139ms step_avg:74.28ms +[2025-09-02 05:45:03] [Rank 0] step:2601/10000 train_time:193153ms step_avg:74.26ms +[2025-09-02 05:45:03] [Rank 0] step:2601/10000 train_time:193153ms step_avg:74.26ms +[2025-09-02 05:45:05] [Rank 0] step:2621/10000 train_time:194614ms step_avg:74.25ms +[2025-09-02 05:45:05] [Rank 0] step:2621/10000 train_time:194614ms step_avg:74.25ms +[2025-09-02 05:45:06] [Rank 0] step:2641/10000 train_time:196142ms step_avg:74.27ms +[2025-09-02 05:45:06] 
[Rank 0] step:2641/10000 train_time:196142ms step_avg:74.27ms +[2025-09-02 05:45:08] [Rank 0] step:2661/10000 train_time:197670ms step_avg:74.28ms +[2025-09-02 05:45:08] [Rank 0] step:2661/10000 train_time:197670ms step_avg:74.28ms +[2025-09-02 05:45:09] [Rank 0] step:2681/10000 train_time:199199ms step_avg:74.30ms +[2025-09-02 05:45:09] [Rank 0] step:2681/10000 train_time:199199ms step_avg:74.30ms +[2025-09-02 05:45:11] [Rank 0] step:2701/10000 train_time:200728ms step_avg:74.32ms +[2025-09-02 05:45:11] [Rank 0] step:2701/10000 train_time:200728ms step_avg:74.32ms +[2025-09-02 05:45:12] [Rank 0] step:2721/10000 train_time:202257ms step_avg:74.33ms +[2025-09-02 05:45:12] [Rank 0] step:2721/10000 train_time:202257ms step_avg:74.33ms +[2025-09-02 05:45:14] [Rank 0] step:2741/10000 train_time:203787ms step_avg:74.35ms +[2025-09-02 05:45:14] [Rank 0] step:2741/10000 train_time:203787ms step_avg:74.35ms +[2025-09-02 05:45:16] [Rank 0] step:2761/10000 train_time:205317ms step_avg:74.36ms +[2025-09-02 05:45:16] [Rank 0] step:2761/10000 train_time:205317ms step_avg:74.36ms +[2025-09-02 05:45:17] [Rank 0] step:2781/10000 train_time:206847ms step_avg:74.38ms +[2025-09-02 05:45:17] [Rank 0] step:2781/10000 train_time:206847ms step_avg:74.38ms +[2025-09-02 05:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:45:30] [Rank 0] PRINT: step:2800/10000 val_loss:4.2107 svd_entropy: attn_qk:H=0.7576,top10E=0.27,eRank=173.8,q75/q25=57.64 attn_vo:H=0.7860,top10E=0.15,eRank=257.3,q75/q25=inf mlp_w1:H=0.8730,top10E=0.18,eRank=335.9,q75/q25=5.80 mlp_w2:H=0.9697,top10E=0.04,eRank=627.9,q75/q25=2.98 vo_prod:H=0.6432,top10E=0.25,eRank=102.0,q75/q25=inf train_time:208457ms step_avg:74.45ms +[2025-09-02 05:45:30] [Rank 0] PRINT: step:2800/10000 val_loss:4.2107 svd_entropy: attn_qk:H=0.7576,top10E=0.27,eRank=173.8,q75/q25=57.64 attn_vo:H=0.7860,top10E=0.15,eRank=257.3,q75/q25=inf mlp_w1:H=0.8730,top10E=0.18,eRank=335.9,q75/q25=5.80 mlp_w2:H=0.9697,top10E=0.04,eRank=627.9,q75/q25=2.98 vo_prod:H=0.6432,top10E=0.25,eRank=102.0,q75/q25=inf train_time:208457ms step_avg:74.45ms +[2025-09-02 05:45:30] [Rank 0] step:2801/10000 train_time:208471ms step_avg:74.43ms +[2025-09-02 05:45:30] [Rank 0] step:2801/10000 train_time:208471ms step_avg:74.43ms +[2025-09-02 05:45:32] [Rank 0] step:2821/10000 train_time:209925ms step_avg:74.42ms +[2025-09-02 05:45:32] [Rank 0] step:2821/10000 train_time:209925ms step_avg:74.42ms +[2025-09-02 05:45:33] [Rank 0] step:2841/10000 train_time:211453ms step_avg:74.43ms +[2025-09-02 05:45:33] [Rank 0] step:2841/10000 train_time:211453ms step_avg:74.43ms +[2025-09-02 05:45:35] [Rank 0] step:2861/10000 train_time:212979ms step_avg:74.44ms +[2025-09-02 05:45:35] [Rank 0] step:2861/10000 train_time:212979ms step_avg:74.44ms +[2025-09-02 05:45:36] [Rank 0] step:2881/10000 train_time:214507ms step_avg:74.46ms +[2025-09-02 05:45:36] [Rank 0] step:2881/10000 train_time:214507ms step_avg:74.46ms +[2025-09-02 05:45:38] [Rank 0] step:2901/10000 train_time:216033ms step_avg:74.47ms +[2025-09-02 05:45:38] [Rank 0] step:2901/10000 train_time:216033ms step_avg:74.47ms +[2025-09-02 05:45:40] [Rank 0] step:2921/10000 train_time:217560ms step_avg:74.48ms +[2025-09-02 05:45:40] [Rank 0] step:2921/10000 train_time:217560ms step_avg:74.48ms +[2025-09-02 05:45:41] [Rank 0] 
step:2941/10000 train_time:219089ms step_avg:74.49ms +[2025-09-02 05:45:41] [Rank 0] step:2941/10000 train_time:219089ms step_avg:74.49ms +[2025-09-02 05:45:43] [Rank 0] step:2961/10000 train_time:220618ms step_avg:74.51ms +[2025-09-02 05:45:43] [Rank 0] step:2961/10000 train_time:220618ms step_avg:74.51ms +[2025-09-02 05:45:44] [Rank 0] step:2981/10000 train_time:222153ms step_avg:74.52ms +[2025-09-02 05:45:44] [Rank 0] step:2981/10000 train_time:222153ms step_avg:74.52ms +[2025-09-02 05:45:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:45:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:45:57] [Rank 0] PRINT: step:3000/10000 val_loss:4.1674 svd_entropy: attn_qk:H=0.7603,top10E=0.27,eRank=176.1,q75/q25=57.76 attn_vo:H=0.7896,top10E=0.15,eRank=262.4,q75/q25=inf mlp_w1:H=0.8767,top10E=0.17,eRank=343.7,q75/q25=5.66 mlp_w2:H=0.9699,top10E=0.04,eRank=628.9,q75/q25=2.97 vo_prod:H=0.6482,top10E=0.24,eRank=106.0,q75/q25=inf train_time:223766ms step_avg:74.59ms +[2025-09-02 05:45:57] [Rank 0] PRINT: step:3000/10000 val_loss:4.1674 svd_entropy: attn_qk:H=0.7603,top10E=0.27,eRank=176.1,q75/q25=57.76 attn_vo:H=0.7896,top10E=0.15,eRank=262.4,q75/q25=inf mlp_w1:H=0.8767,top10E=0.17,eRank=343.7,q75/q25=5.66 mlp_w2:H=0.9699,top10E=0.04,eRank=628.9,q75/q25=2.97 vo_prod:H=0.6482,top10E=0.24,eRank=106.0,q75/q25=inf train_time:223766ms step_avg:74.59ms +[2025-09-02 05:45:57] [Rank 0] step:3001/10000 train_time:223780ms step_avg:74.57ms +[2025-09-02 05:45:57] [Rank 0] step:3001/10000 train_time:223780ms step_avg:74.57ms +[2025-09-02 05:45:59] [Rank 0] step:3021/10000 train_time:225242ms step_avg:74.56ms +[2025-09-02 05:45:59] [Rank 0] step:3021/10000 train_time:225242ms step_avg:74.56ms +[2025-09-02 05:46:00] [Rank 0] step:3041/10000 train_time:226776ms step_avg:74.57ms +[2025-09-02 
05:46:00] [Rank 0] step:3041/10000 train_time:226776ms step_avg:74.57ms +[2025-09-02 05:46:02] [Rank 0] step:3061/10000 train_time:228311ms step_avg:74.59ms +[2025-09-02 05:46:02] [Rank 0] step:3061/10000 train_time:228311ms step_avg:74.59ms +[2025-09-02 05:46:03] [Rank 0] step:3081/10000 train_time:229846ms step_avg:74.60ms +[2025-09-02 05:46:03] [Rank 0] step:3081/10000 train_time:229846ms step_avg:74.60ms +[2025-09-02 05:46:05] [Rank 0] step:3101/10000 train_time:231382ms step_avg:74.62ms +[2025-09-02 05:46:05] [Rank 0] step:3101/10000 train_time:231382ms step_avg:74.62ms +[2025-09-02 05:46:06] [Rank 0] step:3121/10000 train_time:232916ms step_avg:74.63ms +[2025-09-02 05:46:06] [Rank 0] step:3121/10000 train_time:232916ms step_avg:74.63ms +[2025-09-02 05:46:08] [Rank 0] step:3141/10000 train_time:234452ms step_avg:74.64ms +[2025-09-02 05:46:08] [Rank 0] step:3141/10000 train_time:234452ms step_avg:74.64ms +[2025-09-02 05:46:10] [Rank 0] step:3161/10000 train_time:235989ms step_avg:74.66ms +[2025-09-02 05:46:10] [Rank 0] step:3161/10000 train_time:235989ms step_avg:74.66ms +[2025-09-02 05:46:11] [Rank 0] step:3181/10000 train_time:237527ms step_avg:74.67ms +[2025-09-02 05:46:11] [Rank 0] step:3181/10000 train_time:237527ms step_avg:74.67ms +[2025-09-02 05:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:46:24] [Rank 0] PRINT: step:3200/10000 val_loss:4.1324 svd_entropy: attn_qk:H=0.7626,top10E=0.27,eRank=178.2,q75/q25=58.04 attn_vo:H=0.7928,top10E=0.15,eRank=266.8,q75/q25=inf mlp_w1:H=0.8800,top10E=0.17,eRank=351.0,q75/q25=5.53 mlp_w2:H=0.9701,top10E=0.04,eRank=629.6,q75/q25=2.95 vo_prod:H=0.6526,top10E=0.23,eRank=109.5,q75/q25=inf train_time:239141ms step_avg:74.73ms +[2025-09-02 05:46:24] [Rank 0] PRINT: step:3200/10000 val_loss:4.1324 svd_entropy: attn_qk:H=0.7626,top10E=0.27,eRank=178.2,q75/q25=58.04 attn_vo:H=0.7928,top10E=0.15,eRank=266.8,q75/q25=inf mlp_w1:H=0.8800,top10E=0.17,eRank=351.0,q75/q25=5.53 mlp_w2:H=0.9701,top10E=0.04,eRank=629.6,q75/q25=2.95 vo_prod:H=0.6526,top10E=0.23,eRank=109.5,q75/q25=inf train_time:239141ms step_avg:74.73ms +[2025-09-02 05:46:25] [Rank 0] step:3201/10000 train_time:239156ms step_avg:74.71ms +[2025-09-02 05:46:25] [Rank 0] step:3201/10000 train_time:239156ms step_avg:74.71ms +[2025-09-02 05:46:26] [Rank 0] step:3221/10000 train_time:240628ms step_avg:74.71ms +[2025-09-02 05:46:26] [Rank 0] step:3221/10000 train_time:240628ms step_avg:74.71ms +[2025-09-02 05:46:28] [Rank 0] step:3241/10000 train_time:242161ms step_avg:74.72ms +[2025-09-02 05:46:28] [Rank 0] step:3241/10000 train_time:242161ms step_avg:74.72ms +[2025-09-02 05:46:29] [Rank 0] step:3261/10000 train_time:243695ms step_avg:74.73ms +[2025-09-02 05:46:29] [Rank 0] step:3261/10000 train_time:243695ms step_avg:74.73ms +[2025-09-02 05:46:31] [Rank 0] step:3281/10000 train_time:245232ms step_avg:74.74ms +[2025-09-02 05:46:31] [Rank 0] step:3281/10000 train_time:245232ms step_avg:74.74ms +[2025-09-02 05:46:32] [Rank 0] step:3301/10000 train_time:246768ms step_avg:74.76ms +[2025-09-02 05:46:32] [Rank 0] step:3301/10000 train_time:246768ms step_avg:74.76ms +[2025-09-02 05:46:34] [Rank 0] step:3321/10000 train_time:248304ms step_avg:74.77ms +[2025-09-02 05:46:34] [Rank 0] step:3321/10000 train_time:248304ms step_avg:74.77ms +[2025-09-02 05:46:35] [Rank 0] 
step:3341/10000 train_time:249841ms step_avg:74.78ms +[2025-09-02 05:46:35] [Rank 0] step:3341/10000 train_time:249841ms step_avg:74.78ms +[2025-09-02 05:46:37] [Rank 0] step:3361/10000 train_time:251378ms step_avg:74.79ms +[2025-09-02 05:46:37] [Rank 0] step:3361/10000 train_time:251378ms step_avg:74.79ms +[2025-09-02 05:46:38] [Rank 0] step:3381/10000 train_time:252914ms step_avg:74.80ms +[2025-09-02 05:46:38] [Rank 0] step:3381/10000 train_time:252914ms step_avg:74.80ms +[2025-09-02 05:46:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:46:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:46:52] [Rank 0] PRINT: step:3400/10000 val_loss:4.0963 svd_entropy: attn_qk:H=0.7650,top10E=0.27,eRank=180.3,q75/q25=57.90 attn_vo:H=0.7957,top10E=0.14,eRank=271.1,q75/q25=inf mlp_w1:H=0.8831,top10E=0.17,eRank=358.1,q75/q25=5.41 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.95 vo_prod:H=0.6567,top10E=0.23,eRank=113.0,q75/q25=inf train_time:254531ms step_avg:74.86ms +[2025-09-02 05:46:52] [Rank 0] PRINT: step:3400/10000 val_loss:4.0963 svd_entropy: attn_qk:H=0.7650,top10E=0.27,eRank=180.3,q75/q25=57.90 attn_vo:H=0.7957,top10E=0.14,eRank=271.1,q75/q25=inf mlp_w1:H=0.8831,top10E=0.17,eRank=358.1,q75/q25=5.41 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.95 vo_prod:H=0.6567,top10E=0.23,eRank=113.0,q75/q25=inf train_time:254531ms step_avg:74.86ms +[2025-09-02 05:46:52] [Rank 0] step:3401/10000 train_time:254546ms step_avg:74.84ms +[2025-09-02 05:46:52] [Rank 0] step:3401/10000 train_time:254546ms step_avg:74.84ms +[2025-09-02 05:46:53] [Rank 0] step:3421/10000 train_time:256021ms step_avg:74.84ms +[2025-09-02 05:46:53] [Rank 0] step:3421/10000 train_time:256021ms step_avg:74.84ms +[2025-09-02 05:46:55] [Rank 0] step:3441/10000 train_time:257558ms step_avg:74.85ms +[2025-09-02 
05:46:55] [Rank 0] step:3441/10000 train_time:257558ms step_avg:74.85ms +[2025-09-02 05:46:57] [Rank 0] step:3461/10000 train_time:259096ms step_avg:74.86ms +[2025-09-02 05:46:57] [Rank 0] step:3461/10000 train_time:259096ms step_avg:74.86ms +[2025-09-02 05:46:58] [Rank 0] step:3481/10000 train_time:260633ms step_avg:74.87ms +[2025-09-02 05:46:58] [Rank 0] step:3481/10000 train_time:260633ms step_avg:74.87ms +[2025-09-02 05:47:00] [Rank 0] step:3501/10000 train_time:262171ms step_avg:74.88ms +[2025-09-02 05:47:00] [Rank 0] step:3501/10000 train_time:262171ms step_avg:74.88ms +[2025-09-02 05:47:01] [Rank 0] step:3521/10000 train_time:263709ms step_avg:74.90ms +[2025-09-02 05:47:01] [Rank 0] step:3521/10000 train_time:263709ms step_avg:74.90ms +[2025-09-02 05:47:03] [Rank 0] step:3541/10000 train_time:265246ms step_avg:74.91ms +[2025-09-02 05:47:03] [Rank 0] step:3541/10000 train_time:265246ms step_avg:74.91ms +[2025-09-02 05:47:04] [Rank 0] step:3561/10000 train_time:266782ms step_avg:74.92ms +[2025-09-02 05:47:04] [Rank 0] step:3561/10000 train_time:266782ms step_avg:74.92ms +[2025-09-02 05:47:06] [Rank 0] step:3581/10000 train_time:268319ms step_avg:74.93ms +[2025-09-02 05:47:06] [Rank 0] step:3581/10000 train_time:268319ms step_avg:74.93ms +[2025-09-02 05:47:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:47:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:47:19] [Rank 0] PRINT: step:3600/10000 val_loss:4.0829 svd_entropy: attn_qk:H=0.7672,top10E=0.26,eRank=182.3,q75/q25=57.64 attn_vo:H=0.7984,top10E=0.14,eRank=274.9,q75/q25=inf mlp_w1:H=0.8858,top10E=0.16,eRank=364.1,q75/q25=5.30 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.94 vo_prod:H=0.6604,top10E=0.22,eRank=116.1,q75/q25=inf train_time:269936ms step_avg:74.98ms +[2025-09-02 05:47:19] [Rank 0] PRINT: step:3600/10000 val_loss:4.0829 svd_entropy: attn_qk:H=0.7672,top10E=0.26,eRank=182.3,q75/q25=57.64 attn_vo:H=0.7984,top10E=0.14,eRank=274.9,q75/q25=inf mlp_w1:H=0.8858,top10E=0.16,eRank=364.1,q75/q25=5.30 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.94 vo_prod:H=0.6604,top10E=0.22,eRank=116.1,q75/q25=inf train_time:269936ms step_avg:74.98ms +[2025-09-02 05:47:19] [Rank 0] step:3601/10000 train_time:269952ms step_avg:74.97ms +[2025-09-02 05:47:19] [Rank 0] step:3601/10000 train_time:269952ms step_avg:74.97ms +[2025-09-02 05:47:21] [Rank 0] step:3621/10000 train_time:271429ms step_avg:74.96ms +[2025-09-02 05:47:21] [Rank 0] step:3621/10000 train_time:271429ms step_avg:74.96ms +[2025-09-02 05:47:22] [Rank 0] step:3641/10000 train_time:272965ms step_avg:74.97ms +[2025-09-02 05:47:22] [Rank 0] step:3641/10000 train_time:272965ms step_avg:74.97ms +[2025-09-02 05:47:24] [Rank 0] step:3661/10000 train_time:274502ms step_avg:74.98ms +[2025-09-02 05:47:24] [Rank 0] step:3661/10000 train_time:274502ms step_avg:74.98ms +[2025-09-02 05:47:25] [Rank 0] step:3681/10000 train_time:276039ms step_avg:74.99ms +[2025-09-02 05:47:25] [Rank 0] step:3681/10000 train_time:276039ms step_avg:74.99ms +[2025-09-02 05:47:27] [Rank 0] step:3701/10000 train_time:277578ms step_avg:75.00ms +[2025-09-02 05:47:27] [Rank 0] step:3701/10000 train_time:277578ms step_avg:75.00ms +[2025-09-02 05:47:29] [Rank 0] step:3721/10000 train_time:279145ms step_avg:75.02ms +[2025-09-02 05:47:29] [Rank 0] step:3721/10000 train_time:279145ms step_avg:75.02ms +[2025-09-02 05:47:30] [Rank 0] 
step:3741/10000 train_time:280720ms step_avg:75.04ms +[2025-09-02 05:47:30] [Rank 0] step:3741/10000 train_time:280720ms step_avg:75.04ms +[2025-09-02 05:47:32] [Rank 0] step:3761/10000 train_time:282294ms step_avg:75.06ms +[2025-09-02 05:47:32] [Rank 0] step:3761/10000 train_time:282294ms step_avg:75.06ms +[2025-09-02 05:47:33] [Rank 0] step:3781/10000 train_time:283867ms step_avg:75.08ms +[2025-09-02 05:47:33] [Rank 0] step:3781/10000 train_time:283867ms step_avg:75.08ms +[2025-09-02 05:47:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:47:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:47:47] [Rank 0] PRINT: step:3800/10000 val_loss:4.0391 svd_entropy: attn_qk:H=0.7689,top10E=0.26,eRank=183.8,q75/q25=57.42 attn_vo:H=0.8007,top10E=0.14,eRank=278.4,q75/q25=inf mlp_w1:H=0.8882,top10E=0.16,eRank=369.7,q75/q25=5.21 mlp_w2:H=0.9704,top10E=0.04,eRank=630.9,q75/q25=2.94 vo_prod:H=0.6637,top10E=0.22,eRank=119.1,q75/q25=inf train_time:285524ms step_avg:75.14ms +[2025-09-02 05:47:47] [Rank 0] PRINT: step:3800/10000 val_loss:4.0391 svd_entropy: attn_qk:H=0.7689,top10E=0.26,eRank=183.8,q75/q25=57.42 attn_vo:H=0.8007,top10E=0.14,eRank=278.4,q75/q25=inf mlp_w1:H=0.8882,top10E=0.16,eRank=369.7,q75/q25=5.21 mlp_w2:H=0.9704,top10E=0.04,eRank=630.9,q75/q25=2.94 vo_prod:H=0.6637,top10E=0.22,eRank=119.1,q75/q25=inf train_time:285524ms step_avg:75.14ms +[2025-09-02 05:47:47] [Rank 0] step:3801/10000 train_time:285539ms step_avg:75.12ms +[2025-09-02 05:47:47] [Rank 0] step:3801/10000 train_time:285539ms step_avg:75.12ms +[2025-09-02 05:47:48] [Rank 0] step:3821/10000 train_time:287035ms step_avg:75.12ms +[2025-09-02 05:47:48] [Rank 0] step:3821/10000 train_time:287035ms step_avg:75.12ms +[2025-09-02 05:47:50] [Rank 0] step:3841/10000 train_time:288610ms step_avg:75.14ms +[2025-09-02 
05:47:50] [Rank 0] step:3841/10000 train_time:288610ms step_avg:75.14ms +[2025-09-02 05:47:52] [Rank 0] step:3861/10000 train_time:290184ms step_avg:75.16ms +[2025-09-02 05:47:52] [Rank 0] step:3861/10000 train_time:290184ms step_avg:75.16ms +[2025-09-02 05:47:53] [Rank 0] step:3881/10000 train_time:291755ms step_avg:75.18ms +[2025-09-02 05:47:53] [Rank 0] step:3881/10000 train_time:291755ms step_avg:75.18ms +[2025-09-02 05:47:55] [Rank 0] step:3901/10000 train_time:293329ms step_avg:75.19ms +[2025-09-02 05:47:55] [Rank 0] step:3901/10000 train_time:293329ms step_avg:75.19ms +[2025-09-02 05:47:56] [Rank 0] step:3921/10000 train_time:294902ms step_avg:75.21ms +[2025-09-02 05:47:56] [Rank 0] step:3921/10000 train_time:294902ms step_avg:75.21ms +[2025-09-02 05:47:58] [Rank 0] step:3941/10000 train_time:296478ms step_avg:75.23ms +[2025-09-02 05:47:58] [Rank 0] step:3941/10000 train_time:296478ms step_avg:75.23ms +[2025-09-02 05:47:59] [Rank 0] step:3961/10000 train_time:298050ms step_avg:75.25ms +[2025-09-02 05:47:59] [Rank 0] step:3961/10000 train_time:298050ms step_avg:75.25ms +[2025-09-02 05:48:01] [Rank 0] step:3981/10000 train_time:299623ms step_avg:75.26ms +[2025-09-02 05:48:01] [Rank 0] step:3981/10000 train_time:299623ms step_avg:75.26ms +[2025-09-02 05:48:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:48:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:48:14] [Rank 0] PRINT: step:4000/10000 val_loss:4.0132 svd_entropy: attn_qk:H=0.7707,top10E=0.26,eRank=185.4,q75/q25=56.63 attn_vo:H=0.8028,top10E=0.13,eRank=281.6,q75/q25=inf mlp_w1:H=0.8905,top10E=0.16,eRank=375.2,q75/q25=5.12 mlp_w2:H=0.9704,top10E=0.04,eRank=631.1,q75/q25=2.93 vo_prod:H=0.6667,top10E=0.22,eRank=121.9,q75/q25=inf train_time:301274ms step_avg:75.32ms +[2025-09-02 05:48:14] [Rank 0] PRINT: step:4000/10000 val_loss:4.0132 svd_entropy: attn_qk:H=0.7707,top10E=0.26,eRank=185.4,q75/q25=56.63 attn_vo:H=0.8028,top10E=0.13,eRank=281.6,q75/q25=inf mlp_w1:H=0.8905,top10E=0.16,eRank=375.2,q75/q25=5.12 mlp_w2:H=0.9704,top10E=0.04,eRank=631.1,q75/q25=2.93 vo_prod:H=0.6667,top10E=0.22,eRank=121.9,q75/q25=inf train_time:301274ms step_avg:75.32ms +[2025-09-02 05:48:15] [Rank 0] step:4001/10000 train_time:301290ms step_avg:75.30ms +[2025-09-02 05:48:15] [Rank 0] step:4001/10000 train_time:301290ms step_avg:75.30ms +[2025-09-02 05:48:16] [Rank 0] step:4021/10000 train_time:302800ms step_avg:75.30ms +[2025-09-02 05:48:16] [Rank 0] step:4021/10000 train_time:302800ms step_avg:75.30ms +[2025-09-02 05:48:18] [Rank 0] step:4041/10000 train_time:304371ms step_avg:75.32ms +[2025-09-02 05:48:18] [Rank 0] step:4041/10000 train_time:304371ms step_avg:75.32ms +[2025-09-02 05:48:19] [Rank 0] step:4061/10000 train_time:305941ms step_avg:75.34ms +[2025-09-02 05:48:19] [Rank 0] step:4061/10000 train_time:305941ms step_avg:75.34ms +[2025-09-02 05:48:21] [Rank 0] step:4081/10000 train_time:307687ms step_avg:75.39ms +[2025-09-02 05:48:21] [Rank 0] step:4081/10000 train_time:307687ms step_avg:75.39ms +[2025-09-02 05:48:23] [Rank 0] step:4101/10000 train_time:309257ms step_avg:75.41ms +[2025-09-02 05:48:23] [Rank 0] step:4101/10000 train_time:309257ms step_avg:75.41ms +[2025-09-02 05:48:24] [Rank 0] step:4121/10000 train_time:310829ms step_avg:75.43ms +[2025-09-02 05:48:24] [Rank 0] step:4121/10000 train_time:310829ms step_avg:75.43ms +[2025-09-02 05:48:26] [Rank 0] 
step:4141/10000 train_time:312402ms step_avg:75.44ms +[2025-09-02 05:48:26] [Rank 0] step:4141/10000 train_time:312402ms step_avg:75.44ms +[2025-09-02 05:48:27] [Rank 0] step:4161/10000 train_time:313974ms step_avg:75.46ms +[2025-09-02 05:48:27] [Rank 0] step:4161/10000 train_time:313974ms step_avg:75.46ms +[2025-09-02 05:48:29] [Rank 0] step:4181/10000 train_time:315548ms step_avg:75.47ms +[2025-09-02 05:48:29] [Rank 0] step:4181/10000 train_time:315548ms step_avg:75.47ms +[2025-09-02 05:48:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:48:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:48:42] [Rank 0] PRINT: step:4200/10000 val_loss:3.9954 svd_entropy: attn_qk:H=0.7724,top10E=0.26,eRank=187.1,q75/q25=56.47 attn_vo:H=0.8047,top10E=0.13,eRank=284.5,q75/q25=inf mlp_w1:H=0.8926,top10E=0.16,eRank=380.2,q75/q25=5.04 mlp_w2:H=0.9705,top10E=0.04,eRank=631.3,q75/q25=2.92 vo_prod:H=0.6695,top10E=0.21,eRank=124.5,q75/q25=inf train_time:317201ms step_avg:75.52ms +[2025-09-02 05:48:42] [Rank 0] PRINT: step:4200/10000 val_loss:3.9954 svd_entropy: attn_qk:H=0.7724,top10E=0.26,eRank=187.1,q75/q25=56.47 attn_vo:H=0.8047,top10E=0.13,eRank=284.5,q75/q25=inf mlp_w1:H=0.8926,top10E=0.16,eRank=380.2,q75/q25=5.04 mlp_w2:H=0.9705,top10E=0.04,eRank=631.3,q75/q25=2.92 vo_prod:H=0.6695,top10E=0.21,eRank=124.5,q75/q25=inf train_time:317201ms step_avg:75.52ms +[2025-09-02 05:48:43] [Rank 0] step:4201/10000 train_time:317217ms step_avg:75.51ms +[2025-09-02 05:48:43] [Rank 0] step:4201/10000 train_time:317217ms step_avg:75.51ms +[2025-09-02 05:48:44] [Rank 0] step:4221/10000 train_time:318717ms step_avg:75.51ms +[2025-09-02 05:48:44] [Rank 0] step:4221/10000 train_time:318717ms step_avg:75.51ms +[2025-09-02 05:48:46] [Rank 0] step:4241/10000 train_time:320290ms step_avg:75.52ms +[2025-09-02 
05:48:46] [Rank 0] step:4241/10000 train_time:320290ms step_avg:75.52ms +[2025-09-02 05:48:47] [Rank 0] step:4261/10000 train_time:321862ms step_avg:75.54ms +[2025-09-02 05:48:47] [Rank 0] step:4261/10000 train_time:321862ms step_avg:75.54ms +[2025-09-02 05:48:49] [Rank 0] step:4281/10000 train_time:323433ms step_avg:75.55ms +[2025-09-02 05:48:49] [Rank 0] step:4281/10000 train_time:323433ms step_avg:75.55ms +[2025-09-02 05:48:50] [Rank 0] step:4301/10000 train_time:325006ms step_avg:75.57ms +[2025-09-02 05:48:50] [Rank 0] step:4301/10000 train_time:325006ms step_avg:75.57ms +[2025-09-02 05:48:52] [Rank 0] step:4321/10000 train_time:326579ms step_avg:75.58ms +[2025-09-02 05:48:52] [Rank 0] step:4321/10000 train_time:326579ms step_avg:75.58ms +[2025-09-02 05:48:54] [Rank 0] step:4341/10000 train_time:328150ms step_avg:75.59ms +[2025-09-02 05:48:54] [Rank 0] step:4341/10000 train_time:328150ms step_avg:75.59ms +[2025-09-02 05:48:55] [Rank 0] step:4361/10000 train_time:329726ms step_avg:75.61ms +[2025-09-02 05:48:55] [Rank 0] step:4361/10000 train_time:329726ms step_avg:75.61ms +[2025-09-02 05:48:57] [Rank 0] step:4381/10000 train_time:331366ms step_avg:75.64ms +[2025-09-02 05:48:57] [Rank 0] step:4381/10000 train_time:331366ms step_avg:75.64ms +[2025-09-02 05:48:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:48:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:49:10] [Rank 0] PRINT: step:4400/10000 val_loss:3.9715 svd_entropy: attn_qk:H=0.7741,top10E=0.26,eRank=188.7,q75/q25=55.80 attn_vo:H=0.8064,top10E=0.13,eRank=287.2,q75/q25=inf mlp_w1:H=0.8946,top10E=0.15,eRank=385.1,q75/q25=4.98 mlp_w2:H=0.9705,top10E=0.04,eRank=631.5,q75/q25=2.92 vo_prod:H=0.6719,top10E=0.21,eRank=126.8,q75/q25=inf train_time:333018ms step_avg:75.69ms +[2025-09-02 05:49:10] [Rank 0] PRINT: step:4400/10000 val_loss:3.9715 svd_entropy: attn_qk:H=0.7741,top10E=0.26,eRank=188.7,q75/q25=55.80 attn_vo:H=0.8064,top10E=0.13,eRank=287.2,q75/q25=inf mlp_w1:H=0.8946,top10E=0.15,eRank=385.1,q75/q25=4.98 mlp_w2:H=0.9705,top10E=0.04,eRank=631.5,q75/q25=2.92 vo_prod:H=0.6719,top10E=0.21,eRank=126.8,q75/q25=inf train_time:333018ms step_avg:75.69ms +[2025-09-02 05:49:10] [Rank 0] step:4401/10000 train_time:333032ms step_avg:75.67ms +[2025-09-02 05:49:10] [Rank 0] step:4401/10000 train_time:333032ms step_avg:75.67ms +[2025-09-02 05:49:12] [Rank 0] step:4421/10000 train_time:334540ms step_avg:75.67ms +[2025-09-02 05:49:12] [Rank 0] step:4421/10000 train_time:334540ms step_avg:75.67ms +[2025-09-02 05:49:13] [Rank 0] step:4441/10000 train_time:336109ms step_avg:75.68ms +[2025-09-02 05:49:13] [Rank 0] step:4441/10000 train_time:336109ms step_avg:75.68ms +[2025-09-02 05:49:15] [Rank 0] step:4461/10000 train_time:337686ms step_avg:75.70ms +[2025-09-02 05:49:15] [Rank 0] step:4461/10000 train_time:337686ms step_avg:75.70ms +[2025-09-02 05:49:17] [Rank 0] step:4481/10000 train_time:339366ms step_avg:75.73ms +[2025-09-02 05:49:17] [Rank 0] step:4481/10000 train_time:339366ms step_avg:75.73ms +[2025-09-02 05:49:18] [Rank 0] step:4501/10000 train_time:340943ms step_avg:75.75ms +[2025-09-02 05:49:18] [Rank 0] step:4501/10000 train_time:340943ms step_avg:75.75ms +[2025-09-02 05:49:20] [Rank 0] step:4521/10000 train_time:342520ms step_avg:75.76ms +[2025-09-02 05:49:20] [Rank 0] step:4521/10000 train_time:342520ms step_avg:75.76ms +[2025-09-02 05:49:21] [Rank 0] 
step:4541/10000 train_time:344101ms step_avg:75.78ms +[2025-09-02 05:49:21] [Rank 0] step:4541/10000 train_time:344101ms step_avg:75.78ms +[2025-09-02 05:49:23] [Rank 0] step:4561/10000 train_time:345679ms step_avg:75.79ms +[2025-09-02 05:49:23] [Rank 0] step:4561/10000 train_time:345679ms step_avg:75.79ms +[2025-09-02 05:49:24] [Rank 0] step:4581/10000 train_time:347260ms step_avg:75.80ms +[2025-09-02 05:49:24] [Rank 0] step:4581/10000 train_time:347260ms step_avg:75.80ms +[2025-09-02 05:49:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:49:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:49:38] [Rank 0] PRINT: step:4600/10000 val_loss:3.9467 svd_entropy: attn_qk:H=0.7756,top10E=0.25,eRank=190.2,q75/q25=55.65 attn_vo:H=0.8081,top10E=0.13,eRank=289.9,q75/q25=inf mlp_w1:H=0.8964,top10E=0.15,eRank=389.8,q75/q25=4.90 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.91 vo_prod:H=0.6745,top10E=0.21,eRank=129.3,q75/q25=inf train_time:348921ms step_avg:75.85ms +[2025-09-02 05:49:38] [Rank 0] PRINT: step:4600/10000 val_loss:3.9467 svd_entropy: attn_qk:H=0.7756,top10E=0.25,eRank=190.2,q75/q25=55.65 attn_vo:H=0.8081,top10E=0.13,eRank=289.9,q75/q25=inf mlp_w1:H=0.8964,top10E=0.15,eRank=389.8,q75/q25=4.90 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.91 vo_prod:H=0.6745,top10E=0.21,eRank=129.3,q75/q25=inf train_time:348921ms step_avg:75.85ms +[2025-09-02 05:49:38] [Rank 0] step:4601/10000 train_time:348936ms step_avg:75.84ms +[2025-09-02 05:49:38] [Rank 0] step:4601/10000 train_time:348936ms step_avg:75.84ms +[2025-09-02 05:49:40] [Rank 0] step:4621/10000 train_time:350443ms step_avg:75.84ms +[2025-09-02 05:49:40] [Rank 0] step:4621/10000 train_time:350443ms step_avg:75.84ms +[2025-09-02 05:49:41] [Rank 0] step:4641/10000 train_time:352023ms step_avg:75.85ms +[2025-09-02 
05:49:41] [Rank 0] step:4641/10000 train_time:352023ms step_avg:75.85ms +[2025-09-02 05:49:43] [Rank 0] step:4661/10000 train_time:353606ms step_avg:75.86ms +[2025-09-02 05:49:43] [Rank 0] step:4661/10000 train_time:353606ms step_avg:75.86ms +[2025-09-02 05:49:44] [Rank 0] step:4681/10000 train_time:355186ms step_avg:75.88ms +[2025-09-02 05:49:44] [Rank 0] step:4681/10000 train_time:355186ms step_avg:75.88ms +[2025-09-02 05:49:46] [Rank 0] step:4701/10000 train_time:356767ms step_avg:75.89ms +[2025-09-02 05:49:46] [Rank 0] step:4701/10000 train_time:356767ms step_avg:75.89ms +[2025-09-02 05:49:47] [Rank 0] step:4721/10000 train_time:358347ms step_avg:75.90ms +[2025-09-02 05:49:47] [Rank 0] step:4721/10000 train_time:358347ms step_avg:75.90ms +[2025-09-02 05:49:49] [Rank 0] step:4741/10000 train_time:359929ms step_avg:75.92ms +[2025-09-02 05:49:49] [Rank 0] step:4741/10000 train_time:359929ms step_avg:75.92ms +[2025-09-02 05:49:51] [Rank 0] step:4761/10000 train_time:361512ms step_avg:75.93ms +[2025-09-02 05:49:51] [Rank 0] step:4761/10000 train_time:361512ms step_avg:75.93ms +[2025-09-02 05:49:52] [Rank 0] step:4781/10000 train_time:363193ms step_avg:75.97ms +[2025-09-02 05:49:52] [Rank 0] step:4781/10000 train_time:363193ms step_avg:75.97ms +[2025-09-02 05:49:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:49:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:50:06] [Rank 0] PRINT: step:4800/10000 val_loss:3.9352 svd_entropy: attn_qk:H=0.7771,top10E=0.25,eRank=191.7,q75/q25=54.88 attn_vo:H=0.8097,top10E=0.13,eRank=292.3,q75/q25=inf mlp_w1:H=0.8982,top10E=0.15,eRank=394.1,q75/q25=4.85 mlp_w2:H=0.9706,top10E=0.04,eRank=631.8,q75/q25=2.91 vo_prod:H=0.6767,top10E=0.20,eRank=131.4,q75/q25=inf train_time:364856ms step_avg:76.01ms +[2025-09-02 05:50:06] [Rank 0] PRINT: step:4800/10000 val_loss:3.9352 svd_entropy: attn_qk:H=0.7771,top10E=0.25,eRank=191.7,q75/q25=54.88 attn_vo:H=0.8097,top10E=0.13,eRank=292.3,q75/q25=inf mlp_w1:H=0.8982,top10E=0.15,eRank=394.1,q75/q25=4.85 mlp_w2:H=0.9706,top10E=0.04,eRank=631.8,q75/q25=2.91 vo_prod:H=0.6767,top10E=0.20,eRank=131.4,q75/q25=inf train_time:364856ms step_avg:76.01ms +[2025-09-02 05:50:06] [Rank 0] step:4801/10000 train_time:364871ms step_avg:76.00ms +[2025-09-02 05:50:06] [Rank 0] step:4801/10000 train_time:364871ms step_avg:76.00ms +[2025-09-02 05:50:07] [Rank 0] step:4821/10000 train_time:366377ms step_avg:76.00ms +[2025-09-02 05:50:07] [Rank 0] step:4821/10000 train_time:366377ms step_avg:76.00ms +[2025-09-02 05:50:09] [Rank 0] step:4841/10000 train_time:368056ms step_avg:76.03ms +[2025-09-02 05:50:09] [Rank 0] step:4841/10000 train_time:368056ms step_avg:76.03ms +[2025-09-02 05:50:11] [Rank 0] step:4861/10000 train_time:369646ms step_avg:76.04ms +[2025-09-02 05:50:11] [Rank 0] step:4861/10000 train_time:369646ms step_avg:76.04ms +[2025-09-02 05:50:12] [Rank 0] step:4881/10000 train_time:371324ms step_avg:76.08ms +[2025-09-02 05:50:12] [Rank 0] step:4881/10000 train_time:371324ms step_avg:76.08ms +[2025-09-02 05:50:14] [Rank 0] step:4901/10000 train_time:372901ms step_avg:76.09ms +[2025-09-02 05:50:14] [Rank 0] step:4901/10000 train_time:372901ms step_avg:76.09ms +[2025-09-02 05:50:16] [Rank 0] step:4921/10000 train_time:374594ms step_avg:76.12ms +[2025-09-02 05:50:16] [Rank 0] step:4921/10000 train_time:374594ms step_avg:76.12ms +[2025-09-02 05:50:17] [Rank 0] 
step:4941/10000 train_time:376207ms step_avg:76.14ms +[2025-09-02 05:50:17] [Rank 0] step:4941/10000 train_time:376207ms step_avg:76.14ms +[2025-09-02 05:50:19] [Rank 0] step:4961/10000 train_time:377787ms step_avg:76.15ms +[2025-09-02 05:50:19] [Rank 0] step:4961/10000 train_time:377787ms step_avg:76.15ms +[2025-09-02 05:50:20] [Rank 0] step:4981/10000 train_time:379368ms step_avg:76.16ms +[2025-09-02 05:50:20] [Rank 0] step:4981/10000 train_time:379368ms step_avg:76.16ms +[2025-09-02 05:50:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:50:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:50:34] [Rank 0] PRINT: step:5000/10000 val_loss:3.9143 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=193.1,q75/q25=54.62 attn_vo:H=0.8110,top10E=0.13,eRank=294.5,q75/q25=inf mlp_w1:H=0.8997,top10E=0.15,eRank=398.1,q75/q25=4.79 mlp_w2:H=0.9706,top10E=0.04,eRank=631.9,q75/q25=2.90 vo_prod:H=0.6787,top10E=0.20,eRank=133.4,q75/q25=inf train_time:381028ms step_avg:76.21ms +[2025-09-02 05:50:34] [Rank 0] PRINT: step:5000/10000 val_loss:3.9143 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=193.1,q75/q25=54.62 attn_vo:H=0.8110,top10E=0.13,eRank=294.5,q75/q25=inf mlp_w1:H=0.8997,top10E=0.15,eRank=398.1,q75/q25=4.79 mlp_w2:H=0.9706,top10E=0.04,eRank=631.9,q75/q25=2.90 vo_prod:H=0.6787,top10E=0.20,eRank=133.4,q75/q25=inf train_time:381028ms step_avg:76.21ms +[2025-09-02 05:50:34] [Rank 0] step:5001/10000 train_time:381043ms step_avg:76.19ms +[2025-09-02 05:50:34] [Rank 0] step:5001/10000 train_time:381043ms step_avg:76.19ms +[2025-09-02 05:50:35] [Rank 0] step:5021/10000 train_time:382540ms step_avg:76.19ms +[2025-09-02 05:50:35] [Rank 0] step:5021/10000 train_time:382540ms step_avg:76.19ms +[2025-09-02 05:50:37] [Rank 0] step:5041/10000 train_time:384219ms step_avg:76.22ms +[2025-09-02 
05:50:37] [Rank 0] step:5041/10000 train_time:384219ms step_avg:76.22ms +[2025-09-02 05:50:38] [Rank 0] step:5061/10000 train_time:385794ms step_avg:76.23ms +[2025-09-02 05:50:38] [Rank 0] step:5061/10000 train_time:385794ms step_avg:76.23ms +[2025-09-02 05:50:40] [Rank 0] step:5081/10000 train_time:387372ms step_avg:76.24ms +[2025-09-02 05:50:40] [Rank 0] step:5081/10000 train_time:387372ms step_avg:76.24ms +[2025-09-02 05:50:42] [Rank 0] step:5101/10000 train_time:388950ms step_avg:76.25ms +[2025-09-02 05:50:42] [Rank 0] step:5101/10000 train_time:388950ms step_avg:76.25ms +[2025-09-02 05:50:43] [Rank 0] step:5121/10000 train_time:390529ms step_avg:76.26ms +[2025-09-02 05:50:43] [Rank 0] step:5121/10000 train_time:390529ms step_avg:76.26ms +[2025-09-02 05:50:45] [Rank 0] step:5141/10000 train_time:392112ms step_avg:76.27ms +[2025-09-02 05:50:45] [Rank 0] step:5141/10000 train_time:392112ms step_avg:76.27ms +[2025-09-02 05:50:46] [Rank 0] step:5161/10000 train_time:393689ms step_avg:76.28ms +[2025-09-02 05:50:46] [Rank 0] step:5161/10000 train_time:393689ms step_avg:76.28ms +[2025-09-02 05:50:48] [Rank 0] step:5181/10000 train_time:395270ms step_avg:76.29ms +[2025-09-02 05:50:48] [Rank 0] step:5181/10000 train_time:395270ms step_avg:76.29ms +[2025-09-02 05:50:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:50:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:51:01] [Rank 0] PRINT: step:5200/10000 val_loss:3.8956 svd_entropy: attn_qk:H=0.7798,top10E=0.25,eRank=194.4,q75/q25=54.02 attn_vo:H=0.8123,top10E=0.12,eRank=296.5,q75/q25=inf mlp_w1:H=0.9012,top10E=0.15,eRank=401.9,q75/q25=4.74 mlp_w2:H=0.9707,top10E=0.04,eRank=632.0,q75/q25=2.90 vo_prod:H=0.6806,top10E=0.20,eRank=135.4,q75/q25=inf train_time:396956ms step_avg:76.34ms +[2025-09-02 05:51:01] [Rank 0] PRINT: step:5200/10000 val_loss:3.8956 svd_entropy: attn_qk:H=0.7798,top10E=0.25,eRank=194.4,q75/q25=54.02 attn_vo:H=0.8123,top10E=0.12,eRank=296.5,q75/q25=inf mlp_w1:H=0.9012,top10E=0.15,eRank=401.9,q75/q25=4.74 mlp_w2:H=0.9707,top10E=0.04,eRank=632.0,q75/q25=2.90 vo_prod:H=0.6806,top10E=0.20,eRank=135.4,q75/q25=inf train_time:396956ms step_avg:76.34ms +[2025-09-02 05:51:01] [Rank 0] step:5201/10000 train_time:396971ms step_avg:76.33ms +[2025-09-02 05:51:01] [Rank 0] step:5201/10000 train_time:396971ms step_avg:76.33ms +[2025-09-02 05:51:03] [Rank 0] step:5221/10000 train_time:398552ms step_avg:76.34ms +[2025-09-02 05:51:03] [Rank 0] step:5221/10000 train_time:398552ms step_avg:76.34ms +[2025-09-02 05:51:05] [Rank 0] step:5241/10000 train_time:400162ms step_avg:76.35ms +[2025-09-02 05:51:05] [Rank 0] step:5241/10000 train_time:400162ms step_avg:76.35ms +[2025-09-02 05:51:06] [Rank 0] step:5261/10000 train_time:401771ms step_avg:76.37ms +[2025-09-02 05:51:06] [Rank 0] step:5261/10000 train_time:401771ms step_avg:76.37ms +[2025-09-02 05:51:08] [Rank 0] step:5281/10000 train_time:403383ms step_avg:76.38ms +[2025-09-02 05:51:08] [Rank 0] step:5281/10000 train_time:403383ms step_avg:76.38ms +[2025-09-02 05:51:09] [Rank 0] step:5301/10000 train_time:404997ms step_avg:76.40ms +[2025-09-02 05:51:09] [Rank 0] step:5301/10000 train_time:404997ms step_avg:76.40ms +[2025-09-02 05:51:11] [Rank 0] step:5321/10000 train_time:406606ms step_avg:76.42ms +[2025-09-02 05:51:11] [Rank 0] step:5321/10000 train_time:406606ms step_avg:76.42ms +[2025-09-02 05:51:13] [Rank 0] 
step:5341/10000 train_time:408215ms step_avg:76.43ms +[2025-09-02 05:51:13] [Rank 0] step:5341/10000 train_time:408215ms step_avg:76.43ms +[2025-09-02 05:51:14] [Rank 0] step:5361/10000 train_time:409829ms step_avg:76.45ms +[2025-09-02 05:51:14] [Rank 0] step:5361/10000 train_time:409829ms step_avg:76.45ms +[2025-09-02 05:51:16] [Rank 0] step:5381/10000 train_time:411439ms step_avg:76.46ms +[2025-09-02 05:51:16] [Rank 0] step:5381/10000 train_time:411439ms step_avg:76.46ms +[2025-09-02 05:51:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:51:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:51:29] [Rank 0] PRINT: step:5400/10000 val_loss:3.8783 svd_entropy: attn_qk:H=0.7810,top10E=0.25,eRank=195.6,q75/q25=53.37 attn_vo:H=0.8134,top10E=0.12,eRank=298.4,q75/q25=inf mlp_w1:H=0.9026,top10E=0.14,eRank=405.6,q75/q25=4.69 mlp_w2:H=0.9707,top10E=0.04,eRank=632.1,q75/q25=2.90 vo_prod:H=0.6824,top10E=0.20,eRank=137.2,q75/q25=inf train_time:413234ms step_avg:76.52ms +[2025-09-02 05:51:29] [Rank 0] PRINT: step:5400/10000 val_loss:3.8783 svd_entropy: attn_qk:H=0.7810,top10E=0.25,eRank=195.6,q75/q25=53.37 attn_vo:H=0.8134,top10E=0.12,eRank=298.4,q75/q25=inf mlp_w1:H=0.9026,top10E=0.14,eRank=405.6,q75/q25=4.69 mlp_w2:H=0.9707,top10E=0.04,eRank=632.1,q75/q25=2.90 vo_prod:H=0.6824,top10E=0.20,eRank=137.2,q75/q25=inf train_time:413234ms step_avg:76.52ms +[2025-09-02 05:51:29] [Rank 0] step:5401/10000 train_time:413249ms step_avg:76.51ms +[2025-09-02 05:51:29] [Rank 0] step:5401/10000 train_time:413249ms step_avg:76.51ms +[2025-09-02 05:51:31] [Rank 0] step:5421/10000 train_time:414775ms step_avg:76.51ms +[2025-09-02 05:51:31] [Rank 0] step:5421/10000 train_time:414775ms step_avg:76.51ms +[2025-09-02 05:51:33] [Rank 0] step:5441/10000 train_time:416380ms step_avg:76.53ms +[2025-09-02 
05:51:33] [Rank 0] step:5441/10000 train_time:416380ms step_avg:76.53ms +[2025-09-02 05:51:34] [Rank 0] step:5461/10000 train_time:417994ms step_avg:76.54ms +[2025-09-02 05:51:34] [Rank 0] step:5461/10000 train_time:417994ms step_avg:76.54ms +[2025-09-02 05:51:36] [Rank 0] step:5481/10000 train_time:419708ms step_avg:76.58ms +[2025-09-02 05:51:36] [Rank 0] step:5481/10000 train_time:419708ms step_avg:76.58ms +[2025-09-02 05:51:38] [Rank 0] step:5501/10000 train_time:421423ms step_avg:76.61ms +[2025-09-02 05:51:38] [Rank 0] step:5501/10000 train_time:421423ms step_avg:76.61ms +[2025-09-02 05:51:39] [Rank 0] step:5521/10000 train_time:423038ms step_avg:76.62ms +[2025-09-02 05:51:39] [Rank 0] step:5521/10000 train_time:423038ms step_avg:76.62ms +[2025-09-02 05:51:41] [Rank 0] step:5541/10000 train_time:424750ms step_avg:76.66ms +[2025-09-02 05:51:41] [Rank 0] step:5541/10000 train_time:424750ms step_avg:76.66ms +[2025-09-02 05:51:43] [Rank 0] step:5561/10000 train_time:426365ms step_avg:76.67ms +[2025-09-02 05:51:43] [Rank 0] step:5561/10000 train_time:426365ms step_avg:76.67ms +[2025-09-02 05:51:44] [Rank 0] step:5581/10000 train_time:427980ms step_avg:76.69ms +[2025-09-02 05:51:44] [Rank 0] step:5581/10000 train_time:427980ms step_avg:76.69ms +[2025-09-02 05:51:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:51:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:51:57] [Rank 0] PRINT: step:5600/10000 val_loss:3.8645 svd_entropy: attn_qk:H=0.7822,top10E=0.25,eRank=196.8,q75/q25=53.19 attn_vo:H=0.8145,top10E=0.12,eRank=300.1,q75/q25=inf mlp_w1:H=0.9039,top10E=0.14,eRank=408.9,q75/q25=4.65 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.90 vo_prod:H=0.6838,top10E=0.20,eRank=138.6,q75/q25=inf train_time:429674ms step_avg:76.73ms +[2025-09-02 05:51:57] [Rank 0] PRINT: step:5600/10000 val_loss:3.8645 svd_entropy: attn_qk:H=0.7822,top10E=0.25,eRank=196.8,q75/q25=53.19 attn_vo:H=0.8145,top10E=0.12,eRank=300.1,q75/q25=inf mlp_w1:H=0.9039,top10E=0.14,eRank=408.9,q75/q25=4.65 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.90 vo_prod:H=0.6838,top10E=0.20,eRank=138.6,q75/q25=inf train_time:429674ms step_avg:76.73ms +[2025-09-02 05:51:58] [Rank 0] step:5601/10000 train_time:429689ms step_avg:76.72ms +[2025-09-02 05:51:58] [Rank 0] step:5601/10000 train_time:429689ms step_avg:76.72ms +[2025-09-02 05:51:59] [Rank 0] step:5621/10000 train_time:431224ms step_avg:76.72ms +[2025-09-02 05:51:59] [Rank 0] step:5621/10000 train_time:431224ms step_avg:76.72ms +[2025-09-02 05:52:01] [Rank 0] step:5641/10000 train_time:432837ms step_avg:76.73ms +[2025-09-02 05:52:01] [Rank 0] step:5641/10000 train_time:432837ms step_avg:76.73ms +[2025-09-02 05:52:02] [Rank 0] step:5661/10000 train_time:434446ms step_avg:76.74ms +[2025-09-02 05:52:02] [Rank 0] step:5661/10000 train_time:434446ms step_avg:76.74ms +[2025-09-02 05:52:04] [Rank 0] step:5681/10000 train_time:436061ms step_avg:76.76ms +[2025-09-02 05:52:04] [Rank 0] step:5681/10000 train_time:436061ms step_avg:76.76ms +[2025-09-02 05:52:06] [Rank 0] step:5701/10000 train_time:437707ms step_avg:76.78ms +[2025-09-02 05:52:06] [Rank 0] step:5701/10000 train_time:437707ms step_avg:76.78ms +[2025-09-02 05:52:07] [Rank 0] step:5721/10000 train_time:439320ms step_avg:76.79ms +[2025-09-02 05:52:07] [Rank 0] step:5721/10000 train_time:439320ms step_avg:76.79ms +[2025-09-02 05:52:09] [Rank 0] 
step:5741/10000 train_time:440930ms step_avg:76.80ms +[2025-09-02 05:52:09] [Rank 0] step:5741/10000 train_time:440930ms step_avg:76.80ms +[2025-09-02 05:52:11] [Rank 0] step:5761/10000 train_time:442645ms step_avg:76.83ms +[2025-09-02 05:52:11] [Rank 0] step:5761/10000 train_time:442645ms step_avg:76.83ms +[2025-09-02 05:52:12] [Rank 0] step:5781/10000 train_time:444358ms step_avg:76.87ms +[2025-09-02 05:52:12] [Rank 0] step:5781/10000 train_time:444358ms step_avg:76.87ms +[2025-09-02 05:52:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:52:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:52:26] [Rank 0] PRINT: step:5800/10000 val_loss:3.8567 svd_entropy: attn_qk:H=0.7834,top10E=0.24,eRank=198.1,q75/q25=52.70 attn_vo:H=0.8155,top10E=0.12,eRank=301.8,q75/q25=inf mlp_w1:H=0.9050,top10E=0.14,eRank=412.0,q75/q25=4.62 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.90 vo_prod:H=0.6854,top10E=0.19,eRank=140.3,q75/q25=inf train_time:446054ms step_avg:76.91ms +[2025-09-02 05:52:26] [Rank 0] PRINT: step:5800/10000 val_loss:3.8567 svd_entropy: attn_qk:H=0.7834,top10E=0.24,eRank=198.1,q75/q25=52.70 attn_vo:H=0.8155,top10E=0.12,eRank=301.8,q75/q25=inf mlp_w1:H=0.9050,top10E=0.14,eRank=412.0,q75/q25=4.62 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.90 vo_prod:H=0.6854,top10E=0.19,eRank=140.3,q75/q25=inf train_time:446054ms step_avg:76.91ms +[2025-09-02 05:52:26] [Rank 0] step:5801/10000 train_time:446070ms step_avg:76.90ms +[2025-09-02 05:52:26] [Rank 0] step:5801/10000 train_time:446070ms step_avg:76.90ms +[2025-09-02 05:52:27] [Rank 0] step:5821/10000 train_time:447616ms step_avg:76.90ms +[2025-09-02 05:52:27] [Rank 0] step:5821/10000 train_time:447616ms step_avg:76.90ms +[2025-09-02 05:52:29] [Rank 0] step:5841/10000 train_time:449231ms step_avg:76.91ms +[2025-09-02 
05:52:29] [Rank 0] step:5841/10000 train_time:449231ms step_avg:76.91ms +[2025-09-02 05:52:31] [Rank 0] step:5861/10000 train_time:450842ms step_avg:76.92ms +[2025-09-02 05:52:31] [Rank 0] step:5861/10000 train_time:450842ms step_avg:76.92ms +[2025-09-02 05:52:32] [Rank 0] step:5881/10000 train_time:452456ms step_avg:76.94ms +[2025-09-02 05:52:32] [Rank 0] step:5881/10000 train_time:452456ms step_avg:76.94ms +[2025-09-02 05:52:34] [Rank 0] step:5901/10000 train_time:454073ms step_avg:76.95ms +[2025-09-02 05:52:34] [Rank 0] step:5901/10000 train_time:454073ms step_avg:76.95ms +[2025-09-02 05:52:35] [Rank 0] step:5921/10000 train_time:455689ms step_avg:76.96ms +[2025-09-02 05:52:35] [Rank 0] step:5921/10000 train_time:455689ms step_avg:76.96ms +[2025-09-02 05:52:37] [Rank 0] step:5941/10000 train_time:457308ms step_avg:76.97ms +[2025-09-02 05:52:37] [Rank 0] step:5941/10000 train_time:457308ms step_avg:76.97ms +[2025-09-02 05:52:39] [Rank 0] step:5961/10000 train_time:458925ms step_avg:76.99ms +[2025-09-02 05:52:39] [Rank 0] step:5961/10000 train_time:458925ms step_avg:76.99ms +[2025-09-02 05:52:40] [Rank 0] step:5981/10000 train_time:460543ms step_avg:77.00ms +[2025-09-02 05:52:40] [Rank 0] step:5981/10000 train_time:460543ms step_avg:77.00ms +[2025-09-02 05:52:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:52:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:52:54] [Rank 0] PRINT: step:6000/10000 val_loss:3.8332 svd_entropy: attn_qk:H=0.7845,top10E=0.24,eRank=199.3,q75/q25=52.10 attn_vo:H=0.8165,top10E=0.12,eRank=303.4,q75/q25=inf mlp_w1:H=0.9062,top10E=0.14,eRank=415.1,q75/q25=4.59 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.90 vo_prod:H=0.6867,top10E=0.19,eRank=141.7,q75/q25=inf train_time:462239ms step_avg:77.04ms +[2025-09-02 05:52:54] [Rank 0] PRINT: step:6000/10000 val_loss:3.8332 svd_entropy: attn_qk:H=0.7845,top10E=0.24,eRank=199.3,q75/q25=52.10 attn_vo:H=0.8165,top10E=0.12,eRank=303.4,q75/q25=inf mlp_w1:H=0.9062,top10E=0.14,eRank=415.1,q75/q25=4.59 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.90 vo_prod:H=0.6867,top10E=0.19,eRank=141.7,q75/q25=inf train_time:462239ms step_avg:77.04ms +[2025-09-02 05:52:54] [Rank 0] step:6001/10000 train_time:462254ms step_avg:77.03ms +[2025-09-02 05:52:54] [Rank 0] step:6001/10000 train_time:462254ms step_avg:77.03ms +[2025-09-02 05:52:55] [Rank 0] step:6021/10000 train_time:463790ms step_avg:77.03ms +[2025-09-02 05:52:55] [Rank 0] step:6021/10000 train_time:463790ms step_avg:77.03ms +[2025-09-02 05:52:57] [Rank 0] step:6041/10000 train_time:465403ms step_avg:77.04ms +[2025-09-02 05:52:57] [Rank 0] step:6041/10000 train_time:465403ms step_avg:77.04ms +[2025-09-02 05:52:59] [Rank 0] step:6061/10000 train_time:467019ms step_avg:77.05ms +[2025-09-02 05:52:59] [Rank 0] step:6061/10000 train_time:467019ms step_avg:77.05ms +[2025-09-02 05:53:00] [Rank 0] step:6081/10000 train_time:468632ms step_avg:77.06ms +[2025-09-02 05:53:00] [Rank 0] step:6081/10000 train_time:468632ms step_avg:77.06ms +[2025-09-02 05:53:02] [Rank 0] step:6101/10000 train_time:470249ms step_avg:77.08ms +[2025-09-02 05:53:02] [Rank 0] step:6101/10000 train_time:470249ms step_avg:77.08ms +[2025-09-02 05:53:04] [Rank 0] step:6121/10000 train_time:472126ms step_avg:77.13ms +[2025-09-02 05:53:04] [Rank 0] step:6121/10000 train_time:472126ms step_avg:77.13ms +[2025-09-02 05:53:05] [Rank 0] 
step:6141/10000 train_time:473751ms step_avg:77.15ms +[2025-09-02 05:53:05] [Rank 0] step:6141/10000 train_time:473751ms step_avg:77.15ms +[2025-09-02 05:53:07] [Rank 0] step:6161/10000 train_time:475367ms step_avg:77.16ms +[2025-09-02 05:53:07] [Rank 0] step:6161/10000 train_time:475367ms step_avg:77.16ms +[2025-09-02 05:53:09] [Rank 0] step:6181/10000 train_time:476979ms step_avg:77.17ms +[2025-09-02 05:53:09] [Rank 0] step:6181/10000 train_time:476979ms step_avg:77.17ms +[2025-09-02 05:53:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:53:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:53:22] [Rank 0] PRINT: step:6200/10000 val_loss:3.8186 svd_entropy: attn_qk:H=0.7855,top10E=0.24,eRank=200.3,q75/q25=51.86 attn_vo:H=0.8174,top10E=0.12,eRank=304.9,q75/q25=inf mlp_w1:H=0.9072,top10E=0.14,eRank=417.9,q75/q25=4.55 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.90 vo_prod:H=0.6882,top10E=0.19,eRank=143.2,q75/q25=inf train_time:478744ms step_avg:77.22ms +[2025-09-02 05:53:22] [Rank 0] PRINT: step:6200/10000 val_loss:3.8186 svd_entropy: attn_qk:H=0.7855,top10E=0.24,eRank=200.3,q75/q25=51.86 attn_vo:H=0.8174,top10E=0.12,eRank=304.9,q75/q25=inf mlp_w1:H=0.9072,top10E=0.14,eRank=417.9,q75/q25=4.55 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.90 vo_prod:H=0.6882,top10E=0.19,eRank=143.2,q75/q25=inf train_time:478744ms step_avg:77.22ms +[2025-09-02 05:53:22] [Rank 0] step:6201/10000 train_time:478759ms step_avg:77.21ms +[2025-09-02 05:53:22] [Rank 0] step:6201/10000 train_time:478759ms step_avg:77.21ms +[2025-09-02 05:53:23] [Rank 0] step:6221/10000 train_time:480295ms step_avg:77.21ms +[2025-09-02 05:53:23] [Rank 0] step:6221/10000 train_time:480295ms step_avg:77.21ms +[2025-09-02 05:53:25] [Rank 0] step:6241/10000 train_time:481912ms step_avg:77.22ms +[2025-09-02 
05:53:25] [Rank 0] step:6241/10000 train_time:481912ms step_avg:77.22ms +[2025-09-02 05:53:27] [Rank 0] step:6261/10000 train_time:483527ms step_avg:77.23ms +[2025-09-02 05:53:27] [Rank 0] step:6261/10000 train_time:483527ms step_avg:77.23ms +[2025-09-02 05:53:28] [Rank 0] step:6281/10000 train_time:485142ms step_avg:77.24ms +[2025-09-02 05:53:28] [Rank 0] step:6281/10000 train_time:485142ms step_avg:77.24ms +[2025-09-02 05:53:30] [Rank 0] step:6301/10000 train_time:486761ms step_avg:77.25ms +[2025-09-02 05:53:30] [Rank 0] step:6301/10000 train_time:486761ms step_avg:77.25ms +[2025-09-02 05:53:32] [Rank 0] step:6321/10000 train_time:488377ms step_avg:77.26ms +[2025-09-02 05:53:32] [Rank 0] step:6321/10000 train_time:488377ms step_avg:77.26ms +[2025-09-02 05:53:33] [Rank 0] step:6341/10000 train_time:489998ms step_avg:77.27ms +[2025-09-02 05:53:33] [Rank 0] step:6341/10000 train_time:489998ms step_avg:77.27ms +[2025-09-02 05:53:35] [Rank 0] step:6361/10000 train_time:491622ms step_avg:77.29ms +[2025-09-02 05:53:35] [Rank 0] step:6361/10000 train_time:491622ms step_avg:77.29ms +[2025-09-02 05:53:36] [Rank 0] step:6381/10000 train_time:493245ms step_avg:77.30ms +[2025-09-02 05:53:36] [Rank 0] step:6381/10000 train_time:493245ms step_avg:77.30ms +[2025-09-02 05:53:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:53:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:53:49] [Rank 0] PRINT: step:6400/10000 val_loss:3.8013 svd_entropy: attn_qk:H=0.7865,top10E=0.24,eRank=201.3,q75/q25=51.23 attn_vo:H=0.8182,top10E=0.12,eRank=306.2,q75/q25=inf mlp_w1:H=0.9082,top10E=0.14,eRank=420.5,q75/q25=4.52 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6894,top10E=0.19,eRank=144.5,q75/q25=inf train_time:494945ms step_avg:77.34ms +[2025-09-02 05:53:49] [Rank 0] PRINT: step:6400/10000 val_loss:3.8013 svd_entropy: attn_qk:H=0.7865,top10E=0.24,eRank=201.3,q75/q25=51.23 attn_vo:H=0.8182,top10E=0.12,eRank=306.2,q75/q25=inf mlp_w1:H=0.9082,top10E=0.14,eRank=420.5,q75/q25=4.52 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6894,top10E=0.19,eRank=144.5,q75/q25=inf train_time:494945ms step_avg:77.34ms +[2025-09-02 05:53:50] [Rank 0] step:6401/10000 train_time:494959ms step_avg:77.33ms +[2025-09-02 05:53:50] [Rank 0] step:6401/10000 train_time:494959ms step_avg:77.33ms +[2025-09-02 05:53:51] [Rank 0] step:6421/10000 train_time:496507ms step_avg:77.33ms +[2025-09-02 05:53:51] [Rank 0] step:6421/10000 train_time:496507ms step_avg:77.33ms +[2025-09-02 05:53:53] [Rank 0] step:6441/10000 train_time:498124ms step_avg:77.34ms +[2025-09-02 05:53:53] [Rank 0] step:6441/10000 train_time:498124ms step_avg:77.34ms +[2025-09-02 05:53:54] [Rank 0] step:6461/10000 train_time:499741ms step_avg:77.35ms +[2025-09-02 05:53:54] [Rank 0] step:6461/10000 train_time:499741ms step_avg:77.35ms +[2025-09-02 05:53:56] [Rank 0] step:6481/10000 train_time:501364ms step_avg:77.36ms +[2025-09-02 05:53:56] [Rank 0] step:6481/10000 train_time:501364ms step_avg:77.36ms +[2025-09-02 05:53:58] [Rank 0] step:6501/10000 train_time:502975ms step_avg:77.37ms +[2025-09-02 05:53:58] [Rank 0] step:6501/10000 train_time:502975ms step_avg:77.37ms +[2025-09-02 05:53:59] [Rank 0] step:6521/10000 train_time:504587ms step_avg:77.38ms +[2025-09-02 05:53:59] [Rank 0] step:6521/10000 train_time:504587ms step_avg:77.38ms +[2025-09-02 05:54:01] [Rank 0] 
step:6541/10000 train_time:506206ms step_avg:77.39ms +[2025-09-02 05:54:01] [Rank 0] step:6541/10000 train_time:506206ms step_avg:77.39ms +[2025-09-02 05:54:03] [Rank 0] step:6561/10000 train_time:507827ms step_avg:77.40ms +[2025-09-02 05:54:03] [Rank 0] step:6561/10000 train_time:507827ms step_avg:77.40ms +[2025-09-02 05:54:04] [Rank 0] step:6581/10000 train_time:509441ms step_avg:77.41ms +[2025-09-02 05:54:04] [Rank 0] step:6581/10000 train_time:509441ms step_avg:77.41ms +[2025-09-02 05:54:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:54:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:54:17] [Rank 0] PRINT: step:6600/10000 val_loss:3.7878 svd_entropy: attn_qk:H=0.7873,top10E=0.24,eRank=202.2,q75/q25=51.06 attn_vo:H=0.8189,top10E=0.12,eRank=307.5,q75/q25=inf mlp_w1:H=0.9090,top10E=0.14,eRank=422.8,q75/q25=4.49 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6906,top10E=0.19,eRank=145.7,q75/q25=inf train_time:511141ms step_avg:77.45ms +[2025-09-02 05:54:17] [Rank 0] PRINT: step:6600/10000 val_loss:3.7878 svd_entropy: attn_qk:H=0.7873,top10E=0.24,eRank=202.2,q75/q25=51.06 attn_vo:H=0.8189,top10E=0.12,eRank=307.5,q75/q25=inf mlp_w1:H=0.9090,top10E=0.14,eRank=422.8,q75/q25=4.49 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6906,top10E=0.19,eRank=145.7,q75/q25=inf train_time:511141ms step_avg:77.45ms +[2025-09-02 05:54:18] [Rank 0] step:6601/10000 train_time:511156ms step_avg:77.44ms +[2025-09-02 05:54:18] [Rank 0] step:6601/10000 train_time:511156ms step_avg:77.44ms +[2025-09-02 05:54:19] [Rank 0] step:6621/10000 train_time:512711ms step_avg:77.44ms +[2025-09-02 05:54:19] [Rank 0] step:6621/10000 train_time:512711ms step_avg:77.44ms +[2025-09-02 05:54:21] [Rank 0] step:6641/10000 train_time:514336ms step_avg:77.45ms +[2025-09-02 
05:54:21] [Rank 0] step:6641/10000 train_time:514336ms step_avg:77.45ms +[2025-09-02 05:54:22] [Rank 0] step:6661/10000 train_time:515953ms step_avg:77.46ms +[2025-09-02 05:54:22] [Rank 0] step:6661/10000 train_time:515953ms step_avg:77.46ms +[2025-09-02 05:54:24] [Rank 0] step:6681/10000 train_time:517592ms step_avg:77.47ms +[2025-09-02 05:54:24] [Rank 0] step:6681/10000 train_time:517592ms step_avg:77.47ms +[2025-09-02 05:54:26] [Rank 0] step:6701/10000 train_time:519244ms step_avg:77.49ms +[2025-09-02 05:54:26] [Rank 0] step:6701/10000 train_time:519244ms step_avg:77.49ms +[2025-09-02 05:54:27] [Rank 0] step:6721/10000 train_time:520894ms step_avg:77.50ms +[2025-09-02 05:54:27] [Rank 0] step:6721/10000 train_time:520894ms step_avg:77.50ms +[2025-09-02 05:54:29] [Rank 0] step:6741/10000 train_time:522537ms step_avg:77.52ms +[2025-09-02 05:54:29] [Rank 0] step:6741/10000 train_time:522537ms step_avg:77.52ms +[2025-09-02 05:54:31] [Rank 0] step:6761/10000 train_time:524180ms step_avg:77.53ms +[2025-09-02 05:54:31] [Rank 0] step:6761/10000 train_time:524180ms step_avg:77.53ms +[2025-09-02 05:54:32] [Rank 0] step:6781/10000 train_time:525831ms step_avg:77.54ms +[2025-09-02 05:54:32] [Rank 0] step:6781/10000 train_time:525831ms step_avg:77.54ms +[2025-09-02 05:54:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:54:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:54:46] [Rank 0] PRINT: step:6800/10000 val_loss:3.7717 svd_entropy: attn_qk:H=0.7879,top10E=0.24,eRank=202.9,q75/q25=50.81 attn_vo:H=0.8196,top10E=0.12,eRank=308.6,q75/q25=inf mlp_w1:H=0.9098,top10E=0.14,eRank=424.9,q75/q25=4.46 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6917,top10E=0.19,eRank=146.9,q75/q25=inf train_time:527567ms step_avg:77.58ms +[2025-09-02 05:54:46] [Rank 0] PRINT: step:6800/10000 val_loss:3.7717 svd_entropy: attn_qk:H=0.7879,top10E=0.24,eRank=202.9,q75/q25=50.81 attn_vo:H=0.8196,top10E=0.12,eRank=308.6,q75/q25=inf mlp_w1:H=0.9098,top10E=0.14,eRank=424.9,q75/q25=4.46 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6917,top10E=0.19,eRank=146.9,q75/q25=inf train_time:527567ms step_avg:77.58ms +[2025-09-02 05:54:46] [Rank 0] step:6801/10000 train_time:527582ms step_avg:77.57ms +[2025-09-02 05:54:46] [Rank 0] step:6801/10000 train_time:527582ms step_avg:77.57ms +[2025-09-02 05:54:47] [Rank 0] step:6821/10000 train_time:529144ms step_avg:77.58ms +[2025-09-02 05:54:47] [Rank 0] step:6821/10000 train_time:529144ms step_avg:77.58ms +[2025-09-02 05:54:49] [Rank 0] step:6841/10000 train_time:530787ms step_avg:77.59ms +[2025-09-02 05:54:49] [Rank 0] step:6841/10000 train_time:530787ms step_avg:77.59ms +[2025-09-02 05:54:51] [Rank 0] step:6861/10000 train_time:532433ms step_avg:77.60ms +[2025-09-02 05:54:51] [Rank 0] step:6861/10000 train_time:532433ms step_avg:77.60ms +[2025-09-02 05:54:52] [Rank 0] step:6881/10000 train_time:534080ms step_avg:77.62ms +[2025-09-02 05:54:52] [Rank 0] step:6881/10000 train_time:534080ms step_avg:77.62ms +[2025-09-02 05:54:54] [Rank 0] step:6901/10000 train_time:535724ms step_avg:77.63ms +[2025-09-02 05:54:54] [Rank 0] step:6901/10000 train_time:535724ms step_avg:77.63ms +[2025-09-02 05:54:56] [Rank 0] step:6921/10000 train_time:537369ms step_avg:77.64ms +[2025-09-02 05:54:56] [Rank 0] step:6921/10000 train_time:537369ms step_avg:77.64ms +[2025-09-02 05:54:57] [Rank 0] 
step:6941/10000 train_time:539020ms step_avg:77.66ms +[2025-09-02 05:54:57] [Rank 0] step:6941/10000 train_time:539020ms step_avg:77.66ms +[2025-09-02 05:54:59] [Rank 0] step:6961/10000 train_time:540683ms step_avg:77.67ms +[2025-09-02 05:54:59] [Rank 0] step:6961/10000 train_time:540683ms step_avg:77.67ms +[2025-09-02 05:55:01] [Rank 0] step:6981/10000 train_time:542334ms step_avg:77.69ms +[2025-09-02 05:55:01] [Rank 0] step:6981/10000 train_time:542334ms step_avg:77.69ms +[2025-09-02 05:55:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:55:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:55:14] [Rank 0] PRINT: step:7000/10000 val_loss:3.7543 svd_entropy: attn_qk:H=0.7886,top10E=0.24,eRank=203.6,q75/q25=50.35 attn_vo:H=0.8202,top10E=0.12,eRank=309.7,q75/q25=inf mlp_w1:H=0.9105,top10E=0.13,eRank=426.8,q75/q25=4.44 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6927,top10E=0.19,eRank=147.9,q75/q25=inf train_time:544073ms step_avg:77.72ms +[2025-09-02 05:55:14] [Rank 0] PRINT: step:7000/10000 val_loss:3.7543 svd_entropy: attn_qk:H=0.7886,top10E=0.24,eRank=203.6,q75/q25=50.35 attn_vo:H=0.8202,top10E=0.12,eRank=309.7,q75/q25=inf mlp_w1:H=0.9105,top10E=0.13,eRank=426.8,q75/q25=4.44 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6927,top10E=0.19,eRank=147.9,q75/q25=inf train_time:544073ms step_avg:77.72ms +[2025-09-02 05:55:14] [Rank 0] step:7001/10000 train_time:544088ms step_avg:77.72ms +[2025-09-02 05:55:14] [Rank 0] step:7001/10000 train_time:544088ms step_avg:77.72ms +[2025-09-02 05:55:16] [Rank 0] step:7021/10000 train_time:545756ms step_avg:77.73ms +[2025-09-02 05:55:16] [Rank 0] step:7021/10000 train_time:545756ms step_avg:77.73ms +[2025-09-02 05:55:18] [Rank 0] step:7041/10000 train_time:547402ms step_avg:77.74ms +[2025-09-02 
05:55:18] [Rank 0] step:7041/10000 train_time:547402ms step_avg:77.74ms +[2025-09-02 05:55:19] [Rank 0] step:7061/10000 train_time:549047ms step_avg:77.76ms +[2025-09-02 05:55:19] [Rank 0] step:7061/10000 train_time:549047ms step_avg:77.76ms +[2025-09-02 05:55:21] [Rank 0] step:7081/10000 train_time:550688ms step_avg:77.77ms +[2025-09-02 05:55:21] [Rank 0] step:7081/10000 train_time:550688ms step_avg:77.77ms +[2025-09-02 05:55:23] [Rank 0] step:7101/10000 train_time:552337ms step_avg:77.78ms +[2025-09-02 05:55:23] [Rank 0] step:7101/10000 train_time:552337ms step_avg:77.78ms +[2025-09-02 05:55:24] [Rank 0] step:7121/10000 train_time:553983ms step_avg:77.80ms +[2025-09-02 05:55:24] [Rank 0] step:7121/10000 train_time:553983ms step_avg:77.80ms +[2025-09-02 05:55:26] [Rank 0] step:7141/10000 train_time:555630ms step_avg:77.81ms +[2025-09-02 05:55:26] [Rank 0] step:7141/10000 train_time:555630ms step_avg:77.81ms +[2025-09-02 05:55:28] [Rank 0] step:7161/10000 train_time:557276ms step_avg:77.82ms +[2025-09-02 05:55:28] [Rank 0] step:7161/10000 train_time:557276ms step_avg:77.82ms +[2025-09-02 05:55:29] [Rank 0] step:7181/10000 train_time:558923ms step_avg:77.83ms +[2025-09-02 05:55:29] [Rank 0] step:7181/10000 train_time:558923ms step_avg:77.83ms +[2025-09-02 05:55:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:55:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:55:43] [Rank 0] PRINT: step:7200/10000 val_loss:3.7452 svd_entropy: attn_qk:H=0.7892,top10E=0.24,eRank=204.3,q75/q25=50.00 attn_vo:H=0.8208,top10E=0.12,eRank=310.6,q75/q25=inf mlp_w1:H=0.9111,top10E=0.13,eRank=428.6,q75/q25=4.41 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6937,top10E=0.19,eRank=149.0,q75/q25=inf train_time:560653ms step_avg:77.87ms +[2025-09-02 05:55:43] [Rank 0] PRINT: step:7200/10000 val_loss:3.7452 svd_entropy: attn_qk:H=0.7892,top10E=0.24,eRank=204.3,q75/q25=50.00 attn_vo:H=0.8208,top10E=0.12,eRank=310.6,q75/q25=inf mlp_w1:H=0.9111,top10E=0.13,eRank=428.6,q75/q25=4.41 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6937,top10E=0.19,eRank=149.0,q75/q25=inf train_time:560653ms step_avg:77.87ms +[2025-09-02 05:55:43] [Rank 0] step:7201/10000 train_time:560669ms step_avg:77.86ms +[2025-09-02 05:55:43] [Rank 0] step:7201/10000 train_time:560669ms step_avg:77.86ms +[2025-09-02 05:55:45] [Rank 0] step:7221/10000 train_time:562256ms step_avg:77.86ms +[2025-09-02 05:55:45] [Rank 0] step:7221/10000 train_time:562256ms step_avg:77.86ms +[2025-09-02 05:55:46] [Rank 0] step:7241/10000 train_time:563896ms step_avg:77.88ms +[2025-09-02 05:55:46] [Rank 0] step:7241/10000 train_time:563896ms step_avg:77.88ms +[2025-09-02 05:55:48] [Rank 0] step:7261/10000 train_time:565536ms step_avg:77.89ms +[2025-09-02 05:55:48] [Rank 0] step:7261/10000 train_time:565536ms step_avg:77.89ms +[2025-09-02 05:55:50] [Rank 0] step:7281/10000 train_time:567187ms step_avg:77.90ms +[2025-09-02 05:55:50] [Rank 0] step:7281/10000 train_time:567187ms step_avg:77.90ms +[2025-09-02 05:55:51] [Rank 0] step:7301/10000 train_time:568832ms step_avg:77.91ms +[2025-09-02 05:55:51] [Rank 0] step:7301/10000 train_time:568832ms step_avg:77.91ms +[2025-09-02 05:55:53] [Rank 0] step:7321/10000 train_time:570489ms step_avg:77.93ms +[2025-09-02 05:55:53] [Rank 0] step:7321/10000 train_time:570489ms step_avg:77.93ms +[2025-09-02 05:55:55] [Rank 0] 
step:7341/10000 train_time:572138ms step_avg:77.94ms +[2025-09-02 05:55:55] [Rank 0] step:7341/10000 train_time:572138ms step_avg:77.94ms +[2025-09-02 05:55:56] [Rank 0] step:7361/10000 train_time:573893ms step_avg:77.96ms +[2025-09-02 05:55:56] [Rank 0] step:7361/10000 train_time:573893ms step_avg:77.96ms +[2025-09-02 05:55:58] [Rank 0] step:7381/10000 train_time:575546ms step_avg:77.98ms +[2025-09-02 05:55:58] [Rank 0] step:7381/10000 train_time:575546ms step_avg:77.98ms +[2025-09-02 05:55:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:55:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:56:12] [Rank 0] PRINT: step:7400/10000 val_loss:3.7264 svd_entropy: attn_qk:H=0.7896,top10E=0.24,eRank=204.8,q75/q25=49.67 attn_vo:H=0.8213,top10E=0.12,eRank=311.5,q75/q25=inf mlp_w1:H=0.9117,top10E=0.13,eRank=430.1,q75/q25=4.38 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.88 vo_prod:H=0.6946,top10E=0.19,eRank=150.0,q75/q25=inf train_time:577260ms step_avg:78.01ms +[2025-09-02 05:56:12] [Rank 0] PRINT: step:7400/10000 val_loss:3.7264 svd_entropy: attn_qk:H=0.7896,top10E=0.24,eRank=204.8,q75/q25=49.67 attn_vo:H=0.8213,top10E=0.12,eRank=311.5,q75/q25=inf mlp_w1:H=0.9117,top10E=0.13,eRank=430.1,q75/q25=4.38 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.88 vo_prod:H=0.6946,top10E=0.19,eRank=150.0,q75/q25=inf train_time:577260ms step_avg:78.01ms +[2025-09-02 05:56:12] [Rank 0] step:7401/10000 train_time:577275ms step_avg:78.00ms +[2025-09-02 05:56:12] [Rank 0] step:7401/10000 train_time:577275ms step_avg:78.00ms +[2025-09-02 05:56:13] [Rank 0] step:7421/10000 train_time:578835ms step_avg:78.00ms +[2025-09-02 05:56:13] [Rank 0] step:7421/10000 train_time:578835ms step_avg:78.00ms +[2025-09-02 05:56:15] [Rank 0] step:7441/10000 train_time:580477ms step_avg:78.01ms +[2025-09-02 
05:56:15] [Rank 0] step:7441/10000 train_time:580477ms step_avg:78.01ms +[2025-09-02 05:56:17] [Rank 0] step:7461/10000 train_time:582123ms step_avg:78.02ms +[2025-09-02 05:56:17] [Rank 0] step:7461/10000 train_time:582123ms step_avg:78.02ms +[2025-09-02 05:56:18] [Rank 0] step:7481/10000 train_time:583777ms step_avg:78.03ms +[2025-09-02 05:56:18] [Rank 0] step:7481/10000 train_time:583777ms step_avg:78.03ms +[2025-09-02 05:56:20] [Rank 0] step:7501/10000 train_time:585484ms step_avg:78.05ms +[2025-09-02 05:56:20] [Rank 0] step:7501/10000 train_time:585484ms step_avg:78.05ms +[2025-09-02 05:56:22] [Rank 0] step:7521/10000 train_time:587138ms step_avg:78.07ms +[2025-09-02 05:56:22] [Rank 0] step:7521/10000 train_time:587138ms step_avg:78.07ms +[2025-09-02 05:56:23] [Rank 0] step:7541/10000 train_time:588799ms step_avg:78.08ms +[2025-09-02 05:56:23] [Rank 0] step:7541/10000 train_time:588799ms step_avg:78.08ms +[2025-09-02 05:56:25] [Rank 0] step:7561/10000 train_time:590437ms step_avg:78.09ms +[2025-09-02 05:56:25] [Rank 0] step:7561/10000 train_time:590437ms step_avg:78.09ms +[2025-09-02 05:56:27] [Rank 0] step:7581/10000 train_time:592097ms step_avg:78.10ms +[2025-09-02 05:56:27] [Rank 0] step:7581/10000 train_time:592097ms step_avg:78.10ms +[2025-09-02 05:56:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:56:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:56:40] [Rank 0] PRINT: step:7600/10000 val_loss:3.7197 svd_entropy: attn_qk:H=0.7902,top10E=0.24,eRank=205.4,q75/q25=49.31 attn_vo:H=0.8218,top10E=0.12,eRank=312.2,q75/q25=inf mlp_w1:H=0.9122,top10E=0.13,eRank=431.6,q75/q25=4.37 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6954,top10E=0.19,eRank=150.8,q75/q25=inf train_time:593841ms step_avg:78.14ms +[2025-09-02 05:56:40] [Rank 0] PRINT: step:7600/10000 val_loss:3.7197 svd_entropy: attn_qk:H=0.7902,top10E=0.24,eRank=205.4,q75/q25=49.31 attn_vo:H=0.8218,top10E=0.12,eRank=312.2,q75/q25=inf mlp_w1:H=0.9122,top10E=0.13,eRank=431.6,q75/q25=4.37 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6954,top10E=0.19,eRank=150.8,q75/q25=inf train_time:593841ms step_avg:78.14ms +[2025-09-02 05:56:40] [Rank 0] step:7601/10000 train_time:593856ms step_avg:78.13ms +[2025-09-02 05:56:40] [Rank 0] step:7601/10000 train_time:593856ms step_avg:78.13ms +[2025-09-02 05:56:42] [Rank 0] step:7621/10000 train_time:595425ms step_avg:78.13ms +[2025-09-02 05:56:42] [Rank 0] step:7621/10000 train_time:595425ms step_avg:78.13ms +[2025-09-02 05:56:44] [Rank 0] step:7641/10000 train_time:597073ms step_avg:78.14ms +[2025-09-02 05:56:44] [Rank 0] step:7641/10000 train_time:597073ms step_avg:78.14ms +[2025-09-02 05:56:45] [Rank 0] step:7661/10000 train_time:598725ms step_avg:78.15ms +[2025-09-02 05:56:45] [Rank 0] step:7661/10000 train_time:598725ms step_avg:78.15ms +[2025-09-02 05:56:47] [Rank 0] step:7681/10000 train_time:600372ms step_avg:78.16ms +[2025-09-02 05:56:47] [Rank 0] step:7681/10000 train_time:600372ms step_avg:78.16ms +[2025-09-02 05:56:48] [Rank 0] step:7701/10000 train_time:602020ms step_avg:78.17ms +[2025-09-02 05:56:48] [Rank 0] step:7701/10000 train_time:602020ms step_avg:78.17ms +[2025-09-02 05:56:50] [Rank 0] step:7721/10000 train_time:603677ms step_avg:78.19ms +[2025-09-02 05:56:50] [Rank 0] step:7721/10000 train_time:603677ms step_avg:78.19ms +[2025-09-02 05:56:52] [Rank 0] 
step:7741/10000 train_time:605329ms step_avg:78.20ms +[2025-09-02 05:56:52] [Rank 0] step:7741/10000 train_time:605329ms step_avg:78.20ms +[2025-09-02 05:56:53] [Rank 0] step:7761/10000 train_time:606990ms step_avg:78.21ms +[2025-09-02 05:56:53] [Rank 0] step:7761/10000 train_time:606990ms step_avg:78.21ms +[2025-09-02 05:56:55] [Rank 0] step:7781/10000 train_time:608644ms step_avg:78.22ms +[2025-09-02 05:56:55] [Rank 0] step:7781/10000 train_time:608644ms step_avg:78.22ms +[2025-09-02 05:56:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:56:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:57:09] [Rank 0] PRINT: step:7800/10000 val_loss:3.7061 svd_entropy: attn_qk:H=0.7907,top10E=0.24,eRank=205.9,q75/q25=48.98 attn_vo:H=0.8222,top10E=0.11,eRank=313.0,q75/q25=inf mlp_w1:H=0.9127,top10E=0.13,eRank=432.9,q75/q25=4.35 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6962,top10E=0.18,eRank=151.7,q75/q25=inf train_time:610390ms step_avg:78.26ms +[2025-09-02 05:57:09] [Rank 0] PRINT: step:7800/10000 val_loss:3.7061 svd_entropy: attn_qk:H=0.7907,top10E=0.24,eRank=205.9,q75/q25=48.98 attn_vo:H=0.8222,top10E=0.11,eRank=313.0,q75/q25=inf mlp_w1:H=0.9127,top10E=0.13,eRank=432.9,q75/q25=4.35 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6962,top10E=0.18,eRank=151.7,q75/q25=inf train_time:610390ms step_avg:78.26ms +[2025-09-02 05:57:09] [Rank 0] step:7801/10000 train_time:610406ms step_avg:78.25ms +[2025-09-02 05:57:09] [Rank 0] step:7801/10000 train_time:610406ms step_avg:78.25ms +[2025-09-02 05:57:10] [Rank 0] step:7821/10000 train_time:611985ms step_avg:78.25ms +[2025-09-02 05:57:10] [Rank 0] step:7821/10000 train_time:611985ms step_avg:78.25ms +[2025-09-02 05:57:12] [Rank 0] step:7841/10000 train_time:613633ms step_avg:78.26ms +[2025-09-02 
05:57:12] [Rank 0] step:7841/10000 train_time:613633ms step_avg:78.26ms +[2025-09-02 05:57:14] [Rank 0] step:7861/10000 train_time:615289ms step_avg:78.27ms +[2025-09-02 05:57:14] [Rank 0] step:7861/10000 train_time:615289ms step_avg:78.27ms +[2025-09-02 05:57:15] [Rank 0] step:7881/10000 train_time:616948ms step_avg:78.28ms +[2025-09-02 05:57:15] [Rank 0] step:7881/10000 train_time:616948ms step_avg:78.28ms +[2025-09-02 05:57:17] [Rank 0] step:7901/10000 train_time:618601ms step_avg:78.29ms +[2025-09-02 05:57:17] [Rank 0] step:7901/10000 train_time:618601ms step_avg:78.29ms +[2025-09-02 05:57:19] [Rank 0] step:7921/10000 train_time:620256ms step_avg:78.31ms +[2025-09-02 05:57:19] [Rank 0] step:7921/10000 train_time:620256ms step_avg:78.31ms +[2025-09-02 05:57:20] [Rank 0] step:7941/10000 train_time:621920ms step_avg:78.32ms +[2025-09-02 05:57:20] [Rank 0] step:7941/10000 train_time:621920ms step_avg:78.32ms +[2025-09-02 05:57:22] [Rank 0] step:7961/10000 train_time:623576ms step_avg:78.33ms +[2025-09-02 05:57:22] [Rank 0] step:7961/10000 train_time:623576ms step_avg:78.33ms +[2025-09-02 05:57:24] [Rank 0] step:7981/10000 train_time:625223ms step_avg:78.34ms +[2025-09-02 05:57:24] [Rank 0] step:7981/10000 train_time:625223ms step_avg:78.34ms +[2025-09-02 05:57:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:57:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:57:37] [Rank 0] PRINT: step:8000/10000 val_loss:3.6899 svd_entropy: attn_qk:H=0.7911,top10E=0.24,eRank=206.3,q75/q25=48.62 attn_vo:H=0.8226,top10E=0.11,eRank=313.7,q75/q25=inf mlp_w1:H=0.9131,top10E=0.13,eRank=434.0,q75/q25=4.33 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6969,top10E=0.18,eRank=152.5,q75/q25=inf train_time:626962ms step_avg:78.37ms +[2025-09-02 05:57:37] [Rank 0] PRINT: step:8000/10000 val_loss:3.6899 svd_entropy: attn_qk:H=0.7911,top10E=0.24,eRank=206.3,q75/q25=48.62 attn_vo:H=0.8226,top10E=0.11,eRank=313.7,q75/q25=inf mlp_w1:H=0.9131,top10E=0.13,eRank=434.0,q75/q25=4.33 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6969,top10E=0.18,eRank=152.5,q75/q25=inf train_time:626962ms step_avg:78.37ms +[2025-09-02 05:57:37] [Rank 0] step:8001/10000 train_time:626977ms step_avg:78.36ms +[2025-09-02 05:57:37] [Rank 0] step:8001/10000 train_time:626977ms step_avg:78.36ms +[2025-09-02 05:57:39] [Rank 0] step:8021/10000 train_time:628565ms step_avg:78.36ms +[2025-09-02 05:57:39] [Rank 0] step:8021/10000 train_time:628565ms step_avg:78.36ms +[2025-09-02 05:57:41] [Rank 0] step:8041/10000 train_time:630223ms step_avg:78.38ms +[2025-09-02 05:57:41] [Rank 0] step:8041/10000 train_time:630223ms step_avg:78.38ms +[2025-09-02 05:57:42] [Rank 0] step:8061/10000 train_time:631877ms step_avg:78.39ms +[2025-09-02 05:57:42] [Rank 0] step:8061/10000 train_time:631877ms step_avg:78.39ms +[2025-09-02 05:57:44] [Rank 0] step:8081/10000 train_time:633518ms step_avg:78.40ms +[2025-09-02 05:57:44] [Rank 0] step:8081/10000 train_time:633518ms step_avg:78.40ms +[2025-09-02 05:57:46] [Rank 0] step:8101/10000 train_time:635178ms step_avg:78.41ms +[2025-09-02 05:57:46] [Rank 0] step:8101/10000 train_time:635178ms step_avg:78.41ms +[2025-09-02 05:57:47] [Rank 0] step:8121/10000 train_time:636830ms step_avg:78.42ms +[2025-09-02 05:57:47] [Rank 0] step:8121/10000 train_time:636830ms step_avg:78.42ms +[2025-09-02 05:57:49] [Rank 0] 
step:8141/10000 train_time:638655ms step_avg:78.45ms +[2025-09-02 05:57:49] [Rank 0] step:8141/10000 train_time:638655ms step_avg:78.45ms +[2025-09-02 05:57:51] [Rank 0] step:8161/10000 train_time:640322ms step_avg:78.46ms +[2025-09-02 05:57:51] [Rank 0] step:8161/10000 train_time:640322ms step_avg:78.46ms +[2025-09-02 05:57:53] [Rank 0] step:8181/10000 train_time:642004ms step_avg:78.48ms +[2025-09-02 05:57:53] [Rank 0] step:8181/10000 train_time:642004ms step_avg:78.48ms +[2025-09-02 05:57:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:57:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:58:06] [Rank 0] PRINT: step:8200/10000 val_loss:3.6804 svd_entropy: attn_qk:H=0.7914,top10E=0.24,eRank=206.7,q75/q25=48.47 attn_vo:H=0.8230,top10E=0.11,eRank=314.3,q75/q25=inf mlp_w1:H=0.9135,top10E=0.13,eRank=435.1,q75/q25=4.32 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6976,top10E=0.18,eRank=153.2,q75/q25=inf train_time:643798ms step_avg:78.51ms +[2025-09-02 05:58:06] [Rank 0] PRINT: step:8200/10000 val_loss:3.6804 svd_entropy: attn_qk:H=0.7914,top10E=0.24,eRank=206.7,q75/q25=48.47 attn_vo:H=0.8230,top10E=0.11,eRank=314.3,q75/q25=inf mlp_w1:H=0.9135,top10E=0.13,eRank=435.1,q75/q25=4.32 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.89 vo_prod:H=0.6976,top10E=0.18,eRank=153.2,q75/q25=inf train_time:643798ms step_avg:78.51ms +[2025-09-02 05:58:06] [Rank 0] step:8201/10000 train_time:643812ms step_avg:78.50ms +[2025-09-02 05:58:06] [Rank 0] step:8201/10000 train_time:643812ms step_avg:78.50ms +[2025-09-02 05:58:08] [Rank 0] step:8221/10000 train_time:645427ms step_avg:78.51ms +[2025-09-02 05:58:08] [Rank 0] step:8221/10000 train_time:645427ms step_avg:78.51ms +[2025-09-02 05:58:09] [Rank 0] step:8241/10000 train_time:647112ms step_avg:78.52ms +[2025-09-02 
05:58:09] [Rank 0] step:8241/10000 train_time:647112ms step_avg:78.52ms +[2025-09-02 05:58:11] [Rank 0] step:8261/10000 train_time:648791ms step_avg:78.54ms +[2025-09-02 05:58:11] [Rank 0] step:8261/10000 train_time:648791ms step_avg:78.54ms +[2025-09-02 05:58:13] [Rank 0] step:8281/10000 train_time:650472ms step_avg:78.55ms +[2025-09-02 05:58:13] [Rank 0] step:8281/10000 train_time:650472ms step_avg:78.55ms +[2025-09-02 05:58:15] [Rank 0] step:8301/10000 train_time:652152ms step_avg:78.56ms +[2025-09-02 05:58:15] [Rank 0] step:8301/10000 train_time:652152ms step_avg:78.56ms +[2025-09-02 05:58:16] [Rank 0] step:8321/10000 train_time:653823ms step_avg:78.58ms +[2025-09-02 05:58:16] [Rank 0] step:8321/10000 train_time:653823ms step_avg:78.58ms +[2025-09-02 05:58:18] [Rank 0] step:8341/10000 train_time:655506ms step_avg:78.59ms +[2025-09-02 05:58:18] [Rank 0] step:8341/10000 train_time:655506ms step_avg:78.59ms +[2025-09-02 05:58:20] [Rank 0] step:8361/10000 train_time:657191ms step_avg:78.60ms +[2025-09-02 05:58:20] [Rank 0] step:8361/10000 train_time:657191ms step_avg:78.60ms +[2025-09-02 05:58:21] [Rank 0] step:8381/10000 train_time:658869ms step_avg:78.61ms +[2025-09-02 05:58:21] [Rank 0] step:8381/10000 train_time:658869ms step_avg:78.61ms +[2025-09-02 05:58:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:58:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:58:35] [Rank 0] PRINT: step:8400/10000 val_loss:3.6686 svd_entropy: attn_qk:H=0.7916,top10E=0.24,eRank=206.9,q75/q25=48.39 attn_vo:H=0.8234,top10E=0.11,eRank=314.9,q75/q25=inf mlp_w1:H=0.9138,top10E=0.13,eRank=436.1,q75/q25=4.31 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.6983,top10E=0.18,eRank=154.0,q75/q25=inf train_time:660633ms step_avg:78.65ms +[2025-09-02 05:58:35] [Rank 0] PRINT: step:8400/10000 val_loss:3.6686 svd_entropy: attn_qk:H=0.7916,top10E=0.24,eRank=206.9,q75/q25=48.39 attn_vo:H=0.8234,top10E=0.11,eRank=314.9,q75/q25=inf mlp_w1:H=0.9138,top10E=0.13,eRank=436.1,q75/q25=4.31 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.6983,top10E=0.18,eRank=154.0,q75/q25=inf train_time:660633ms step_avg:78.65ms +[2025-09-02 05:58:35] [Rank 0] step:8401/10000 train_time:660648ms step_avg:78.64ms +[2025-09-02 05:58:35] [Rank 0] step:8401/10000 train_time:660648ms step_avg:78.64ms +[2025-09-02 05:58:37] [Rank 0] step:8421/10000 train_time:662233ms step_avg:78.64ms +[2025-09-02 05:58:37] [Rank 0] step:8421/10000 train_time:662233ms step_avg:78.64ms +[2025-09-02 05:58:38] [Rank 0] step:8441/10000 train_time:663905ms step_avg:78.65ms +[2025-09-02 05:58:38] [Rank 0] step:8441/10000 train_time:663905ms step_avg:78.65ms +[2025-09-02 05:58:40] [Rank 0] step:8461/10000 train_time:665581ms step_avg:78.66ms +[2025-09-02 05:58:40] [Rank 0] step:8461/10000 train_time:665581ms step_avg:78.66ms +[2025-09-02 05:58:42] [Rank 0] step:8481/10000 train_time:667261ms step_avg:78.68ms +[2025-09-02 05:58:42] [Rank 0] step:8481/10000 train_time:667261ms step_avg:78.68ms +[2025-09-02 05:58:43] [Rank 0] step:8501/10000 train_time:668962ms step_avg:78.69ms +[2025-09-02 05:58:43] [Rank 0] step:8501/10000 train_time:668962ms step_avg:78.69ms +[2025-09-02 05:58:45] [Rank 0] step:8521/10000 train_time:670645ms step_avg:78.70ms +[2025-09-02 05:58:45] [Rank 0] step:8521/10000 train_time:670645ms step_avg:78.70ms +[2025-09-02 05:58:47] [Rank 0] 
step:8541/10000 train_time:672337ms step_avg:78.72ms +[2025-09-02 05:58:47] [Rank 0] step:8541/10000 train_time:672337ms step_avg:78.72ms +[2025-09-02 05:58:48] [Rank 0] step:8561/10000 train_time:674019ms step_avg:78.73ms +[2025-09-02 05:58:48] [Rank 0] step:8561/10000 train_time:674019ms step_avg:78.73ms +[2025-09-02 05:58:50] [Rank 0] step:8581/10000 train_time:675701ms step_avg:78.74ms +[2025-09-02 05:58:50] [Rank 0] step:8581/10000 train_time:675701ms step_avg:78.74ms +[2025-09-02 05:58:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:58:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:59:04] [Rank 0] PRINT: step:8600/10000 val_loss:3.6597 svd_entropy: attn_qk:H=0.7919,top10E=0.24,eRank=207.2,q75/q25=48.35 attn_vo:H=0.8236,top10E=0.11,eRank=315.4,q75/q25=inf mlp_w1:H=0.9141,top10E=0.13,eRank=436.9,q75/q25=4.30 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.89 vo_prod:H=0.6989,top10E=0.18,eRank=154.6,q75/q25=inf train_time:677459ms step_avg:78.77ms +[2025-09-02 05:59:04] [Rank 0] PRINT: step:8600/10000 val_loss:3.6597 svd_entropy: attn_qk:H=0.7919,top10E=0.24,eRank=207.2,q75/q25=48.35 attn_vo:H=0.8236,top10E=0.11,eRank=315.4,q75/q25=inf mlp_w1:H=0.9141,top10E=0.13,eRank=436.9,q75/q25=4.30 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.89 vo_prod:H=0.6989,top10E=0.18,eRank=154.6,q75/q25=inf train_time:677459ms step_avg:78.77ms +[2025-09-02 05:59:04] [Rank 0] step:8601/10000 train_time:677474ms step_avg:78.77ms +[2025-09-02 05:59:04] [Rank 0] step:8601/10000 train_time:677474ms step_avg:78.77ms +[2025-09-02 05:59:05] [Rank 0] step:8621/10000 train_time:679068ms step_avg:78.77ms +[2025-09-02 05:59:05] [Rank 0] step:8621/10000 train_time:679068ms step_avg:78.77ms +[2025-09-02 05:59:07] [Rank 0] step:8641/10000 train_time:680746ms step_avg:78.78ms +[2025-09-02 
05:59:07] [Rank 0] step:8641/10000 train_time:680746ms step_avg:78.78ms +[2025-09-02 05:59:09] [Rank 0] step:8661/10000 train_time:682426ms step_avg:78.79ms +[2025-09-02 05:59:09] [Rank 0] step:8661/10000 train_time:682426ms step_avg:78.79ms +[2025-09-02 05:59:10] [Rank 0] step:8681/10000 train_time:684104ms step_avg:78.80ms +[2025-09-02 05:59:10] [Rank 0] step:8681/10000 train_time:684104ms step_avg:78.80ms +[2025-09-02 05:59:12] [Rank 0] step:8701/10000 train_time:685776ms step_avg:78.82ms +[2025-09-02 05:59:12] [Rank 0] step:8701/10000 train_time:685776ms step_avg:78.82ms +[2025-09-02 05:59:14] [Rank 0] step:8721/10000 train_time:687457ms step_avg:78.83ms +[2025-09-02 05:59:14] [Rank 0] step:8721/10000 train_time:687457ms step_avg:78.83ms +[2025-09-02 05:59:15] [Rank 0] step:8741/10000 train_time:689124ms step_avg:78.84ms +[2025-09-02 05:59:15] [Rank 0] step:8741/10000 train_time:689124ms step_avg:78.84ms +[2025-09-02 05:59:17] [Rank 0] step:8761/10000 train_time:690802ms step_avg:78.85ms +[2025-09-02 05:59:17] [Rank 0] step:8761/10000 train_time:690802ms step_avg:78.85ms +[2025-09-02 05:59:19] [Rank 0] step:8781/10000 train_time:692486ms step_avg:78.86ms +[2025-09-02 05:59:19] [Rank 0] step:8781/10000 train_time:692486ms step_avg:78.86ms +[2025-09-02 05:59:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:59:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:59:32] [Rank 0] PRINT: step:8800/10000 val_loss:3.6497 svd_entropy: attn_qk:H=0.7921,top10E=0.24,eRank=207.4,q75/q25=48.13 attn_vo:H=0.8239,top10E=0.11,eRank=315.9,q75/q25=inf mlp_w1:H=0.9144,top10E=0.13,eRank=437.7,q75/q25=4.29 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.6994,top10E=0.18,eRank=155.2,q75/q25=inf train_time:694257ms step_avg:78.89ms +[2025-09-02 05:59:32] [Rank 0] PRINT: step:8800/10000 val_loss:3.6497 svd_entropy: attn_qk:H=0.7921,top10E=0.24,eRank=207.4,q75/q25=48.13 attn_vo:H=0.8239,top10E=0.11,eRank=315.9,q75/q25=inf mlp_w1:H=0.9144,top10E=0.13,eRank=437.7,q75/q25=4.29 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.6994,top10E=0.18,eRank=155.2,q75/q25=inf train_time:694257ms step_avg:78.89ms +[2025-09-02 05:59:32] [Rank 0] step:8801/10000 train_time:694273ms step_avg:78.89ms +[2025-09-02 05:59:32] [Rank 0] step:8801/10000 train_time:694273ms step_avg:78.89ms +[2025-09-02 05:59:34] [Rank 0] step:8821/10000 train_time:695870ms step_avg:78.89ms +[2025-09-02 05:59:34] [Rank 0] step:8821/10000 train_time:695870ms step_avg:78.89ms +[2025-09-02 05:59:36] [Rank 0] step:8841/10000 train_time:697567ms step_avg:78.90ms +[2025-09-02 05:59:36] [Rank 0] step:8841/10000 train_time:697567ms step_avg:78.90ms +[2025-09-02 05:59:38] [Rank 0] step:8861/10000 train_time:699241ms step_avg:78.91ms +[2025-09-02 05:59:38] [Rank 0] step:8861/10000 train_time:699241ms step_avg:78.91ms +[2025-09-02 05:59:39] [Rank 0] step:8881/10000 train_time:700918ms step_avg:78.92ms +[2025-09-02 05:59:39] [Rank 0] step:8881/10000 train_time:700918ms step_avg:78.92ms +[2025-09-02 05:59:41] [Rank 0] step:8901/10000 train_time:702599ms step_avg:78.93ms +[2025-09-02 05:59:41] [Rank 0] step:8901/10000 train_time:702599ms step_avg:78.93ms +[2025-09-02 05:59:43] [Rank 0] step:8921/10000 train_time:704292ms step_avg:78.95ms +[2025-09-02 05:59:43] [Rank 0] step:8921/10000 train_time:704292ms step_avg:78.95ms +[2025-09-02 05:59:44] [Rank 0] 
step:8941/10000 train_time:705980ms step_avg:78.96ms +[2025-09-02 05:59:44] [Rank 0] step:8941/10000 train_time:705980ms step_avg:78.96ms +[2025-09-02 05:59:46] [Rank 0] step:8961/10000 train_time:707658ms step_avg:78.97ms +[2025-09-02 05:59:46] [Rank 0] step:8961/10000 train_time:707658ms step_avg:78.97ms +[2025-09-02 05:59:48] [Rank 0] step:8981/10000 train_time:709336ms step_avg:78.98ms +[2025-09-02 05:59:48] [Rank 0] step:8981/10000 train_time:709336ms step_avg:78.98ms +[2025-09-02 05:59:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:59:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:00:01] [Rank 0] PRINT: step:9000/10000 val_loss:3.6408 svd_entropy: attn_qk:H=0.7923,top10E=0.24,eRank=207.6,q75/q25=47.98 attn_vo:H=0.8242,top10E=0.11,eRank=316.3,q75/q25=inf mlp_w1:H=0.9146,top10E=0.13,eRank=438.3,q75/q25=4.28 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.6999,top10E=0.18,eRank=155.8,q75/q25=inf train_time:711098ms step_avg:79.01ms +[2025-09-02 06:00:01] [Rank 0] PRINT: step:9000/10000 val_loss:3.6408 svd_entropy: attn_qk:H=0.7923,top10E=0.24,eRank=207.6,q75/q25=47.98 attn_vo:H=0.8242,top10E=0.11,eRank=316.3,q75/q25=inf mlp_w1:H=0.9146,top10E=0.13,eRank=438.3,q75/q25=4.28 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.6999,top10E=0.18,eRank=155.8,q75/q25=inf train_time:711098ms step_avg:79.01ms +[2025-09-02 06:00:01] [Rank 0] step:9001/10000 train_time:711114ms step_avg:79.00ms +[2025-09-02 06:00:01] [Rank 0] step:9001/10000 train_time:711114ms step_avg:79.00ms +[2025-09-02 06:00:03] [Rank 0] step:9021/10000 train_time:712713ms step_avg:79.01ms +[2025-09-02 06:00:03] [Rank 0] step:9021/10000 train_time:712713ms step_avg:79.01ms +[2025-09-02 06:00:05] [Rank 0] step:9041/10000 train_time:714393ms step_avg:79.02ms +[2025-09-02 
06:00:05] [Rank 0] step:9041/10000 train_time:714393ms step_avg:79.02ms +[2025-09-02 06:00:06] [Rank 0] step:9061/10000 train_time:716081ms step_avg:79.03ms +[2025-09-02 06:00:06] [Rank 0] step:9061/10000 train_time:716081ms step_avg:79.03ms +[2025-09-02 06:00:08] [Rank 0] step:9081/10000 train_time:717772ms step_avg:79.04ms +[2025-09-02 06:00:08] [Rank 0] step:9081/10000 train_time:717772ms step_avg:79.04ms +[2025-09-02 06:00:10] [Rank 0] step:9101/10000 train_time:719469ms step_avg:79.05ms +[2025-09-02 06:00:10] [Rank 0] step:9101/10000 train_time:719469ms step_avg:79.05ms +[2025-09-02 06:00:11] [Rank 0] step:9121/10000 train_time:721155ms step_avg:79.07ms +[2025-09-02 06:00:11] [Rank 0] step:9121/10000 train_time:721155ms step_avg:79.07ms +[2025-09-02 06:00:13] [Rank 0] step:9141/10000 train_time:722827ms step_avg:79.08ms +[2025-09-02 06:00:13] [Rank 0] step:9141/10000 train_time:722827ms step_avg:79.08ms +[2025-09-02 06:00:15] [Rank 0] step:9161/10000 train_time:724506ms step_avg:79.09ms +[2025-09-02 06:00:15] [Rank 0] step:9161/10000 train_time:724506ms step_avg:79.09ms +[2025-09-02 06:00:17] [Rank 0] step:9181/10000 train_time:726220ms step_avg:79.10ms +[2025-09-02 06:00:17] [Rank 0] step:9181/10000 train_time:726220ms step_avg:79.10ms +[2025-09-02 06:00:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:00:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:00:30] [Rank 0] PRINT: step:9200/10000 val_loss:3.6330 svd_entropy: attn_qk:H=0.7924,top10E=0.23,eRank=207.8,q75/q25=47.82 attn_vo:H=0.8244,top10E=0.11,eRank=316.7,q75/q25=inf mlp_w1:H=0.9148,top10E=0.13,eRank=438.9,q75/q25=4.27 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7004,top10E=0.18,eRank=156.3,q75/q25=inf train_time:727982ms step_avg:79.13ms +[2025-09-02 06:00:30] [Rank 0] PRINT: step:9200/10000 val_loss:3.6330 svd_entropy: attn_qk:H=0.7924,top10E=0.23,eRank=207.8,q75/q25=47.82 attn_vo:H=0.8244,top10E=0.11,eRank=316.7,q75/q25=inf mlp_w1:H=0.9148,top10E=0.13,eRank=438.9,q75/q25=4.27 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7004,top10E=0.18,eRank=156.3,q75/q25=inf train_time:727982ms step_avg:79.13ms +[2025-09-02 06:00:30] [Rank 0] step:9201/10000 train_time:727998ms step_avg:79.12ms +[2025-09-02 06:00:30] [Rank 0] step:9201/10000 train_time:727998ms step_avg:79.12ms +[2025-09-02 06:00:32] [Rank 0] step:9221/10000 train_time:729652ms step_avg:79.13ms +[2025-09-02 06:00:32] [Rank 0] step:9221/10000 train_time:729652ms step_avg:79.13ms +[2025-09-02 06:00:34] [Rank 0] step:9241/10000 train_time:731344ms step_avg:79.14ms +[2025-09-02 06:00:34] [Rank 0] step:9241/10000 train_time:731344ms step_avg:79.14ms +[2025-09-02 06:00:35] [Rank 0] step:9261/10000 train_time:733037ms step_avg:79.15ms +[2025-09-02 06:00:35] [Rank 0] step:9261/10000 train_time:733037ms step_avg:79.15ms +[2025-09-02 06:00:37] [Rank 0] step:9281/10000 train_time:734717ms step_avg:79.16ms +[2025-09-02 06:00:37] [Rank 0] step:9281/10000 train_time:734717ms step_avg:79.16ms +[2025-09-02 06:00:39] [Rank 0] step:9301/10000 train_time:736397ms step_avg:79.17ms +[2025-09-02 06:00:39] [Rank 0] step:9301/10000 train_time:736397ms step_avg:79.17ms +[2025-09-02 06:00:40] [Rank 0] step:9321/10000 train_time:738083ms step_avg:79.18ms +[2025-09-02 06:00:40] [Rank 0] step:9321/10000 train_time:738083ms step_avg:79.18ms +[2025-09-02 06:00:42] [Rank 0] 
step:9341/10000 train_time:739769ms step_avg:79.20ms +[2025-09-02 06:00:42] [Rank 0] step:9341/10000 train_time:739769ms step_avg:79.20ms +[2025-09-02 06:00:44] [Rank 0] step:9361/10000 train_time:741458ms step_avg:79.21ms +[2025-09-02 06:00:44] [Rank 0] step:9361/10000 train_time:741458ms step_avg:79.21ms +[2025-09-02 06:00:45] [Rank 0] step:9381/10000 train_time:743158ms step_avg:79.22ms +[2025-09-02 06:00:45] [Rank 0] step:9381/10000 train_time:743158ms step_avg:79.22ms +[2025-09-02 06:00:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:00:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:00:59] [Rank 0] PRINT: step:9400/10000 val_loss:3.6250 svd_entropy: attn_qk:H=0.7926,top10E=0.23,eRank=207.9,q75/q25=47.73 attn_vo:H=0.8246,top10E=0.11,eRank=317.0,q75/q25=inf mlp_w1:H=0.9150,top10E=0.13,eRank=439.4,q75/q25=4.26 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7007,top10E=0.18,eRank=156.7,q75/q25=inf train_time:744932ms step_avg:79.25ms +[2025-09-02 06:00:59] [Rank 0] PRINT: step:9400/10000 val_loss:3.6250 svd_entropy: attn_qk:H=0.7926,top10E=0.23,eRank=207.9,q75/q25=47.73 attn_vo:H=0.8246,top10E=0.11,eRank=317.0,q75/q25=inf mlp_w1:H=0.9150,top10E=0.13,eRank=439.4,q75/q25=4.26 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7007,top10E=0.18,eRank=156.7,q75/q25=inf train_time:744932ms step_avg:79.25ms +[2025-09-02 06:00:59] [Rank 0] step:9401/10000 train_time:744948ms step_avg:79.24ms +[2025-09-02 06:00:59] [Rank 0] step:9401/10000 train_time:744948ms step_avg:79.24ms +[2025-09-02 06:01:01] [Rank 0] step:9421/10000 train_time:746539ms step_avg:79.24ms +[2025-09-02 06:01:01] [Rank 0] step:9421/10000 train_time:746539ms step_avg:79.24ms +[2025-09-02 06:01:03] [Rank 0] step:9441/10000 train_time:748223ms step_avg:79.25ms +[2025-09-02 
06:01:03] [Rank 0] step:9441/10000 train_time:748223ms step_avg:79.25ms +[2025-09-02 06:01:04] [Rank 0] step:9461/10000 train_time:749913ms step_avg:79.26ms +[2025-09-02 06:01:04] [Rank 0] step:9461/10000 train_time:749913ms step_avg:79.26ms +[2025-09-02 06:01:06] [Rank 0] step:9481/10000 train_time:751601ms step_avg:79.27ms +[2025-09-02 06:01:06] [Rank 0] step:9481/10000 train_time:751601ms step_avg:79.27ms +[2025-09-02 06:01:08] [Rank 0] step:9501/10000 train_time:753300ms step_avg:79.29ms +[2025-09-02 06:01:08] [Rank 0] step:9501/10000 train_time:753300ms step_avg:79.29ms +[2025-09-02 06:01:10] [Rank 0] step:9521/10000 train_time:755080ms step_avg:79.31ms +[2025-09-02 06:01:10] [Rank 0] step:9521/10000 train_time:755080ms step_avg:79.31ms +[2025-09-02 06:01:11] [Rank 0] step:9541/10000 train_time:756763ms step_avg:79.32ms +[2025-09-02 06:01:11] [Rank 0] step:9541/10000 train_time:756763ms step_avg:79.32ms +[2025-09-02 06:01:13] [Rank 0] step:9561/10000 train_time:758442ms step_avg:79.33ms +[2025-09-02 06:01:13] [Rank 0] step:9561/10000 train_time:758442ms step_avg:79.33ms +[2025-09-02 06:01:15] [Rank 0] step:9581/10000 train_time:760126ms step_avg:79.34ms +[2025-09-02 06:01:15] [Rank 0] step:9581/10000 train_time:760126ms step_avg:79.34ms +[2025-09-02 06:01:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:01:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:01:28] [Rank 0] PRINT: step:9600/10000 val_loss:3.6189 svd_entropy: attn_qk:H=0.7927,top10E=0.23,eRank=208.1,q75/q25=47.62 attn_vo:H=0.8247,top10E=0.11,eRank=317.2,q75/q25=inf mlp_w1:H=0.9151,top10E=0.13,eRank=439.8,q75/q25=4.25 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7011,top10E=0.18,eRank=157.0,q75/q25=inf train_time:761912ms step_avg:79.37ms +[2025-09-02 06:01:28] [Rank 0] PRINT: step:9600/10000 val_loss:3.6189 svd_entropy: attn_qk:H=0.7927,top10E=0.23,eRank=208.1,q75/q25=47.62 attn_vo:H=0.8247,top10E=0.11,eRank=317.2,q75/q25=inf mlp_w1:H=0.9151,top10E=0.13,eRank=439.8,q75/q25=4.25 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7011,top10E=0.18,eRank=157.0,q75/q25=inf train_time:761912ms step_avg:79.37ms +[2025-09-02 06:01:28] [Rank 0] step:9601/10000 train_time:761927ms step_avg:79.36ms +[2025-09-02 06:01:28] [Rank 0] step:9601/10000 train_time:761927ms step_avg:79.36ms +[2025-09-02 06:01:30] [Rank 0] step:9621/10000 train_time:763529ms step_avg:79.36ms +[2025-09-02 06:01:30] [Rank 0] step:9621/10000 train_time:763529ms step_avg:79.36ms +[2025-09-02 06:01:32] [Rank 0] step:9641/10000 train_time:765216ms step_avg:79.37ms +[2025-09-02 06:01:32] [Rank 0] step:9641/10000 train_time:765216ms step_avg:79.37ms +[2025-09-02 06:01:33] [Rank 0] step:9661/10000 train_time:766931ms step_avg:79.38ms +[2025-09-02 06:01:33] [Rank 0] step:9661/10000 train_time:766931ms step_avg:79.38ms +[2025-09-02 06:01:35] [Rank 0] step:9681/10000 train_time:768678ms step_avg:79.40ms +[2025-09-02 06:01:35] [Rank 0] step:9681/10000 train_time:768678ms step_avg:79.40ms +[2025-09-02 06:01:37] [Rank 0] step:9701/10000 train_time:770400ms step_avg:79.41ms +[2025-09-02 06:01:37] [Rank 0] step:9701/10000 train_time:770400ms step_avg:79.41ms +[2025-09-02 06:01:39] [Rank 0] step:9721/10000 train_time:772100ms step_avg:79.43ms +[2025-09-02 06:01:39] [Rank 0] step:9721/10000 train_time:772100ms step_avg:79.43ms +[2025-09-02 06:01:40] [Rank 0] 
step:9741/10000 train_time:773824ms step_avg:79.44ms +[2025-09-02 06:01:40] [Rank 0] step:9741/10000 train_time:773824ms step_avg:79.44ms +[2025-09-02 06:01:42] [Rank 0] step:9761/10000 train_time:775538ms step_avg:79.45ms +[2025-09-02 06:01:42] [Rank 0] step:9761/10000 train_time:775538ms step_avg:79.45ms +[2025-09-02 06:01:44] [Rank 0] step:9781/10000 train_time:777255ms step_avg:79.47ms +[2025-09-02 06:01:44] [Rank 0] step:9781/10000 train_time:777255ms step_avg:79.47ms +[2025-09-02 06:01:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:01:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:01:57] [Rank 0] PRINT: step:9800/10000 val_loss:3.6115 svd_entropy: attn_qk:H=0.7928,top10E=0.23,eRank=208.1,q75/q25=47.51 attn_vo:H=0.8248,top10E=0.11,eRank=317.4,q75/q25=inf mlp_w1:H=0.9152,top10E=0.13,eRank=440.1,q75/q25=4.25 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7014,top10E=0.18,eRank=157.3,q75/q25=inf train_time:779066ms step_avg:79.50ms +[2025-09-02 06:01:57] [Rank 0] PRINT: step:9800/10000 val_loss:3.6115 svd_entropy: attn_qk:H=0.7928,top10E=0.23,eRank=208.1,q75/q25=47.51 attn_vo:H=0.8248,top10E=0.11,eRank=317.4,q75/q25=inf mlp_w1:H=0.9152,top10E=0.13,eRank=440.1,q75/q25=4.25 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7014,top10E=0.18,eRank=157.3,q75/q25=inf train_time:779066ms step_avg:79.50ms +[2025-09-02 06:01:58] [Rank 0] step:9801/10000 train_time:779082ms step_avg:79.49ms +[2025-09-02 06:01:58] [Rank 0] step:9801/10000 train_time:779082ms step_avg:79.49ms +[2025-09-02 06:01:59] [Rank 0] step:9821/10000 train_time:780717ms step_avg:79.49ms +[2025-09-02 06:01:59] [Rank 0] step:9821/10000 train_time:780717ms step_avg:79.49ms +[2025-09-02 06:02:01] [Rank 0] step:9841/10000 train_time:782438ms step_avg:79.51ms +[2025-09-02 
06:02:01] [Rank 0] step:9841/10000 train_time:782438ms step_avg:79.51ms +[2025-09-02 06:02:03] [Rank 0] step:9861/10000 train_time:784136ms step_avg:79.52ms +[2025-09-02 06:02:03] [Rank 0] step:9861/10000 train_time:784136ms step_avg:79.52ms +[2025-09-02 06:02:04] [Rank 0] step:9881/10000 train_time:785832ms step_avg:79.53ms +[2025-09-02 06:02:04] [Rank 0] step:9881/10000 train_time:785832ms step_avg:79.53ms +[2025-09-02 06:02:06] [Rank 0] step:9901/10000 train_time:787543ms step_avg:79.54ms +[2025-09-02 06:02:06] [Rank 0] step:9901/10000 train_time:787543ms step_avg:79.54ms +[2025-09-02 06:02:08] [Rank 0] step:9921/10000 train_time:789254ms step_avg:79.55ms +[2025-09-02 06:02:08] [Rank 0] step:9921/10000 train_time:789254ms step_avg:79.55ms +[2025-09-02 06:02:10] [Rank 0] step:9941/10000 train_time:790965ms step_avg:79.57ms +[2025-09-02 06:02:10] [Rank 0] step:9941/10000 train_time:790965ms step_avg:79.57ms +[2025-09-02 06:02:11] [Rank 0] step:9961/10000 train_time:792671ms step_avg:79.58ms +[2025-09-02 06:02:11] [Rank 0] step:9961/10000 train_time:792671ms step_avg:79.58ms +[2025-09-02 06:02:13] [Rank 0] step:9981/10000 train_time:794380ms step_avg:79.59ms +[2025-09-02 06:02:13] [Rank 0] step:9981/10000 train_time:794380ms step_avg:79.59ms +[2025-09-02 06:02:15] [Rank 0] step:10000/10000 train_time:796006ms step_avg:79.60ms +[2025-09-02 06:02:15] [Rank 0] step:10000/10000 train_time:796006ms step_avg:79.60ms +[2025-09-02 06:02:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:02:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:02:27] [Rank 0] PRINT: step:10000/10000 val_loss:3.6059 svd_entropy: attn_qk:H=0.7928,top10E=0.23,eRank=208.2,q75/q25=47.43 attn_vo:H=0.8249,top10E=0.11,eRank=317.6,q75/q25=inf mlp_w1:H=0.9153,top10E=0.13,eRank=440.3,q75/q25=4.24 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7015,top10E=0.18,eRank=157.5,q75/q25=inf train_time:796189ms step_avg:79.62ms +[2025-09-02 06:02:27] [Rank 0] PRINT: step:10000/10000 val_loss:3.6059 svd_entropy: attn_qk:H=0.7928,top10E=0.23,eRank=208.2,q75/q25=47.43 attn_vo:H=0.8249,top10E=0.11,eRank=317.6,q75/q25=inf mlp_w1:H=0.9153,top10E=0.13,eRank=440.3,q75/q25=4.24 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.88 vo_prod:H=0.7015,top10E=0.18,eRank=157.5,q75/q25=inf train_time:796189ms step_avg:79.62ms +[2025-09-02 06:02:27] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 06:02:27 2025 --- +[2025-09-02 06:02:27] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 06:02:27 2025 --- +[2025-09-02 06:02:27] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15076 MiB +[2025-09-02 06:02:27] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15076 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_43/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..d09e7a2d08e48c74f09fbccbd712778b1e4d511f --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.4, + 
"vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "1439cf05-d04a-42fe-9955-326def4d0a3e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_43/training_log_1439cf05-d04a-42fe-9955-326def4d0a3e.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_43/training_log_1439cf05-d04a-42fe-9955-326def4d0a3e.txt new file mode 100644 index 0000000000000000000000000000000000000000..7098bd97e0d6abcdca24e9df71f52063576fb94a --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_43/training_log_1439cf05-d04a-42fe-9955-326def4d0a3e.txt @@ -0,0 +1,2984 @@ +[2025-09-02 06:51:02] [Rank 0] PRINT: --- Script Start: Tue Sep 2 06:51:02 2025 --- +[2025-09-02 06:51:02] [Rank 0] PRINT: --- Script Start: Tue Sep 2 06:51:02 2025 --- +[2025-09-02 06:51:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 06:51:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 06:51:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 06:51:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 06:51:02] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-02 06:51:02] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-02 06:51:02] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_43 +[2025-09-02 06:51:02] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_43 +[2025-09-02 06:51:02] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep 
argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of 
tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 06:51:02] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 06:51:02] [Rank 0] PRINT: Constructing model... +[2025-09-02 06:51:02] [Rank 0] PRINT: Constructing model... +[2025-09-02 06:51:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 06:51:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 06:51:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 06:51:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 06:51:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 06:51:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 06:51:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 06:51:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 06:51:05] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 06:51:05] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 06:51:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 06:51:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 06:51:05] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 06:51:05] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 06:51:05] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 06:51:05] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 06:51:05] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 06:51:05] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 06:51:05] [Rank 0] PRINT: Starting warmup... +[2025-09-02 06:51:05] [Rank 0] PRINT: Starting warmup... +[2025-09-02 06:51:45] [Rank 0] PRINT: Warmup complete. +[2025-09-02 06:51:45] [Rank 0] PRINT: Warmup complete. +[2025-09-02 06:51:45] [Rank 0] PRINT: Starting training... +[2025-09-02 06:51:45] [Rank 0] PRINT: Starting training... 
+[2025-09-02 06:51:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:51:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:52:52] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 06:52:52] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 06:52:54] [Rank 0] step:21/10000 train_time:1429ms step_avg:68.03ms +[2025-09-02 06:52:54] [Rank 0] step:21/10000 train_time:1429ms step_avg:68.03ms +[2025-09-02 06:52:55] [Rank 0] step:41/10000 train_time:2879ms step_avg:70.22ms +[2025-09-02 06:52:55] [Rank 0] step:41/10000 train_time:2879ms step_avg:70.22ms +[2025-09-02 06:52:57] [Rank 0] step:61/10000 train_time:4330ms step_avg:70.98ms +[2025-09-02 06:52:57] [Rank 0] step:61/10000 train_time:4330ms step_avg:70.98ms +[2025-09-02 06:52:58] [Rank 0] step:81/10000 train_time:5784ms step_avg:71.41ms +[2025-09-02 06:52:58] [Rank 0] step:81/10000 train_time:5784ms step_avg:71.41ms +[2025-09-02 06:53:00] [Rank 0] step:101/10000 train_time:7237ms step_avg:71.66ms +[2025-09-02 06:53:00] [Rank 0] step:101/10000 train_time:7237ms step_avg:71.66ms +[2025-09-02 06:53:01] [Rank 0] step:121/10000 train_time:8690ms step_avg:71.82ms +[2025-09-02 06:53:01] [Rank 0] step:121/10000 
train_time:8690ms step_avg:71.82ms +[2025-09-02 06:53:03] [Rank 0] step:141/10000 train_time:10143ms step_avg:71.94ms +[2025-09-02 06:53:03] [Rank 0] step:141/10000 train_time:10143ms step_avg:71.94ms +[2025-09-02 06:53:04] [Rank 0] step:161/10000 train_time:11596ms step_avg:72.03ms +[2025-09-02 06:53:04] [Rank 0] step:161/10000 train_time:11596ms step_avg:72.03ms +[2025-09-02 06:53:06] [Rank 0] step:181/10000 train_time:13051ms step_avg:72.11ms +[2025-09-02 06:53:06] [Rank 0] step:181/10000 train_time:13051ms step_avg:72.11ms +[2025-09-02 06:53:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:53:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:53:19] [Rank 0] PRINT: step:200/10000 val_loss:6.2051 svd_entropy: attn_qk:H=0.6111,top10E=0.54,eRank=97.3,q75/q25=13.15 attn_vo:H=0.5239,top10E=0.56,eRank=77.3,q75/q25=inf mlp_w1:H=0.6685,top10E=0.50,eRank=99.2,q75/q25=2.98 mlp_w2:H=0.8219,top10E=0.16,eRank=238.6,q75/q25=17.13 vo_prod:H=0.3345,top10E=0.80,eRank=15.2,q75/q25=inf train_time:14579ms step_avg:72.90ms +[2025-09-02 06:53:19] [Rank 0] PRINT: step:200/10000 val_loss:6.2051 svd_entropy: attn_qk:H=0.6111,top10E=0.54,eRank=97.3,q75/q25=13.15 attn_vo:H=0.5239,top10E=0.56,eRank=77.3,q75/q25=inf mlp_w1:H=0.6685,top10E=0.50,eRank=99.2,q75/q25=2.98 mlp_w2:H=0.8219,top10E=0.16,eRank=238.6,q75/q25=17.13 vo_prod:H=0.3345,top10E=0.80,eRank=15.2,q75/q25=inf train_time:14579ms step_avg:72.90ms +[2025-09-02 06:53:19] [Rank 0] step:201/10000 train_time:14592ms step_avg:72.60ms +[2025-09-02 06:53:19] [Rank 0] step:201/10000 train_time:14592ms step_avg:72.60ms +[2025-09-02 06:53:20] [Rank 0] step:221/10000 train_time:15969ms step_avg:72.26ms +[2025-09-02 06:53:20] [Rank 0] step:221/10000 train_time:15969ms step_avg:72.26ms +[2025-09-02 06:53:22] [Rank 0] step:241/10000 train_time:17419ms 
step_avg:72.28ms +[2025-09-02 06:53:22] [Rank 0] step:241/10000 train_time:17419ms step_avg:72.28ms +[2025-09-02 06:53:23] [Rank 0] step:261/10000 train_time:18869ms step_avg:72.29ms +[2025-09-02 06:53:23] [Rank 0] step:261/10000 train_time:18869ms step_avg:72.29ms +[2025-09-02 06:53:25] [Rank 0] step:281/10000 train_time:20318ms step_avg:72.31ms +[2025-09-02 06:53:25] [Rank 0] step:281/10000 train_time:20318ms step_avg:72.31ms +[2025-09-02 06:53:26] [Rank 0] step:301/10000 train_time:21768ms step_avg:72.32ms +[2025-09-02 06:53:26] [Rank 0] step:301/10000 train_time:21768ms step_avg:72.32ms +[2025-09-02 06:53:28] [Rank 0] step:321/10000 train_time:23218ms step_avg:72.33ms +[2025-09-02 06:53:28] [Rank 0] step:321/10000 train_time:23218ms step_avg:72.33ms +[2025-09-02 06:53:29] [Rank 0] step:341/10000 train_time:24668ms step_avg:72.34ms +[2025-09-02 06:53:29] [Rank 0] step:341/10000 train_time:24668ms step_avg:72.34ms +[2025-09-02 06:53:30] [Rank 0] step:361/10000 train_time:26118ms step_avg:72.35ms +[2025-09-02 06:53:30] [Rank 0] step:361/10000 train_time:26118ms step_avg:72.35ms +[2025-09-02 06:53:32] [Rank 0] step:381/10000 train_time:27568ms step_avg:72.36ms +[2025-09-02 06:53:32] [Rank 0] step:381/10000 train_time:27568ms step_avg:72.36ms +[2025-09-02 06:53:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:53:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:53:45] [Rank 0] PRINT: step:400/10000 val_loss:5.7066 svd_entropy: attn_qk:H=0.6497,top10E=0.44,eRank=111.4,q75/q25=15.74 attn_vo:H=0.6090,top10E=0.41,eRank=108.8,q75/q25=inf mlp_w1:H=0.6885,top10E=0.41,eRank=116.3,q75/q25=4.56 mlp_w2:H=0.9318,top10E=0.06,eRank=489.3,q75/q25=6.17 vo_prod:H=0.4321,top10E=0.64,eRank=25.3,q75/q25=inf train_time:29093ms step_avg:72.73ms +[2025-09-02 06:53:45] [Rank 0] PRINT: step:400/10000 val_loss:5.7066 svd_entropy: attn_qk:H=0.6497,top10E=0.44,eRank=111.4,q75/q25=15.74 attn_vo:H=0.6090,top10E=0.41,eRank=108.8,q75/q25=inf mlp_w1:H=0.6885,top10E=0.41,eRank=116.3,q75/q25=4.56 mlp_w2:H=0.9318,top10E=0.06,eRank=489.3,q75/q25=6.17 vo_prod:H=0.4321,top10E=0.64,eRank=25.3,q75/q25=inf train_time:29093ms step_avg:72.73ms +[2025-09-02 06:53:45] [Rank 0] step:401/10000 train_time:29106ms step_avg:72.58ms +[2025-09-02 06:53:45] [Rank 0] step:401/10000 train_time:29106ms step_avg:72.58ms +[2025-09-02 06:53:47] [Rank 0] step:421/10000 train_time:30488ms step_avg:72.42ms +[2025-09-02 06:53:47] [Rank 0] step:421/10000 train_time:30488ms step_avg:72.42ms +[2025-09-02 06:53:48] [Rank 0] step:441/10000 train_time:31937ms step_avg:72.42ms +[2025-09-02 06:53:48] [Rank 0] step:441/10000 train_time:31937ms step_avg:72.42ms +[2025-09-02 06:53:49] [Rank 0] step:461/10000 train_time:33386ms step_avg:72.42ms +[2025-09-02 06:53:49] [Rank 0] step:461/10000 train_time:33386ms step_avg:72.42ms +[2025-09-02 06:53:51] [Rank 0] step:481/10000 train_time:34835ms step_avg:72.42ms +[2025-09-02 06:53:51] [Rank 0] step:481/10000 train_time:34835ms step_avg:72.42ms +[2025-09-02 06:53:52] [Rank 0] step:501/10000 train_time:36285ms step_avg:72.42ms +[2025-09-02 06:53:52] [Rank 0] step:501/10000 train_time:36285ms step_avg:72.42ms +[2025-09-02 06:53:54] [Rank 0] step:521/10000 train_time:37733ms step_avg:72.42ms +[2025-09-02 06:53:54] [Rank 0] step:521/10000 train_time:37733ms step_avg:72.42ms +[2025-09-02 06:53:55] [Rank 0] step:541/10000 train_time:39183ms 
step_avg:72.43ms +[2025-09-02 06:53:55] [Rank 0] step:541/10000 train_time:39183ms step_avg:72.43ms +[2025-09-02 06:53:57] [Rank 0] step:561/10000 train_time:40633ms step_avg:72.43ms +[2025-09-02 06:53:57] [Rank 0] step:561/10000 train_time:40633ms step_avg:72.43ms +[2025-09-02 06:53:58] [Rank 0] step:581/10000 train_time:42083ms step_avg:72.43ms +[2025-09-02 06:53:58] [Rank 0] step:581/10000 train_time:42083ms step_avg:72.43ms +[2025-09-02 06:53:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:53:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:54:11] [Rank 0] PRINT: step:600/10000 val_loss:5.4145 svd_entropy: attn_qk:H=0.6767,top10E=0.39,eRank=123.0,q75/q25=20.27 attn_vo:H=0.6554,top10E=0.33,eRank=134.5,q75/q25=inf mlp_w1:H=0.7292,top10E=0.35,eRank=145.3,q75/q25=6.25 mlp_w2:H=0.9484,top10E=0.05,eRank=545.8,q75/q25=4.55 vo_prod:H=0.4865,top10E=0.53,eRank=35.0,q75/q25=inf train_time:43607ms step_avg:72.68ms +[2025-09-02 06:54:11] [Rank 0] PRINT: step:600/10000 val_loss:5.4145 svd_entropy: attn_qk:H=0.6767,top10E=0.39,eRank=123.0,q75/q25=20.27 attn_vo:H=0.6554,top10E=0.33,eRank=134.5,q75/q25=inf mlp_w1:H=0.7292,top10E=0.35,eRank=145.3,q75/q25=6.25 mlp_w2:H=0.9484,top10E=0.05,eRank=545.8,q75/q25=4.55 vo_prod:H=0.4865,top10E=0.53,eRank=35.0,q75/q25=inf train_time:43607ms step_avg:72.68ms +[2025-09-02 06:54:11] [Rank 0] step:601/10000 train_time:43620ms step_avg:72.58ms +[2025-09-02 06:54:11] [Rank 0] step:601/10000 train_time:43620ms step_avg:72.58ms +[2025-09-02 06:54:13] [Rank 0] step:621/10000 train_time:45008ms step_avg:72.48ms +[2025-09-02 06:54:13] [Rank 0] step:621/10000 train_time:45008ms step_avg:72.48ms +[2025-09-02 06:54:14] [Rank 0] step:641/10000 train_time:46455ms step_avg:72.47ms +[2025-09-02 06:54:14] [Rank 0] step:641/10000 train_time:46455ms step_avg:72.47ms 
+[2025-09-02 06:54:16] [Rank 0] step:661/10000 train_time:47904ms step_avg:72.47ms +[2025-09-02 06:54:16] [Rank 0] step:661/10000 train_time:47904ms step_avg:72.47ms +[2025-09-02 06:54:17] [Rank 0] step:681/10000 train_time:49352ms step_avg:72.47ms +[2025-09-02 06:54:17] [Rank 0] step:681/10000 train_time:49352ms step_avg:72.47ms +[2025-09-02 06:54:18] [Rank 0] step:701/10000 train_time:50801ms step_avg:72.47ms +[2025-09-02 06:54:18] [Rank 0] step:701/10000 train_time:50801ms step_avg:72.47ms +[2025-09-02 06:54:20] [Rank 0] step:721/10000 train_time:52251ms step_avg:72.47ms +[2025-09-02 06:54:20] [Rank 0] step:721/10000 train_time:52251ms step_avg:72.47ms +[2025-09-02 06:54:21] [Rank 0] step:741/10000 train_time:53703ms step_avg:72.47ms +[2025-09-02 06:54:21] [Rank 0] step:741/10000 train_time:53703ms step_avg:72.47ms +[2025-09-02 06:54:23] [Rank 0] step:761/10000 train_time:55164ms step_avg:72.49ms +[2025-09-02 06:54:23] [Rank 0] step:761/10000 train_time:55164ms step_avg:72.49ms +[2025-09-02 06:54:24] [Rank 0] step:781/10000 train_time:56627ms step_avg:72.51ms +[2025-09-02 06:54:24] [Rank 0] step:781/10000 train_time:56627ms step_avg:72.51ms +[2025-09-02 06:54:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:54:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:54:37] [Rank 0] PRINT: step:800/10000 val_loss:5.1751 svd_entropy: attn_qk:H=0.6948,top10E=0.36,eRank=131.6,q75/q25=26.40 attn_vo:H=0.6866,top10E=0.29,eRank=156.2,q75/q25=inf mlp_w1:H=0.7599,top10E=0.31,eRank=172.0,q75/q25=7.09 mlp_w2:H=0.9540,top10E=0.05,eRank=566.6,q75/q25=4.07 vo_prod:H=0.5243,top10E=0.46,eRank=44.8,q75/q25=inf train_time:58166ms step_avg:72.71ms +[2025-09-02 06:54:37] [Rank 0] PRINT: step:800/10000 val_loss:5.1751 svd_entropy: attn_qk:H=0.6948,top10E=0.36,eRank=131.6,q75/q25=26.40 attn_vo:H=0.6866,top10E=0.29,eRank=156.2,q75/q25=inf mlp_w1:H=0.7599,top10E=0.31,eRank=172.0,q75/q25=7.09 mlp_w2:H=0.9540,top10E=0.05,eRank=566.6,q75/q25=4.07 vo_prod:H=0.5243,top10E=0.46,eRank=44.8,q75/q25=inf train_time:58166ms step_avg:72.71ms +[2025-09-02 06:54:37] [Rank 0] step:801/10000 train_time:58179ms step_avg:72.63ms +[2025-09-02 06:54:37] [Rank 0] step:801/10000 train_time:58179ms step_avg:72.63ms +[2025-09-02 06:54:39] [Rank 0] step:821/10000 train_time:59589ms step_avg:72.58ms +[2025-09-02 06:54:39] [Rank 0] step:821/10000 train_time:59589ms step_avg:72.58ms +[2025-09-02 06:54:40] [Rank 0] step:841/10000 train_time:61051ms step_avg:72.59ms +[2025-09-02 06:54:40] [Rank 0] step:841/10000 train_time:61051ms step_avg:72.59ms +[2025-09-02 06:54:42] [Rank 0] step:861/10000 train_time:62513ms step_avg:72.61ms +[2025-09-02 06:54:42] [Rank 0] step:861/10000 train_time:62513ms step_avg:72.61ms +[2025-09-02 06:54:43] [Rank 0] step:881/10000 train_time:63975ms step_avg:72.62ms +[2025-09-02 06:54:43] [Rank 0] step:881/10000 train_time:63975ms step_avg:72.62ms +[2025-09-02 06:54:45] [Rank 0] step:901/10000 train_time:65438ms step_avg:72.63ms +[2025-09-02 06:54:45] [Rank 0] step:901/10000 train_time:65438ms step_avg:72.63ms +[2025-09-02 06:54:46] [Rank 0] step:921/10000 train_time:66902ms step_avg:72.64ms +[2025-09-02 06:54:46] [Rank 0] step:921/10000 train_time:66902ms step_avg:72.64ms +[2025-09-02 06:54:48] [Rank 0] step:941/10000 train_time:68367ms 
step_avg:72.65ms +[2025-09-02 06:54:48] [Rank 0] step:941/10000 train_time:68367ms step_avg:72.65ms +[2025-09-02 06:54:49] [Rank 0] step:961/10000 train_time:69832ms step_avg:72.67ms +[2025-09-02 06:54:49] [Rank 0] step:961/10000 train_time:69832ms step_avg:72.67ms +[2025-09-02 06:54:51] [Rank 0] step:981/10000 train_time:71297ms step_avg:72.68ms +[2025-09-02 06:54:51] [Rank 0] step:981/10000 train_time:71297ms step_avg:72.68ms +[2025-09-02 06:54:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:54:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:55:04] [Rank 0] PRINT: step:1000/10000 val_loss:4.9760 svd_entropy: attn_qk:H=0.7085,top10E=0.34,eRank=139.1,q75/q25=32.23 attn_vo:H=0.7090,top10E=0.26,eRank=174.5,q75/q25=inf mlp_w1:H=0.7853,top10E=0.28,eRank=198.3,q75/q25=7.34 mlp_w2:H=0.9585,top10E=0.05,eRank=583.6,q75/q25=3.73 vo_prod:H=0.5494,top10E=0.41,eRank=52.8,q75/q25=inf train_time:72837ms step_avg:72.84ms +[2025-09-02 06:55:04] [Rank 0] PRINT: step:1000/10000 val_loss:4.9760 svd_entropy: attn_qk:H=0.7085,top10E=0.34,eRank=139.1,q75/q25=32.23 attn_vo:H=0.7090,top10E=0.26,eRank=174.5,q75/q25=inf mlp_w1:H=0.7853,top10E=0.28,eRank=198.3,q75/q25=7.34 mlp_w2:H=0.9585,top10E=0.05,eRank=583.6,q75/q25=3.73 vo_prod:H=0.5494,top10E=0.41,eRank=52.8,q75/q25=inf train_time:72837ms step_avg:72.84ms +[2025-09-02 06:55:04] [Rank 0] step:1001/10000 train_time:72850ms step_avg:72.78ms +[2025-09-02 06:55:04] [Rank 0] step:1001/10000 train_time:72850ms step_avg:72.78ms +[2025-09-02 06:55:05] [Rank 0] step:1021/10000 train_time:74241ms step_avg:72.71ms +[2025-09-02 06:55:05] [Rank 0] step:1021/10000 train_time:74241ms step_avg:72.71ms +[2025-09-02 06:55:07] [Rank 0] step:1041/10000 train_time:75705ms step_avg:72.72ms +[2025-09-02 06:55:07] [Rank 0] step:1041/10000 train_time:75705ms 
step_avg:72.72ms +[2025-09-02 06:55:08] [Rank 0] step:1061/10000 train_time:77170ms step_avg:72.73ms +[2025-09-02 06:55:08] [Rank 0] step:1061/10000 train_time:77170ms step_avg:72.73ms +[2025-09-02 06:55:10] [Rank 0] step:1081/10000 train_time:78633ms step_avg:72.74ms +[2025-09-02 06:55:10] [Rank 0] step:1081/10000 train_time:78633ms step_avg:72.74ms +[2025-09-02 06:55:11] [Rank 0] step:1101/10000 train_time:80098ms step_avg:72.75ms +[2025-09-02 06:55:11] [Rank 0] step:1101/10000 train_time:80098ms step_avg:72.75ms +[2025-09-02 06:55:13] [Rank 0] step:1121/10000 train_time:81562ms step_avg:72.76ms +[2025-09-02 06:55:13] [Rank 0] step:1121/10000 train_time:81562ms step_avg:72.76ms +[2025-09-02 06:55:14] [Rank 0] step:1141/10000 train_time:83027ms step_avg:72.77ms +[2025-09-02 06:55:14] [Rank 0] step:1141/10000 train_time:83027ms step_avg:72.77ms +[2025-09-02 06:55:16] [Rank 0] step:1161/10000 train_time:84492ms step_avg:72.78ms +[2025-09-02 06:55:16] [Rank 0] step:1161/10000 train_time:84492ms step_avg:72.78ms +[2025-09-02 06:55:17] [Rank 0] step:1181/10000 train_time:85956ms step_avg:72.78ms +[2025-09-02 06:55:17] [Rank 0] step:1181/10000 train_time:85956ms step_avg:72.78ms +[2025-09-02 06:55:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:55:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:55:30] [Rank 0] PRINT: step:1200/10000 val_loss:4.8045 svd_entropy: attn_qk:H=0.7188,top10E=0.32,eRank=145.6,q75/q25=37.97 attn_vo:H=0.7265,top10E=0.23,eRank=191.1,q75/q25=inf mlp_w1:H=0.8053,top10E=0.26,eRank=222.6,q75/q25=7.30 mlp_w2:H=0.9616,top10E=0.04,eRank=595.7,q75/q25=3.51 vo_prod:H=0.5691,top10E=0.37,eRank=60.5,q75/q25=inf train_time:87497ms step_avg:72.91ms +[2025-09-02 06:55:30] [Rank 0] PRINT: step:1200/10000 val_loss:4.8045 svd_entropy: attn_qk:H=0.7188,top10E=0.32,eRank=145.6,q75/q25=37.97 attn_vo:H=0.7265,top10E=0.23,eRank=191.1,q75/q25=inf mlp_w1:H=0.8053,top10E=0.26,eRank=222.6,q75/q25=7.30 mlp_w2:H=0.9616,top10E=0.04,eRank=595.7,q75/q25=3.51 vo_prod:H=0.5691,top10E=0.37,eRank=60.5,q75/q25=inf train_time:87497ms step_avg:72.91ms +[2025-09-02 06:55:30] [Rank 0] step:1201/10000 train_time:87509ms step_avg:72.86ms +[2025-09-02 06:55:30] [Rank 0] step:1201/10000 train_time:87509ms step_avg:72.86ms +[2025-09-02 06:55:32] [Rank 0] step:1221/10000 train_time:88916ms step_avg:72.82ms +[2025-09-02 06:55:32] [Rank 0] step:1221/10000 train_time:88916ms step_avg:72.82ms +[2025-09-02 06:55:33] [Rank 0] step:1241/10000 train_time:90378ms step_avg:72.83ms +[2025-09-02 06:55:33] [Rank 0] step:1241/10000 train_time:90378ms step_avg:72.83ms +[2025-09-02 06:55:35] [Rank 0] step:1261/10000 train_time:91841ms step_avg:72.83ms +[2025-09-02 06:55:35] [Rank 0] step:1261/10000 train_time:91841ms step_avg:72.83ms +[2025-09-02 06:55:36] [Rank 0] step:1281/10000 train_time:93304ms step_avg:72.84ms +[2025-09-02 06:55:36] [Rank 0] step:1281/10000 train_time:93304ms step_avg:72.84ms +[2025-09-02 06:55:37] [Rank 0] step:1301/10000 train_time:94766ms step_avg:72.84ms +[2025-09-02 06:55:37] [Rank 0] step:1301/10000 train_time:94766ms step_avg:72.84ms +[2025-09-02 06:55:39] [Rank 0] step:1321/10000 train_time:96231ms step_avg:72.85ms +[2025-09-02 06:55:39] [Rank 0] step:1321/10000 train_time:96231ms step_avg:72.85ms +[2025-09-02 06:55:40] [Rank 0] step:1341/10000 
train_time:97697ms step_avg:72.85ms +[2025-09-02 06:55:40] [Rank 0] step:1341/10000 train_time:97697ms step_avg:72.85ms +[2025-09-02 06:55:42] [Rank 0] step:1361/10000 train_time:99161ms step_avg:72.86ms +[2025-09-02 06:55:42] [Rank 0] step:1361/10000 train_time:99161ms step_avg:72.86ms +[2025-09-02 06:55:43] [Rank 0] step:1381/10000 train_time:100627ms step_avg:72.87ms +[2025-09-02 06:55:43] [Rank 0] step:1381/10000 train_time:100627ms step_avg:72.87ms +[2025-09-02 06:55:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:55:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:55:56] [Rank 0] PRINT: step:1400/10000 val_loss:4.7012 svd_entropy: attn_qk:H=0.7271,top10E=0.31,eRank=151.1,q75/q25=42.77 attn_vo:H=0.7402,top10E=0.21,eRank=205.0,q75/q25=inf mlp_w1:H=0.8203,top10E=0.24,eRank=243.3,q75/q25=7.11 mlp_w2:H=0.9638,top10E=0.04,eRank=604.3,q75/q25=3.37 vo_prod:H=0.5844,top10E=0.34,eRank=67.0,q75/q25=inf train_time:102165ms step_avg:72.98ms +[2025-09-02 06:55:56] [Rank 0] PRINT: step:1400/10000 val_loss:4.7012 svd_entropy: attn_qk:H=0.7271,top10E=0.31,eRank=151.1,q75/q25=42.77 attn_vo:H=0.7402,top10E=0.21,eRank=205.0,q75/q25=inf mlp_w1:H=0.8203,top10E=0.24,eRank=243.3,q75/q25=7.11 mlp_w2:H=0.9638,top10E=0.04,eRank=604.3,q75/q25=3.37 vo_prod:H=0.5844,top10E=0.34,eRank=67.0,q75/q25=inf train_time:102165ms step_avg:72.98ms +[2025-09-02 06:55:56] [Rank 0] step:1401/10000 train_time:102178ms step_avg:72.93ms +[2025-09-02 06:55:56] [Rank 0] step:1401/10000 train_time:102178ms step_avg:72.93ms +[2025-09-02 06:55:58] [Rank 0] step:1421/10000 train_time:103576ms step_avg:72.89ms +[2025-09-02 06:55:58] [Rank 0] step:1421/10000 train_time:103576ms step_avg:72.89ms +[2025-09-02 06:55:59] [Rank 0] step:1441/10000 train_time:105039ms step_avg:72.89ms +[2025-09-02 06:55:59] [Rank 0] 
step:1441/10000 train_time:105039ms step_avg:72.89ms +[2025-09-02 06:56:01] [Rank 0] step:1461/10000 train_time:106505ms step_avg:72.90ms +[2025-09-02 06:56:01] [Rank 0] step:1461/10000 train_time:106505ms step_avg:72.90ms +[2025-09-02 06:56:02] [Rank 0] step:1481/10000 train_time:107972ms step_avg:72.90ms +[2025-09-02 06:56:02] [Rank 0] step:1481/10000 train_time:107972ms step_avg:72.90ms +[2025-09-02 06:56:04] [Rank 0] step:1501/10000 train_time:109445ms step_avg:72.91ms +[2025-09-02 06:56:04] [Rank 0] step:1501/10000 train_time:109445ms step_avg:72.91ms +[2025-09-02 06:56:05] [Rank 0] step:1521/10000 train_time:110920ms step_avg:72.93ms +[2025-09-02 06:56:05] [Rank 0] step:1521/10000 train_time:110920ms step_avg:72.93ms +[2025-09-02 06:56:07] [Rank 0] step:1541/10000 train_time:112394ms step_avg:72.94ms +[2025-09-02 06:56:07] [Rank 0] step:1541/10000 train_time:112394ms step_avg:72.94ms +[2025-09-02 06:56:08] [Rank 0] step:1561/10000 train_time:113870ms step_avg:72.95ms +[2025-09-02 06:56:08] [Rank 0] step:1561/10000 train_time:113870ms step_avg:72.95ms +[2025-09-02 06:56:10] [Rank 0] step:1581/10000 train_time:115346ms step_avg:72.96ms +[2025-09-02 06:56:10] [Rank 0] step:1581/10000 train_time:115346ms step_avg:72.96ms +[2025-09-02 06:56:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:56:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:56:23] [Rank 0] PRINT: step:1600/10000 val_loss:4.5789 svd_entropy: attn_qk:H=0.7337,top10E=0.30,eRank=155.5,q75/q25=47.16 attn_vo:H=0.7511,top10E=0.20,eRank=216.8,q75/q25=inf mlp_w1:H=0.8325,top10E=0.22,eRank=261.8,q75/q25=6.89 mlp_w2:H=0.9654,top10E=0.04,eRank=610.6,q75/q25=3.26 vo_prod:H=0.5973,top10E=0.32,eRank=73.4,q75/q25=inf train_time:116896ms step_avg:73.06ms +[2025-09-02 06:56:23] [Rank 0] PRINT: step:1600/10000 val_loss:4.5789 svd_entropy: attn_qk:H=0.7337,top10E=0.30,eRank=155.5,q75/q25=47.16 attn_vo:H=0.7511,top10E=0.20,eRank=216.8,q75/q25=inf mlp_w1:H=0.8325,top10E=0.22,eRank=261.8,q75/q25=6.89 mlp_w2:H=0.9654,top10E=0.04,eRank=610.6,q75/q25=3.26 vo_prod:H=0.5973,top10E=0.32,eRank=73.4,q75/q25=inf train_time:116896ms step_avg:73.06ms +[2025-09-02 06:56:23] [Rank 0] step:1601/10000 train_time:116909ms step_avg:73.02ms +[2025-09-02 06:56:23] [Rank 0] step:1601/10000 train_time:116909ms step_avg:73.02ms +[2025-09-02 06:56:24] [Rank 0] step:1621/10000 train_time:118372ms step_avg:73.02ms +[2025-09-02 06:56:24] [Rank 0] step:1621/10000 train_time:118372ms step_avg:73.02ms +[2025-09-02 06:56:26] [Rank 0] step:1641/10000 train_time:119846ms step_avg:73.03ms +[2025-09-02 06:56:26] [Rank 0] step:1641/10000 train_time:119846ms step_avg:73.03ms +[2025-09-02 06:56:27] [Rank 0] step:1661/10000 train_time:121321ms step_avg:73.04ms +[2025-09-02 06:56:27] [Rank 0] step:1661/10000 train_time:121321ms step_avg:73.04ms +[2025-09-02 06:56:29] [Rank 0] step:1681/10000 train_time:122796ms step_avg:73.05ms +[2025-09-02 06:56:29] [Rank 0] step:1681/10000 train_time:122796ms step_avg:73.05ms +[2025-09-02 06:56:30] [Rank 0] step:1701/10000 train_time:124271ms step_avg:73.06ms +[2025-09-02 06:56:30] [Rank 0] step:1701/10000 train_time:124271ms step_avg:73.06ms +[2025-09-02 06:56:32] [Rank 0] step:1721/10000 train_time:125748ms step_avg:73.07ms +[2025-09-02 06:56:32] [Rank 0] step:1721/10000 train_time:125748ms step_avg:73.07ms +[2025-09-02 06:56:33] [Rank 0] 
step:1741/10000 train_time:127223ms step_avg:73.07ms +[2025-09-02 06:56:33] [Rank 0] step:1741/10000 train_time:127223ms step_avg:73.07ms +[2025-09-02 06:56:35] [Rank 0] step:1761/10000 train_time:128700ms step_avg:73.08ms +[2025-09-02 06:56:35] [Rank 0] step:1761/10000 train_time:128700ms step_avg:73.08ms +[2025-09-02 06:56:36] [Rank 0] step:1781/10000 train_time:130177ms step_avg:73.09ms +[2025-09-02 06:56:36] [Rank 0] step:1781/10000 train_time:130177ms step_avg:73.09ms +[2025-09-02 06:56:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:56:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:56:49] [Rank 0] PRINT: step:1800/10000 val_loss:4.4893 svd_entropy: attn_qk:H=0.7393,top10E=0.30,eRank=159.5,q75/q25=50.00 attn_vo:H=0.7598,top10E=0.19,eRank=226.8,q75/q25=inf mlp_w1:H=0.8422,top10E=0.21,eRank=277.6,q75/q25=6.64 mlp_w2:H=0.9666,top10E=0.04,eRank=615.2,q75/q25=3.19 vo_prod:H=0.6079,top10E=0.30,eRank=79.0,q75/q25=inf train_time:131730ms step_avg:73.18ms +[2025-09-02 06:56:49] [Rank 0] PRINT: step:1800/10000 val_loss:4.4893 svd_entropy: attn_qk:H=0.7393,top10E=0.30,eRank=159.5,q75/q25=50.00 attn_vo:H=0.7598,top10E=0.19,eRank=226.8,q75/q25=inf mlp_w1:H=0.8422,top10E=0.21,eRank=277.6,q75/q25=6.64 mlp_w2:H=0.9666,top10E=0.04,eRank=615.2,q75/q25=3.19 vo_prod:H=0.6079,top10E=0.30,eRank=79.0,q75/q25=inf train_time:131730ms step_avg:73.18ms +[2025-09-02 06:56:49] [Rank 0] step:1801/10000 train_time:131742ms step_avg:73.15ms +[2025-09-02 06:56:49] [Rank 0] step:1801/10000 train_time:131742ms step_avg:73.15ms +[2025-09-02 06:56:51] [Rank 0] step:1821/10000 train_time:133143ms step_avg:73.12ms +[2025-09-02 06:56:51] [Rank 0] step:1821/10000 train_time:133143ms step_avg:73.12ms +[2025-09-02 06:56:52] [Rank 0] step:1841/10000 train_time:134619ms step_avg:73.12ms +[2025-09-02 06:56:52] 
[Rank 0] step:1841/10000 train_time:134619ms step_avg:73.12ms +[2025-09-02 06:56:54] [Rank 0] step:1861/10000 train_time:136098ms step_avg:73.13ms +[2025-09-02 06:56:54] [Rank 0] step:1861/10000 train_time:136098ms step_avg:73.13ms +[2025-09-02 06:56:55] [Rank 0] step:1881/10000 train_time:137576ms step_avg:73.14ms +[2025-09-02 06:56:55] [Rank 0] step:1881/10000 train_time:137576ms step_avg:73.14ms +[2025-09-02 06:56:57] [Rank 0] step:1901/10000 train_time:139053ms step_avg:73.15ms +[2025-09-02 06:56:57] [Rank 0] step:1901/10000 train_time:139053ms step_avg:73.15ms +[2025-09-02 06:56:58] [Rank 0] step:1921/10000 train_time:140530ms step_avg:73.15ms +[2025-09-02 06:56:58] [Rank 0] step:1921/10000 train_time:140530ms step_avg:73.15ms +[2025-09-02 06:57:00] [Rank 0] step:1941/10000 train_time:142008ms step_avg:73.16ms +[2025-09-02 06:57:00] [Rank 0] step:1941/10000 train_time:142008ms step_avg:73.16ms +[2025-09-02 06:57:01] [Rank 0] step:1961/10000 train_time:143486ms step_avg:73.17ms +[2025-09-02 06:57:01] [Rank 0] step:1961/10000 train_time:143486ms step_avg:73.17ms +[2025-09-02 06:57:03] [Rank 0] step:1981/10000 train_time:144966ms step_avg:73.18ms +[2025-09-02 06:57:03] [Rank 0] step:1981/10000 train_time:144966ms step_avg:73.18ms +[2025-09-02 06:57:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:57:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:57:16] [Rank 0] PRINT: step:2000/10000 val_loss:4.4341 svd_entropy: attn_qk:H=0.7442,top10E=0.29,eRank=163.0,q75/q25=52.31 attn_vo:H=0.7672,top10E=0.18,eRank=235.4,q75/q25=inf mlp_w1:H=0.8500,top10E=0.20,eRank=291.2,q75/q25=6.39 mlp_w2:H=0.9674,top10E=0.04,eRank=618.6,q75/q25=3.13 vo_prod:H=0.6173,top10E=0.29,eRank=84.5,q75/q25=inf train_time:146520ms step_avg:73.26ms +[2025-09-02 06:57:16] [Rank 0] PRINT: step:2000/10000 val_loss:4.4341 svd_entropy: attn_qk:H=0.7442,top10E=0.29,eRank=163.0,q75/q25=52.31 attn_vo:H=0.7672,top10E=0.18,eRank=235.4,q75/q25=inf mlp_w1:H=0.8500,top10E=0.20,eRank=291.2,q75/q25=6.39 mlp_w2:H=0.9674,top10E=0.04,eRank=618.6,q75/q25=3.13 vo_prod:H=0.6173,top10E=0.29,eRank=84.5,q75/q25=inf train_time:146520ms step_avg:73.26ms +[2025-09-02 06:57:16] [Rank 0] step:2001/10000 train_time:146532ms step_avg:73.23ms +[2025-09-02 06:57:16] [Rank 0] step:2001/10000 train_time:146532ms step_avg:73.23ms +[2025-09-02 06:57:17] [Rank 0] step:2021/10000 train_time:147957ms step_avg:73.21ms +[2025-09-02 06:57:17] [Rank 0] step:2021/10000 train_time:147957ms step_avg:73.21ms +[2025-09-02 06:57:19] [Rank 0] step:2041/10000 train_time:149437ms step_avg:73.22ms +[2025-09-02 06:57:19] [Rank 0] step:2041/10000 train_time:149437ms step_avg:73.22ms +[2025-09-02 06:57:20] [Rank 0] step:2061/10000 train_time:150917ms step_avg:73.23ms +[2025-09-02 06:57:20] [Rank 0] step:2061/10000 train_time:150917ms step_avg:73.23ms +[2025-09-02 06:57:22] [Rank 0] step:2081/10000 train_time:152399ms step_avg:73.23ms +[2025-09-02 06:57:22] [Rank 0] step:2081/10000 train_time:152399ms step_avg:73.23ms +[2025-09-02 06:57:23] [Rank 0] step:2101/10000 train_time:153880ms step_avg:73.24ms +[2025-09-02 06:57:23] [Rank 0] step:2101/10000 train_time:153880ms step_avg:73.24ms +[2025-09-02 06:57:25] [Rank 0] step:2121/10000 train_time:155363ms step_avg:73.25ms +[2025-09-02 06:57:25] [Rank 0] step:2121/10000 train_time:155363ms step_avg:73.25ms +[2025-09-02 06:57:26] [Rank 0] 
step:2141/10000 train_time:156844ms step_avg:73.26ms +[2025-09-02 06:57:26] [Rank 0] step:2141/10000 train_time:156844ms step_avg:73.26ms +[2025-09-02 06:57:28] [Rank 0] step:2161/10000 train_time:158407ms step_avg:73.30ms +[2025-09-02 06:57:28] [Rank 0] step:2161/10000 train_time:158407ms step_avg:73.30ms +[2025-09-02 06:57:29] [Rank 0] step:2181/10000 train_time:159888ms step_avg:73.31ms +[2025-09-02 06:57:29] [Rank 0] step:2181/10000 train_time:159888ms step_avg:73.31ms +[2025-09-02 06:57:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:57:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:57:42] [Rank 0] PRINT: step:2200/10000 val_loss:4.3658 svd_entropy: attn_qk:H=0.7482,top10E=0.28,eRank=166.2,q75/q25=54.07 attn_vo:H=0.7729,top10E=0.17,eRank=242.3,q75/q25=inf mlp_w1:H=0.8563,top10E=0.20,eRank=302.8,q75/q25=6.20 mlp_w2:H=0.9681,top10E=0.04,eRank=621.2,q75/q25=3.09 vo_prod:H=0.6247,top10E=0.28,eRank=89.1,q75/q25=inf train_time:161440ms step_avg:73.38ms +[2025-09-02 06:57:42] [Rank 0] PRINT: step:2200/10000 val_loss:4.3658 svd_entropy: attn_qk:H=0.7482,top10E=0.28,eRank=166.2,q75/q25=54.07 attn_vo:H=0.7729,top10E=0.17,eRank=242.3,q75/q25=inf mlp_w1:H=0.8563,top10E=0.20,eRank=302.8,q75/q25=6.20 mlp_w2:H=0.9681,top10E=0.04,eRank=621.2,q75/q25=3.09 vo_prod:H=0.6247,top10E=0.28,eRank=89.1,q75/q25=inf train_time:161440ms step_avg:73.38ms +[2025-09-02 06:57:42] [Rank 0] step:2201/10000 train_time:161452ms step_avg:73.35ms +[2025-09-02 06:57:42] [Rank 0] step:2201/10000 train_time:161452ms step_avg:73.35ms +[2025-09-02 06:57:44] [Rank 0] step:2221/10000 train_time:162858ms step_avg:73.33ms +[2025-09-02 06:57:44] [Rank 0] step:2221/10000 train_time:162858ms step_avg:73.33ms +[2025-09-02 06:57:45] [Rank 0] step:2241/10000 train_time:164369ms step_avg:73.35ms +[2025-09-02 06:57:45] 
[Rank 0] step:2241/10000 train_time:164369ms step_avg:73.35ms +[2025-09-02 06:57:47] [Rank 0] step:2261/10000 train_time:165887ms step_avg:73.37ms +[2025-09-02 06:57:47] [Rank 0] step:2261/10000 train_time:165887ms step_avg:73.37ms +[2025-09-02 06:57:48] [Rank 0] step:2281/10000 train_time:167406ms step_avg:73.39ms +[2025-09-02 06:57:48] [Rank 0] step:2281/10000 train_time:167406ms step_avg:73.39ms +[2025-09-02 06:57:50] [Rank 0] step:2301/10000 train_time:168927ms step_avg:73.41ms +[2025-09-02 06:57:50] [Rank 0] step:2301/10000 train_time:168927ms step_avg:73.41ms +[2025-09-02 06:57:52] [Rank 0] step:2321/10000 train_time:170446ms step_avg:73.44ms +[2025-09-02 06:57:52] [Rank 0] step:2321/10000 train_time:170446ms step_avg:73.44ms +[2025-09-02 06:57:53] [Rank 0] step:2341/10000 train_time:171966ms step_avg:73.46ms +[2025-09-02 06:57:53] [Rank 0] step:2341/10000 train_time:171966ms step_avg:73.46ms +[2025-09-02 06:57:55] [Rank 0] step:2361/10000 train_time:173486ms step_avg:73.48ms +[2025-09-02 06:57:55] [Rank 0] step:2361/10000 train_time:173486ms step_avg:73.48ms +[2025-09-02 06:57:56] [Rank 0] step:2381/10000 train_time:175006ms step_avg:73.50ms +[2025-09-02 06:57:56] [Rank 0] step:2381/10000 train_time:175006ms step_avg:73.50ms +[2025-09-02 06:57:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:57:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:58:09] [Rank 0] PRINT: step:2400/10000 val_loss:4.2913 svd_entropy: attn_qk:H=0.7508,top10E=0.28,eRank=168.2,q75/q25=55.11 attn_vo:H=0.7781,top10E=0.17,eRank=248.7,q75/q25=inf mlp_w1:H=0.8618,top10E=0.19,eRank=313.4,q75/q25=6.00 mlp_w2:H=0.9685,top10E=0.04,eRank=623.2,q75/q25=3.06 vo_prod:H=0.6316,top10E=0.27,eRank=93.7,q75/q25=inf train_time:176603ms step_avg:73.58ms +[2025-09-02 06:58:09] [Rank 0] PRINT: step:2400/10000 val_loss:4.2913 svd_entropy: attn_qk:H=0.7508,top10E=0.28,eRank=168.2,q75/q25=55.11 attn_vo:H=0.7781,top10E=0.17,eRank=248.7,q75/q25=inf mlp_w1:H=0.8618,top10E=0.19,eRank=313.4,q75/q25=6.00 mlp_w2:H=0.9685,top10E=0.04,eRank=623.2,q75/q25=3.06 vo_prod:H=0.6316,top10E=0.27,eRank=93.7,q75/q25=inf train_time:176603ms step_avg:73.58ms +[2025-09-02 06:58:09] [Rank 0] step:2401/10000 train_time:176616ms step_avg:73.56ms +[2025-09-02 06:58:09] [Rank 0] step:2401/10000 train_time:176616ms step_avg:73.56ms +[2025-09-02 06:58:11] [Rank 0] step:2421/10000 train_time:178064ms step_avg:73.55ms +[2025-09-02 06:58:11] [Rank 0] step:2421/10000 train_time:178064ms step_avg:73.55ms +[2025-09-02 06:58:12] [Rank 0] step:2441/10000 train_time:179582ms step_avg:73.57ms +[2025-09-02 06:58:12] [Rank 0] step:2441/10000 train_time:179582ms step_avg:73.57ms +[2025-09-02 06:58:14] [Rank 0] step:2461/10000 train_time:181100ms step_avg:73.59ms +[2025-09-02 06:58:14] [Rank 0] step:2461/10000 train_time:181100ms step_avg:73.59ms +[2025-09-02 06:58:16] [Rank 0] step:2481/10000 train_time:182618ms step_avg:73.61ms +[2025-09-02 06:58:16] [Rank 0] step:2481/10000 train_time:182618ms step_avg:73.61ms +[2025-09-02 06:58:17] [Rank 0] step:2501/10000 train_time:184137ms step_avg:73.63ms +[2025-09-02 06:58:17] [Rank 0] step:2501/10000 train_time:184137ms step_avg:73.63ms +[2025-09-02 06:58:19] [Rank 0] step:2521/10000 train_time:185657ms step_avg:73.64ms +[2025-09-02 06:58:19] [Rank 0] step:2521/10000 train_time:185657ms step_avg:73.64ms +[2025-09-02 06:58:20] [Rank 0] 
step:2541/10000 train_time:187176ms step_avg:73.66ms +[2025-09-02 06:58:20] [Rank 0] step:2541/10000 train_time:187176ms step_avg:73.66ms +[2025-09-02 06:58:22] [Rank 0] step:2561/10000 train_time:188696ms step_avg:73.68ms +[2025-09-02 06:58:22] [Rank 0] step:2561/10000 train_time:188696ms step_avg:73.68ms +[2025-09-02 06:58:23] [Rank 0] step:2581/10000 train_time:190215ms step_avg:73.70ms +[2025-09-02 06:58:23] [Rank 0] step:2581/10000 train_time:190215ms step_avg:73.70ms +[2025-09-02 06:58:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:58:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:58:36] [Rank 0] PRINT: step:2600/10000 val_loss:4.2443 svd_entropy: attn_qk:H=0.7540,top10E=0.28,eRank=170.8,q75/q25=55.97 attn_vo:H=0.7826,top10E=0.16,eRank=254.4,q75/q25=inf mlp_w1:H=0.8665,top10E=0.19,eRank=322.7,q75/q25=5.81 mlp_w2:H=0.9689,top10E=0.04,eRank=624.7,q75/q25=3.04 vo_prod:H=0.6380,top10E=0.26,eRank=98.2,q75/q25=inf train_time:191813ms step_avg:73.77ms +[2025-09-02 06:58:36] [Rank 0] PRINT: step:2600/10000 val_loss:4.2443 svd_entropy: attn_qk:H=0.7540,top10E=0.28,eRank=170.8,q75/q25=55.97 attn_vo:H=0.7826,top10E=0.16,eRank=254.4,q75/q25=inf mlp_w1:H=0.8665,top10E=0.19,eRank=322.7,q75/q25=5.81 mlp_w2:H=0.9689,top10E=0.04,eRank=624.7,q75/q25=3.04 vo_prod:H=0.6380,top10E=0.26,eRank=98.2,q75/q25=inf train_time:191813ms step_avg:73.77ms +[2025-09-02 06:58:36] [Rank 0] step:2601/10000 train_time:191826ms step_avg:73.75ms +[2025-09-02 06:58:36] [Rank 0] step:2601/10000 train_time:191826ms step_avg:73.75ms +[2025-09-02 06:58:38] [Rank 0] step:2621/10000 train_time:193281ms step_avg:73.74ms +[2025-09-02 06:58:38] [Rank 0] step:2621/10000 train_time:193281ms step_avg:73.74ms +[2025-09-02 06:58:39] [Rank 0] step:2641/10000 train_time:194798ms step_avg:73.76ms +[2025-09-02 06:58:39] 
[Rank 0] step:2641/10000 train_time:194798ms step_avg:73.76ms +[2025-09-02 06:58:41] [Rank 0] step:2661/10000 train_time:196317ms step_avg:73.78ms +[2025-09-02 06:58:41] [Rank 0] step:2661/10000 train_time:196317ms step_avg:73.78ms +[2025-09-02 06:58:42] [Rank 0] step:2681/10000 train_time:197835ms step_avg:73.79ms +[2025-09-02 06:58:42] [Rank 0] step:2681/10000 train_time:197835ms step_avg:73.79ms +[2025-09-02 06:58:44] [Rank 0] step:2701/10000 train_time:199354ms step_avg:73.81ms +[2025-09-02 06:58:44] [Rank 0] step:2701/10000 train_time:199354ms step_avg:73.81ms +[2025-09-02 06:58:45] [Rank 0] step:2721/10000 train_time:200878ms step_avg:73.82ms +[2025-09-02 06:58:45] [Rank 0] step:2721/10000 train_time:200878ms step_avg:73.82ms +[2025-09-02 06:58:47] [Rank 0] step:2741/10000 train_time:202399ms step_avg:73.84ms +[2025-09-02 06:58:47] [Rank 0] step:2741/10000 train_time:202399ms step_avg:73.84ms +[2025-09-02 06:58:49] [Rank 0] step:2761/10000 train_time:203921ms step_avg:73.86ms +[2025-09-02 06:58:49] [Rank 0] step:2761/10000 train_time:203921ms step_avg:73.86ms +[2025-09-02 06:58:50] [Rank 0] step:2781/10000 train_time:205442ms step_avg:73.87ms +[2025-09-02 06:58:50] [Rank 0] step:2781/10000 train_time:205442ms step_avg:73.87ms +[2025-09-02 06:58:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:58:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:59:03] [Rank 0] PRINT: step:2800/10000 val_loss:4.2089 svd_entropy: attn_qk:H=0.7571,top10E=0.27,eRank=173.4,q75/q25=56.65 attn_vo:H=0.7866,top10E=0.15,eRank=259.5,q75/q25=inf mlp_w1:H=0.8707,top10E=0.18,eRank=331.3,q75/q25=5.65 mlp_w2:H=0.9692,top10E=0.04,eRank=625.8,q75/q25=3.02 vo_prod:H=0.6439,top10E=0.25,eRank=102.6,q75/q25=inf train_time:207043ms step_avg:73.94ms +[2025-09-02 06:59:03] [Rank 0] PRINT: step:2800/10000 val_loss:4.2089 svd_entropy: attn_qk:H=0.7571,top10E=0.27,eRank=173.4,q75/q25=56.65 attn_vo:H=0.7866,top10E=0.15,eRank=259.5,q75/q25=inf mlp_w1:H=0.8707,top10E=0.18,eRank=331.3,q75/q25=5.65 mlp_w2:H=0.9692,top10E=0.04,eRank=625.8,q75/q25=3.02 vo_prod:H=0.6439,top10E=0.25,eRank=102.6,q75/q25=inf train_time:207043ms step_avg:73.94ms +[2025-09-02 06:59:03] [Rank 0] step:2801/10000 train_time:207055ms step_avg:73.92ms +[2025-09-02 06:59:03] [Rank 0] step:2801/10000 train_time:207055ms step_avg:73.92ms +[2025-09-02 06:59:05] [Rank 0] step:2821/10000 train_time:208510ms step_avg:73.91ms +[2025-09-02 06:59:05] [Rank 0] step:2821/10000 train_time:208510ms step_avg:73.91ms +[2025-09-02 06:59:06] [Rank 0] step:2841/10000 train_time:210029ms step_avg:73.93ms +[2025-09-02 06:59:06] [Rank 0] step:2841/10000 train_time:210029ms step_avg:73.93ms +[2025-09-02 06:59:08] [Rank 0] step:2861/10000 train_time:211549ms step_avg:73.94ms +[2025-09-02 06:59:08] [Rank 0] step:2861/10000 train_time:211549ms step_avg:73.94ms +[2025-09-02 06:59:09] [Rank 0] step:2881/10000 train_time:213069ms step_avg:73.96ms +[2025-09-02 06:59:09] [Rank 0] step:2881/10000 train_time:213069ms step_avg:73.96ms +[2025-09-02 06:59:11] [Rank 0] step:2901/10000 train_time:214592ms step_avg:73.97ms +[2025-09-02 06:59:11] [Rank 0] step:2901/10000 train_time:214592ms step_avg:73.97ms +[2025-09-02 06:59:12] [Rank 0] step:2921/10000 train_time:216115ms step_avg:73.99ms +[2025-09-02 06:59:12] [Rank 0] step:2921/10000 train_time:216115ms step_avg:73.99ms +[2025-09-02 06:59:14] [Rank 0] 
step:2941/10000 train_time:217638ms step_avg:74.00ms +[2025-09-02 06:59:14] [Rank 0] step:2941/10000 train_time:217638ms step_avg:74.00ms +[2025-09-02 06:59:15] [Rank 0] step:2961/10000 train_time:219161ms step_avg:74.02ms +[2025-09-02 06:59:15] [Rank 0] step:2961/10000 train_time:219161ms step_avg:74.02ms +[2025-09-02 06:59:17] [Rank 0] step:2981/10000 train_time:220690ms step_avg:74.03ms +[2025-09-02 06:59:17] [Rank 0] step:2981/10000 train_time:220690ms step_avg:74.03ms +[2025-09-02 06:59:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:59:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:59:30] [Rank 0] PRINT: step:3000/10000 val_loss:4.1667 svd_entropy: attn_qk:H=0.7598,top10E=0.27,eRank=175.6,q75/q25=56.56 attn_vo:H=0.7900,top10E=0.15,eRank=264.1,q75/q25=inf mlp_w1:H=0.8743,top10E=0.18,eRank=338.9,q75/q25=5.52 mlp_w2:H=0.9694,top10E=0.04,eRank=626.8,q75/q25=3.01 vo_prod:H=0.6487,top10E=0.24,eRank=106.3,q75/q25=inf train_time:222298ms step_avg:74.10ms +[2025-09-02 06:59:30] [Rank 0] PRINT: step:3000/10000 val_loss:4.1667 svd_entropy: attn_qk:H=0.7598,top10E=0.27,eRank=175.6,q75/q25=56.56 attn_vo:H=0.7900,top10E=0.15,eRank=264.1,q75/q25=inf mlp_w1:H=0.8743,top10E=0.18,eRank=338.9,q75/q25=5.52 mlp_w2:H=0.9694,top10E=0.04,eRank=626.8,q75/q25=3.01 vo_prod:H=0.6487,top10E=0.24,eRank=106.3,q75/q25=inf train_time:222298ms step_avg:74.10ms +[2025-09-02 06:59:30] [Rank 0] step:3001/10000 train_time:222311ms step_avg:74.08ms +[2025-09-02 06:59:30] [Rank 0] step:3001/10000 train_time:222311ms step_avg:74.08ms +[2025-09-02 06:59:32] [Rank 0] step:3021/10000 train_time:223784ms step_avg:74.08ms +[2025-09-02 06:59:32] [Rank 0] step:3021/10000 train_time:223784ms step_avg:74.08ms +[2025-09-02 06:59:33] [Rank 0] step:3041/10000 train_time:225354ms step_avg:74.11ms +[2025-09-02 
06:59:33] [Rank 0] step:3041/10000 train_time:225354ms step_avg:74.11ms +[2025-09-02 06:59:35] [Rank 0] step:3061/10000 train_time:226881ms step_avg:74.12ms +[2025-09-02 06:59:35] [Rank 0] step:3061/10000 train_time:226881ms step_avg:74.12ms +[2025-09-02 06:59:36] [Rank 0] step:3081/10000 train_time:228407ms step_avg:74.13ms +[2025-09-02 06:59:36] [Rank 0] step:3081/10000 train_time:228407ms step_avg:74.13ms +[2025-09-02 06:59:38] [Rank 0] step:3101/10000 train_time:229933ms step_avg:74.15ms +[2025-09-02 06:59:38] [Rank 0] step:3101/10000 train_time:229933ms step_avg:74.15ms +[2025-09-02 06:59:39] [Rank 0] step:3121/10000 train_time:231460ms step_avg:74.16ms +[2025-09-02 06:59:39] [Rank 0] step:3121/10000 train_time:231460ms step_avg:74.16ms +[2025-09-02 06:59:41] [Rank 0] step:3141/10000 train_time:232987ms step_avg:74.18ms +[2025-09-02 06:59:41] [Rank 0] step:3141/10000 train_time:232987ms step_avg:74.18ms +[2025-09-02 06:59:43] [Rank 0] step:3161/10000 train_time:234516ms step_avg:74.19ms +[2025-09-02 06:59:43] [Rank 0] step:3161/10000 train_time:234516ms step_avg:74.19ms +[2025-09-02 06:59:44] [Rank 0] step:3181/10000 train_time:236044ms step_avg:74.20ms +[2025-09-02 06:59:44] [Rank 0] step:3181/10000 train_time:236044ms step_avg:74.20ms +[2025-09-02 06:59:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:59:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:59:57] [Rank 0] PRINT: step:3200/10000 val_loss:4.1312 svd_entropy: attn_qk:H=0.7621,top10E=0.27,eRank=177.6,q75/q25=56.76 attn_vo:H=0.7930,top10E=0.15,eRank=268.1,q75/q25=inf mlp_w1:H=0.8776,top10E=0.17,eRank=345.9,q75/q25=5.39 mlp_w2:H=0.9696,top10E=0.04,eRank=627.4,q75/q25=2.99 vo_prod:H=0.6530,top10E=0.23,eRank=109.7,q75/q25=inf train_time:237653ms step_avg:74.27ms +[2025-09-02 06:59:57] [Rank 0] PRINT: step:3200/10000 val_loss:4.1312 svd_entropy: attn_qk:H=0.7621,top10E=0.27,eRank=177.6,q75/q25=56.76 attn_vo:H=0.7930,top10E=0.15,eRank=268.1,q75/q25=inf mlp_w1:H=0.8776,top10E=0.17,eRank=345.9,q75/q25=5.39 mlp_w2:H=0.9696,top10E=0.04,eRank=627.4,q75/q25=2.99 vo_prod:H=0.6530,top10E=0.23,eRank=109.7,q75/q25=inf train_time:237653ms step_avg:74.27ms +[2025-09-02 06:59:58] [Rank 0] step:3201/10000 train_time:237667ms step_avg:74.25ms +[2025-09-02 06:59:58] [Rank 0] step:3201/10000 train_time:237667ms step_avg:74.25ms +[2025-09-02 06:59:59] [Rank 0] step:3221/10000 train_time:239143ms step_avg:74.25ms +[2025-09-02 06:59:59] [Rank 0] step:3221/10000 train_time:239143ms step_avg:74.25ms +[2025-09-02 07:00:01] [Rank 0] step:3241/10000 train_time:240670ms step_avg:74.26ms +[2025-09-02 07:00:01] [Rank 0] step:3241/10000 train_time:240670ms step_avg:74.26ms +[2025-09-02 07:00:02] [Rank 0] step:3261/10000 train_time:242196ms step_avg:74.27ms +[2025-09-02 07:00:02] [Rank 0] step:3261/10000 train_time:242196ms step_avg:74.27ms +[2025-09-02 07:00:04] [Rank 0] step:3281/10000 train_time:243723ms step_avg:74.28ms +[2025-09-02 07:00:04] [Rank 0] step:3281/10000 train_time:243723ms step_avg:74.28ms +[2025-09-02 07:00:05] [Rank 0] step:3301/10000 train_time:245251ms step_avg:74.30ms +[2025-09-02 07:00:05] [Rank 0] step:3301/10000 train_time:245251ms step_avg:74.30ms +[2025-09-02 07:00:07] [Rank 0] step:3321/10000 train_time:246779ms step_avg:74.31ms +[2025-09-02 07:00:07] [Rank 0] step:3321/10000 train_time:246779ms step_avg:74.31ms +[2025-09-02 07:00:08] [Rank 0] 
step:3341/10000 train_time:248306ms step_avg:74.32ms +[2025-09-02 07:00:08] [Rank 0] step:3341/10000 train_time:248306ms step_avg:74.32ms +[2025-09-02 07:00:10] [Rank 0] step:3361/10000 train_time:249833ms step_avg:74.33ms +[2025-09-02 07:00:10] [Rank 0] step:3361/10000 train_time:249833ms step_avg:74.33ms +[2025-09-02 07:00:11] [Rank 0] step:3381/10000 train_time:251362ms step_avg:74.35ms +[2025-09-02 07:00:11] [Rank 0] step:3381/10000 train_time:251362ms step_avg:74.35ms +[2025-09-02 07:00:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:00:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:00:25] [Rank 0] PRINT: step:3400/10000 val_loss:4.0948 svd_entropy: attn_qk:H=0.7645,top10E=0.27,eRank=179.7,q75/q25=57.02 attn_vo:H=0.7959,top10E=0.14,eRank=272.1,q75/q25=inf mlp_w1:H=0.8807,top10E=0.17,eRank=352.7,q75/q25=5.29 mlp_w2:H=0.9697,top10E=0.04,eRank=627.9,q75/q25=2.99 vo_prod:H=0.6574,top10E=0.23,eRank=113.3,q75/q25=inf train_time:252969ms step_avg:74.40ms +[2025-09-02 07:00:25] [Rank 0] PRINT: step:3400/10000 val_loss:4.0948 svd_entropy: attn_qk:H=0.7645,top10E=0.27,eRank=179.7,q75/q25=57.02 attn_vo:H=0.7959,top10E=0.14,eRank=272.1,q75/q25=inf mlp_w1:H=0.8807,top10E=0.17,eRank=352.7,q75/q25=5.29 mlp_w2:H=0.9697,top10E=0.04,eRank=627.9,q75/q25=2.99 vo_prod:H=0.6574,top10E=0.23,eRank=113.3,q75/q25=inf train_time:252969ms step_avg:74.40ms +[2025-09-02 07:00:25] [Rank 0] step:3401/10000 train_time:252983ms step_avg:74.38ms +[2025-09-02 07:00:25] [Rank 0] step:3401/10000 train_time:252983ms step_avg:74.38ms +[2025-09-02 07:00:26] [Rank 0] step:3421/10000 train_time:254455ms step_avg:74.38ms +[2025-09-02 07:00:26] [Rank 0] step:3421/10000 train_time:254455ms step_avg:74.38ms +[2025-09-02 07:00:28] [Rank 0] step:3441/10000 train_time:255982ms step_avg:74.39ms +[2025-09-02 
07:00:28] [Rank 0] step:3441/10000 train_time:255982ms step_avg:74.39ms +[2025-09-02 07:00:29] [Rank 0] step:3461/10000 train_time:257509ms step_avg:74.40ms +[2025-09-02 07:00:29] [Rank 0] step:3461/10000 train_time:257509ms step_avg:74.40ms +[2025-09-02 07:00:31] [Rank 0] step:3481/10000 train_time:259036ms step_avg:74.41ms +[2025-09-02 07:00:31] [Rank 0] step:3481/10000 train_time:259036ms step_avg:74.41ms +[2025-09-02 07:00:32] [Rank 0] step:3501/10000 train_time:260566ms step_avg:74.43ms +[2025-09-02 07:00:32] [Rank 0] step:3501/10000 train_time:260566ms step_avg:74.43ms +[2025-09-02 07:00:34] [Rank 0] step:3521/10000 train_time:262094ms step_avg:74.44ms +[2025-09-02 07:00:34] [Rank 0] step:3521/10000 train_time:262094ms step_avg:74.44ms +[2025-09-02 07:00:35] [Rank 0] step:3541/10000 train_time:263623ms step_avg:74.45ms +[2025-09-02 07:00:35] [Rank 0] step:3541/10000 train_time:263623ms step_avg:74.45ms +[2025-09-02 07:00:37] [Rank 0] step:3561/10000 train_time:265197ms step_avg:74.47ms +[2025-09-02 07:00:37] [Rank 0] step:3561/10000 train_time:265197ms step_avg:74.47ms +[2025-09-02 07:00:39] [Rank 0] step:3581/10000 train_time:266727ms step_avg:74.48ms +[2025-09-02 07:00:39] [Rank 0] step:3581/10000 train_time:266727ms step_avg:74.48ms +[2025-09-02 07:00:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:00:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:00:52] [Rank 0] PRINT: step:3600/10000 val_loss:4.0841 svd_entropy: attn_qk:H=0.7511,top10E=0.29,eRank=153.1,q75/q25=57.70 attn_vo:H=0.8351,top10E=0.15,eRank=288.3,q75/q25=44.99 mlp_w1:H=0.8833,top10E=0.17,eRank=358.7,q75/q25=5.18 mlp_w2:H=0.9698,top10E=0.04,eRank=628.4,q75/q25=2.98 vo_prod:H=0.7291,top10E=0.25,eRank=129.5,q75/q25=1989.33 train_time:268337ms step_avg:74.54ms +[2025-09-02 07:00:52] [Rank 0] PRINT: step:3600/10000 val_loss:4.0841 svd_entropy: attn_qk:H=0.7511,top10E=0.29,eRank=153.1,q75/q25=57.70 attn_vo:H=0.8351,top10E=0.15,eRank=288.3,q75/q25=44.99 mlp_w1:H=0.8833,top10E=0.17,eRank=358.7,q75/q25=5.18 mlp_w2:H=0.9698,top10E=0.04,eRank=628.4,q75/q25=2.98 vo_prod:H=0.7291,top10E=0.25,eRank=129.5,q75/q25=1989.33 train_time:268337ms step_avg:74.54ms +[2025-09-02 07:00:52] [Rank 0] step:3601/10000 train_time:268351ms step_avg:74.52ms +[2025-09-02 07:00:52] [Rank 0] step:3601/10000 train_time:268351ms step_avg:74.52ms +[2025-09-02 07:00:54] [Rank 0] step:3621/10000 train_time:269816ms step_avg:74.51ms +[2025-09-02 07:00:54] [Rank 0] step:3621/10000 train_time:269816ms step_avg:74.51ms +[2025-09-02 07:00:55] [Rank 0] step:3641/10000 train_time:271344ms step_avg:74.52ms +[2025-09-02 07:00:55] [Rank 0] step:3641/10000 train_time:271344ms step_avg:74.52ms +[2025-09-02 07:00:57] [Rank 0] step:3661/10000 train_time:272875ms step_avg:74.54ms +[2025-09-02 07:00:57] [Rank 0] step:3661/10000 train_time:272875ms step_avg:74.54ms +[2025-09-02 07:00:58] [Rank 0] step:3681/10000 train_time:274407ms step_avg:74.55ms +[2025-09-02 07:00:58] [Rank 0] step:3681/10000 train_time:274407ms step_avg:74.55ms +[2025-09-02 07:01:00] [Rank 0] step:3701/10000 train_time:275937ms step_avg:74.56ms +[2025-09-02 07:01:00] [Rank 0] step:3701/10000 train_time:275937ms step_avg:74.56ms +[2025-09-02 07:01:01] [Rank 0] step:3721/10000 train_time:277495ms step_avg:74.58ms +[2025-09-02 07:01:01] [Rank 0] step:3721/10000 train_time:277495ms step_avg:74.58ms +[2025-09-02 
07:01:03] [Rank 0] step:3741/10000 train_time:279062ms step_avg:74.60ms +[2025-09-02 07:01:03] [Rank 0] step:3741/10000 train_time:279062ms step_avg:74.60ms +[2025-09-02 07:01:04] [Rank 0] step:3761/10000 train_time:280629ms step_avg:74.62ms +[2025-09-02 07:01:04] [Rank 0] step:3761/10000 train_time:280629ms step_avg:74.62ms +[2025-09-02 07:01:06] [Rank 0] step:3781/10000 train_time:282197ms step_avg:74.64ms +[2025-09-02 07:01:06] [Rank 0] step:3781/10000 train_time:282197ms step_avg:74.64ms +[2025-09-02 07:01:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:01:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:01:19] [Rank 0] PRINT: step:3800/10000 val_loss:4.0383 svd_entropy: attn_qk:H=0.7527,top10E=0.29,eRank=154.5,q75/q25=58.00 attn_vo:H=0.8372,top10E=0.15,eRank=291.2,q75/q25=44.31 mlp_w1:H=0.8858,top10E=0.16,eRank=364.4,q75/q25=5.09 mlp_w2:H=0.9699,top10E=0.04,eRank=628.7,q75/q25=2.97 vo_prod:H=0.7313,top10E=0.25,eRank=131.4,q75/q25=1858.71 train_time:283845ms step_avg:74.70ms +[2025-09-02 07:01:19] [Rank 0] PRINT: step:3800/10000 val_loss:4.0383 svd_entropy: attn_qk:H=0.7527,top10E=0.29,eRank=154.5,q75/q25=58.00 attn_vo:H=0.8372,top10E=0.15,eRank=291.2,q75/q25=44.31 mlp_w1:H=0.8858,top10E=0.16,eRank=364.4,q75/q25=5.09 mlp_w2:H=0.9699,top10E=0.04,eRank=628.7,q75/q25=2.97 vo_prod:H=0.7313,top10E=0.25,eRank=131.4,q75/q25=1858.71 train_time:283845ms step_avg:74.70ms +[2025-09-02 07:01:19] [Rank 0] step:3801/10000 train_time:283858ms step_avg:74.68ms +[2025-09-02 07:01:19] [Rank 0] step:3801/10000 train_time:283858ms step_avg:74.68ms +[2025-09-02 07:01:21] [Rank 0] step:3821/10000 train_time:285359ms step_avg:74.68ms +[2025-09-02 07:01:21] [Rank 0] step:3821/10000 train_time:285359ms step_avg:74.68ms +[2025-09-02 07:01:22] [Rank 0] step:3841/10000 train_time:286925ms 
step_avg:74.70ms +[2025-09-02 07:01:22] [Rank 0] step:3841/10000 train_time:286925ms step_avg:74.70ms +[2025-09-02 07:01:24] [Rank 0] step:3861/10000 train_time:288491ms step_avg:74.72ms +[2025-09-02 07:01:24] [Rank 0] step:3861/10000 train_time:288491ms step_avg:74.72ms +[2025-09-02 07:01:26] [Rank 0] step:3881/10000 train_time:290055ms step_avg:74.74ms +[2025-09-02 07:01:26] [Rank 0] step:3881/10000 train_time:290055ms step_avg:74.74ms +[2025-09-02 07:01:27] [Rank 0] step:3901/10000 train_time:291620ms step_avg:74.76ms +[2025-09-02 07:01:27] [Rank 0] step:3901/10000 train_time:291620ms step_avg:74.76ms +[2025-09-02 07:01:29] [Rank 0] step:3921/10000 train_time:293184ms step_avg:74.77ms +[2025-09-02 07:01:29] [Rank 0] step:3921/10000 train_time:293184ms step_avg:74.77ms +[2025-09-02 07:01:30] [Rank 0] step:3941/10000 train_time:294750ms step_avg:74.79ms +[2025-09-02 07:01:30] [Rank 0] step:3941/10000 train_time:294750ms step_avg:74.79ms +[2025-09-02 07:01:32] [Rank 0] step:3961/10000 train_time:296315ms step_avg:74.81ms +[2025-09-02 07:01:32] [Rank 0] step:3961/10000 train_time:296315ms step_avg:74.81ms +[2025-09-02 07:01:33] [Rank 0] step:3981/10000 train_time:297882ms step_avg:74.83ms +[2025-09-02 07:01:33] [Rank 0] step:3981/10000 train_time:297882ms step_avg:74.83ms +[2025-09-02 07:01:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:01:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:01:47] [Rank 0] PRINT: step:4000/10000 val_loss:4.0129 svd_entropy: attn_qk:H=0.7545,top10E=0.29,eRank=156.0,q75/q25=57.67 attn_vo:H=0.8391,top10E=0.15,eRank=293.8,q75/q25=42.94 mlp_w1:H=0.8881,top10E=0.16,eRank=369.8,q75/q25=5.02 mlp_w2:H=0.9699,top10E=0.04,eRank=629.0,q75/q25=2.96 vo_prod:H=0.7336,top10E=0.25,eRank=133.5,q75/q25=1702.45 train_time:299525ms step_avg:74.88ms +[2025-09-02 07:01:47] [Rank 0] PRINT: step:4000/10000 val_loss:4.0129 svd_entropy: attn_qk:H=0.7545,top10E=0.29,eRank=156.0,q75/q25=57.67 attn_vo:H=0.8391,top10E=0.15,eRank=293.8,q75/q25=42.94 mlp_w1:H=0.8881,top10E=0.16,eRank=369.8,q75/q25=5.02 mlp_w2:H=0.9699,top10E=0.04,eRank=629.0,q75/q25=2.96 vo_prod:H=0.7336,top10E=0.25,eRank=133.5,q75/q25=1702.45 train_time:299525ms step_avg:74.88ms +[2025-09-02 07:01:47] [Rank 0] step:4001/10000 train_time:299540ms step_avg:74.87ms +[2025-09-02 07:01:47] [Rank 0] step:4001/10000 train_time:299540ms step_avg:74.87ms +[2025-09-02 07:01:48] [Rank 0] step:4021/10000 train_time:301039ms step_avg:74.87ms +[2025-09-02 07:01:48] [Rank 0] step:4021/10000 train_time:301039ms step_avg:74.87ms +[2025-09-02 07:01:50] [Rank 0] step:4041/10000 train_time:302601ms step_avg:74.88ms +[2025-09-02 07:01:50] [Rank 0] step:4041/10000 train_time:302601ms step_avg:74.88ms +[2025-09-02 07:01:51] [Rank 0] step:4061/10000 train_time:304165ms step_avg:74.90ms +[2025-09-02 07:01:51] [Rank 0] step:4061/10000 train_time:304165ms step_avg:74.90ms +[2025-09-02 07:01:53] [Rank 0] step:4081/10000 train_time:305909ms step_avg:74.96ms +[2025-09-02 07:01:53] [Rank 0] step:4081/10000 train_time:305909ms step_avg:74.96ms +[2025-09-02 07:01:55] [Rank 0] step:4101/10000 train_time:307474ms step_avg:74.98ms +[2025-09-02 07:01:55] [Rank 0] step:4101/10000 train_time:307474ms step_avg:74.98ms +[2025-09-02 07:01:56] [Rank 0] step:4121/10000 train_time:309038ms step_avg:74.99ms +[2025-09-02 07:01:56] [Rank 0] step:4121/10000 train_time:309038ms step_avg:74.99ms +[2025-09-02 
07:01:58] [Rank 0] step:4141/10000 train_time:310604ms step_avg:75.01ms +[2025-09-02 07:01:58] [Rank 0] step:4141/10000 train_time:310604ms step_avg:75.01ms +[2025-09-02 07:01:59] [Rank 0] step:4161/10000 train_time:312167ms step_avg:75.02ms +[2025-09-02 07:01:59] [Rank 0] step:4161/10000 train_time:312167ms step_avg:75.02ms +[2025-09-02 07:02:01] [Rank 0] step:4181/10000 train_time:313732ms step_avg:75.04ms +[2025-09-02 07:02:01] [Rank 0] step:4181/10000 train_time:313732ms step_avg:75.04ms +[2025-09-02 07:02:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:02:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:02:14] [Rank 0] PRINT: step:4200/10000 val_loss:3.9931 svd_entropy: attn_qk:H=0.7560,top10E=0.28,eRank=157.4,q75/q25=57.96 attn_vo:H=0.8410,top10E=0.15,eRank=296.7,q75/q25=41.55 mlp_w1:H=0.8902,top10E=0.16,eRank=374.8,q75/q25=4.94 mlp_w2:H=0.9700,top10E=0.04,eRank=629.2,q75/q25=2.96 vo_prod:H=0.7366,top10E=0.24,eRank=136.2,q75/q25=1560.93 train_time:315377ms step_avg:75.09ms +[2025-09-02 07:02:14] [Rank 0] PRINT: step:4200/10000 val_loss:3.9931 svd_entropy: attn_qk:H=0.7560,top10E=0.28,eRank=157.4,q75/q25=57.96 attn_vo:H=0.8410,top10E=0.15,eRank=296.7,q75/q25=41.55 mlp_w1:H=0.8902,top10E=0.16,eRank=374.8,q75/q25=4.94 mlp_w2:H=0.9700,top10E=0.04,eRank=629.2,q75/q25=2.96 vo_prod:H=0.7366,top10E=0.24,eRank=136.2,q75/q25=1560.93 train_time:315377ms step_avg:75.09ms +[2025-09-02 07:02:14] [Rank 0] step:4201/10000 train_time:315391ms step_avg:75.08ms +[2025-09-02 07:02:14] [Rank 0] step:4201/10000 train_time:315391ms step_avg:75.08ms +[2025-09-02 07:02:16] [Rank 0] step:4221/10000 train_time:316896ms step_avg:75.08ms +[2025-09-02 07:02:16] [Rank 0] step:4221/10000 train_time:316896ms step_avg:75.08ms +[2025-09-02 07:02:18] [Rank 0] step:4241/10000 train_time:318461ms 
step_avg:75.09ms +[2025-09-02 07:02:18] [Rank 0] step:4241/10000 train_time:318461ms step_avg:75.09ms +[2025-09-02 07:02:19] [Rank 0] step:4261/10000 train_time:320025ms step_avg:75.11ms +[2025-09-02 07:02:19] [Rank 0] step:4261/10000 train_time:320025ms step_avg:75.11ms +[2025-09-02 07:02:21] [Rank 0] step:4281/10000 train_time:321589ms step_avg:75.12ms +[2025-09-02 07:02:21] [Rank 0] step:4281/10000 train_time:321589ms step_avg:75.12ms +[2025-09-02 07:02:22] [Rank 0] step:4301/10000 train_time:323154ms step_avg:75.13ms +[2025-09-02 07:02:22] [Rank 0] step:4301/10000 train_time:323154ms step_avg:75.13ms +[2025-09-02 07:02:24] [Rank 0] step:4321/10000 train_time:324720ms step_avg:75.15ms +[2025-09-02 07:02:24] [Rank 0] step:4321/10000 train_time:324720ms step_avg:75.15ms +[2025-09-02 07:02:25] [Rank 0] step:4341/10000 train_time:326284ms step_avg:75.16ms +[2025-09-02 07:02:25] [Rank 0] step:4341/10000 train_time:326284ms step_avg:75.16ms +[2025-09-02 07:02:27] [Rank 0] step:4361/10000 train_time:327849ms step_avg:75.18ms +[2025-09-02 07:02:27] [Rank 0] step:4361/10000 train_time:327849ms step_avg:75.18ms +[2025-09-02 07:02:28] [Rank 0] step:4381/10000 train_time:329413ms step_avg:75.19ms +[2025-09-02 07:02:28] [Rank 0] step:4381/10000 train_time:329413ms step_avg:75.19ms +[2025-09-02 07:02:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:02:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:02:42] [Rank 0] PRINT: step:4400/10000 val_loss:3.9713 svd_entropy: attn_qk:H=0.7574,top10E=0.28,eRank=158.7,q75/q25=57.38 attn_vo:H=0.8427,top10E=0.14,eRank=299.3,q75/q25=40.70 mlp_w1:H=0.8923,top10E=0.16,eRank=379.7,q75/q25=4.88 mlp_w2:H=0.9700,top10E=0.04,eRank=629.4,q75/q25=2.96 vo_prod:H=0.7391,top10E=0.24,eRank=138.6,q75/q25=1435.59 train_time:331058ms step_avg:75.24ms +[2025-09-02 07:02:42] [Rank 0] PRINT: step:4400/10000 val_loss:3.9713 svd_entropy: attn_qk:H=0.7574,top10E=0.28,eRank=158.7,q75/q25=57.38 attn_vo:H=0.8427,top10E=0.14,eRank=299.3,q75/q25=40.70 mlp_w1:H=0.8923,top10E=0.16,eRank=379.7,q75/q25=4.88 mlp_w2:H=0.9700,top10E=0.04,eRank=629.4,q75/q25=2.96 vo_prod:H=0.7391,top10E=0.24,eRank=138.6,q75/q25=1435.59 train_time:331058ms step_avg:75.24ms +[2025-09-02 07:02:42] [Rank 0] step:4401/10000 train_time:331072ms step_avg:75.23ms +[2025-09-02 07:02:42] [Rank 0] step:4401/10000 train_time:331072ms step_avg:75.23ms +[2025-09-02 07:02:43] [Rank 0] step:4421/10000 train_time:332626ms step_avg:75.24ms +[2025-09-02 07:02:43] [Rank 0] step:4421/10000 train_time:332626ms step_avg:75.24ms +[2025-09-02 07:02:45] [Rank 0] step:4441/10000 train_time:334189ms step_avg:75.25ms +[2025-09-02 07:02:45] [Rank 0] step:4441/10000 train_time:334189ms step_avg:75.25ms +[2025-09-02 07:02:47] [Rank 0] step:4461/10000 train_time:335759ms step_avg:75.27ms +[2025-09-02 07:02:47] [Rank 0] step:4461/10000 train_time:335759ms step_avg:75.27ms +[2025-09-02 07:02:48] [Rank 0] step:4481/10000 train_time:337331ms step_avg:75.28ms +[2025-09-02 07:02:48] [Rank 0] step:4481/10000 train_time:337331ms step_avg:75.28ms +[2025-09-02 07:02:50] [Rank 0] step:4501/10000 train_time:338901ms step_avg:75.29ms +[2025-09-02 07:02:50] [Rank 0] step:4501/10000 train_time:338901ms step_avg:75.29ms +[2025-09-02 07:02:51] [Rank 0] step:4521/10000 train_time:340471ms step_avg:75.31ms +[2025-09-02 07:02:51] [Rank 0] step:4521/10000 train_time:340471ms step_avg:75.31ms +[2025-09-02 
07:02:53] [Rank 0] step:4541/10000 train_time:342042ms step_avg:75.32ms +[2025-09-02 07:02:53] [Rank 0] step:4541/10000 train_time:342042ms step_avg:75.32ms +[2025-09-02 07:02:54] [Rank 0] step:4561/10000 train_time:343615ms step_avg:75.34ms +[2025-09-02 07:02:54] [Rank 0] step:4561/10000 train_time:343615ms step_avg:75.34ms +[2025-09-02 07:02:56] [Rank 0] step:4581/10000 train_time:345187ms step_avg:75.35ms +[2025-09-02 07:02:56] [Rank 0] step:4581/10000 train_time:345187ms step_avg:75.35ms +[2025-09-02 07:02:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:02:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:03:09] [Rank 0] PRINT: step:4600/10000 val_loss:3.9452 svd_entropy: attn_qk:H=0.7589,top10E=0.28,eRank=160.2,q75/q25=57.38 attn_vo:H=0.8445,top10E=0.14,eRank=301.9,q75/q25=39.69 mlp_w1:H=0.8942,top10E=0.16,eRank=384.4,q75/q25=4.82 mlp_w2:H=0.9701,top10E=0.04,eRank=629.6,q75/q25=2.95 vo_prod:H=0.7418,top10E=0.24,eRank=141.1,q75/q25=1347.77 train_time:346840ms step_avg:75.40ms +[2025-09-02 07:03:09] [Rank 0] PRINT: step:4600/10000 val_loss:3.9452 svd_entropy: attn_qk:H=0.7589,top10E=0.28,eRank=160.2,q75/q25=57.38 attn_vo:H=0.8445,top10E=0.14,eRank=301.9,q75/q25=39.69 mlp_w1:H=0.8942,top10E=0.16,eRank=384.4,q75/q25=4.82 mlp_w2:H=0.9701,top10E=0.04,eRank=629.6,q75/q25=2.95 vo_prod:H=0.7418,top10E=0.24,eRank=141.1,q75/q25=1347.77 train_time:346840ms step_avg:75.40ms +[2025-09-02 07:03:10] [Rank 0] step:4601/10000 train_time:346854ms step_avg:75.39ms +[2025-09-02 07:03:10] [Rank 0] step:4601/10000 train_time:346854ms step_avg:75.39ms +[2025-09-02 07:03:11] [Rank 0] step:4621/10000 train_time:348354ms step_avg:75.38ms +[2025-09-02 07:03:11] [Rank 0] step:4621/10000 train_time:348354ms step_avg:75.38ms +[2025-09-02 07:03:13] [Rank 0] step:4641/10000 train_time:349925ms 
step_avg:75.40ms +[2025-09-02 07:03:13] [Rank 0] step:4641/10000 train_time:349925ms step_avg:75.40ms +[2025-09-02 07:03:14] [Rank 0] step:4661/10000 train_time:351500ms step_avg:75.41ms +[2025-09-02 07:03:14] [Rank 0] step:4661/10000 train_time:351500ms step_avg:75.41ms +[2025-09-02 07:03:16] [Rank 0] step:4681/10000 train_time:353073ms step_avg:75.43ms +[2025-09-02 07:03:16] [Rank 0] step:4681/10000 train_time:353073ms step_avg:75.43ms +[2025-09-02 07:03:17] [Rank 0] step:4701/10000 train_time:354647ms step_avg:75.44ms +[2025-09-02 07:03:17] [Rank 0] step:4701/10000 train_time:354647ms step_avg:75.44ms +[2025-09-02 07:03:19] [Rank 0] step:4721/10000 train_time:356219ms step_avg:75.45ms +[2025-09-02 07:03:19] [Rank 0] step:4721/10000 train_time:356219ms step_avg:75.45ms +[2025-09-02 07:03:21] [Rank 0] step:4741/10000 train_time:357794ms step_avg:75.47ms +[2025-09-02 07:03:21] [Rank 0] step:4741/10000 train_time:357794ms step_avg:75.47ms +[2025-09-02 07:03:22] [Rank 0] step:4761/10000 train_time:359367ms step_avg:75.48ms +[2025-09-02 07:03:22] [Rank 0] step:4761/10000 train_time:359367ms step_avg:75.48ms +[2025-09-02 07:03:24] [Rank 0] step:4781/10000 train_time:360941ms step_avg:75.49ms +[2025-09-02 07:03:24] [Rank 0] step:4781/10000 train_time:360941ms step_avg:75.49ms +[2025-09-02 07:03:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:03:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:03:37] [Rank 0] PRINT: step:4800/10000 val_loss:3.9323 svd_entropy: attn_qk:H=0.7604,top10E=0.28,eRank=161.6,q75/q25=57.09 attn_vo:H=0.8461,top10E=0.14,eRank=304.4,q75/q25=38.77 mlp_w1:H=0.8959,top10E=0.15,eRank=388.7,q75/q25=4.76 mlp_w2:H=0.9701,top10E=0.04,eRank=629.7,q75/q25=2.95 vo_prod:H=0.7444,top10E=0.23,eRank=143.6,q75/q25=1254.86 train_time:362597ms step_avg:75.54ms +[2025-09-02 07:03:37] [Rank 0] PRINT: step:4800/10000 val_loss:3.9323 svd_entropy: attn_qk:H=0.7604,top10E=0.28,eRank=161.6,q75/q25=57.09 attn_vo:H=0.8461,top10E=0.14,eRank=304.4,q75/q25=38.77 mlp_w1:H=0.8959,top10E=0.15,eRank=388.7,q75/q25=4.76 mlp_w2:H=0.9701,top10E=0.04,eRank=629.7,q75/q25=2.95 vo_prod:H=0.7444,top10E=0.23,eRank=143.6,q75/q25=1254.86 train_time:362597ms step_avg:75.54ms +[2025-09-02 07:03:37] [Rank 0] step:4801/10000 train_time:362610ms step_avg:75.53ms +[2025-09-02 07:03:37] [Rank 0] step:4801/10000 train_time:362610ms step_avg:75.53ms +[2025-09-02 07:03:39] [Rank 0] step:4821/10000 train_time:364111ms step_avg:75.53ms +[2025-09-02 07:03:39] [Rank 0] step:4821/10000 train_time:364111ms step_avg:75.53ms +[2025-09-02 07:03:40] [Rank 0] step:4841/10000 train_time:365682ms step_avg:75.54ms +[2025-09-02 07:03:40] [Rank 0] step:4841/10000 train_time:365682ms step_avg:75.54ms +[2025-09-02 07:03:42] [Rank 0] step:4861/10000 train_time:367257ms step_avg:75.55ms +[2025-09-02 07:03:42] [Rank 0] step:4861/10000 train_time:367257ms step_avg:75.55ms +[2025-09-02 07:03:44] [Rank 0] step:4881/10000 train_time:368831ms step_avg:75.56ms +[2025-09-02 07:03:44] [Rank 0] step:4881/10000 train_time:368831ms step_avg:75.56ms +[2025-09-02 07:03:45] [Rank 0] step:4901/10000 train_time:370403ms step_avg:75.58ms +[2025-09-02 07:03:45] [Rank 0] step:4901/10000 train_time:370403ms step_avg:75.58ms +[2025-09-02 07:03:47] [Rank 0] step:4921/10000 train_time:371979ms step_avg:75.59ms +[2025-09-02 07:03:47] [Rank 0] step:4921/10000 train_time:371979ms step_avg:75.59ms +[2025-09-02 
07:03:48] [Rank 0] step:4941/10000 train_time:373555ms step_avg:75.60ms +[2025-09-02 07:03:48] [Rank 0] step:4941/10000 train_time:373555ms step_avg:75.60ms +[2025-09-02 07:03:50] [Rank 0] step:4961/10000 train_time:375128ms step_avg:75.62ms +[2025-09-02 07:03:50] [Rank 0] step:4961/10000 train_time:375128ms step_avg:75.62ms +[2025-09-02 07:03:51] [Rank 0] step:4981/10000 train_time:376702ms step_avg:75.63ms +[2025-09-02 07:03:51] [Rank 0] step:4981/10000 train_time:376702ms step_avg:75.63ms +[2025-09-02 07:03:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:03:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:04:05] [Rank 0] PRINT: step:5000/10000 val_loss:3.9120 svd_entropy: attn_qk:H=0.7617,top10E=0.28,eRank=162.9,q75/q25=57.07 attn_vo:H=0.8474,top10E=0.14,eRank=306.5,q75/q25=38.04 mlp_w1:H=0.8975,top10E=0.15,eRank=392.7,q75/q25=4.72 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.95 vo_prod:H=0.7463,top10E=0.23,eRank=145.5,q75/q25=1152.82 train_time:378361ms step_avg:75.67ms +[2025-09-02 07:04:05] [Rank 0] PRINT: step:5000/10000 val_loss:3.9120 svd_entropy: attn_qk:H=0.7617,top10E=0.28,eRank=162.9,q75/q25=57.07 attn_vo:H=0.8474,top10E=0.14,eRank=306.5,q75/q25=38.04 mlp_w1:H=0.8975,top10E=0.15,eRank=392.7,q75/q25=4.72 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.95 vo_prod:H=0.7463,top10E=0.23,eRank=145.5,q75/q25=1152.82 train_time:378361ms step_avg:75.67ms +[2025-09-02 07:04:05] [Rank 0] step:5001/10000 train_time:378375ms step_avg:75.66ms +[2025-09-02 07:04:05] [Rank 0] step:5001/10000 train_time:378375ms step_avg:75.66ms +[2025-09-02 07:04:07] [Rank 0] step:5021/10000 train_time:379886ms step_avg:75.66ms +[2025-09-02 07:04:07] [Rank 0] step:5021/10000 train_time:379886ms step_avg:75.66ms +[2025-09-02 07:04:08] [Rank 0] step:5041/10000 train_time:381459ms 
step_avg:75.67ms +[2025-09-02 07:04:08] [Rank 0] step:5041/10000 train_time:381459ms step_avg:75.67ms +[2025-09-02 07:04:10] [Rank 0] step:5061/10000 train_time:383027ms step_avg:75.68ms +[2025-09-02 07:04:10] [Rank 0] step:5061/10000 train_time:383027ms step_avg:75.68ms +[2025-09-02 07:04:11] [Rank 0] step:5081/10000 train_time:384598ms step_avg:75.69ms +[2025-09-02 07:04:11] [Rank 0] step:5081/10000 train_time:384598ms step_avg:75.69ms +[2025-09-02 07:04:13] [Rank 0] step:5101/10000 train_time:386169ms step_avg:75.70ms +[2025-09-02 07:04:13] [Rank 0] step:5101/10000 train_time:386169ms step_avg:75.70ms +[2025-09-02 07:04:14] [Rank 0] step:5121/10000 train_time:387741ms step_avg:75.72ms +[2025-09-02 07:04:14] [Rank 0] step:5121/10000 train_time:387741ms step_avg:75.72ms +[2025-09-02 07:04:16] [Rank 0] step:5141/10000 train_time:389316ms step_avg:75.73ms +[2025-09-02 07:04:16] [Rank 0] step:5141/10000 train_time:389316ms step_avg:75.73ms +[2025-09-02 07:04:18] [Rank 0] step:5161/10000 train_time:390887ms step_avg:75.74ms +[2025-09-02 07:04:18] [Rank 0] step:5161/10000 train_time:390887ms step_avg:75.74ms +[2025-09-02 07:04:19] [Rank 0] step:5181/10000 train_time:392462ms step_avg:75.75ms +[2025-09-02 07:04:19] [Rank 0] step:5181/10000 train_time:392462ms step_avg:75.75ms +[2025-09-02 07:04:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:04:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:04:33] [Rank 0] PRINT: step:5200/10000 val_loss:3.8940 svd_entropy: attn_qk:H=0.7631,top10E=0.27,eRank=164.3,q75/q25=56.68 attn_vo:H=0.8489,top10E=0.14,eRank=308.7,q75/q25=37.05 mlp_w1:H=0.8991,top10E=0.15,eRank=396.6,q75/q25=4.67 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.94 vo_prod:H=0.7486,top10E=0.23,eRank=147.7,q75/q25=1063.04 train_time:394142ms step_avg:75.80ms +[2025-09-02 07:04:33] [Rank 0] PRINT: step:5200/10000 val_loss:3.8940 svd_entropy: attn_qk:H=0.7631,top10E=0.27,eRank=164.3,q75/q25=56.68 attn_vo:H=0.8489,top10E=0.14,eRank=308.7,q75/q25=37.05 mlp_w1:H=0.8991,top10E=0.15,eRank=396.6,q75/q25=4.67 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.94 vo_prod:H=0.7486,top10E=0.23,eRank=147.7,q75/q25=1063.04 train_time:394142ms step_avg:75.80ms +[2025-09-02 07:04:33] [Rank 0] step:5201/10000 train_time:394156ms step_avg:75.78ms +[2025-09-02 07:04:33] [Rank 0] step:5201/10000 train_time:394156ms step_avg:75.78ms +[2025-09-02 07:04:34] [Rank 0] step:5221/10000 train_time:395681ms step_avg:75.79ms +[2025-09-02 07:04:34] [Rank 0] step:5221/10000 train_time:395681ms step_avg:75.79ms +[2025-09-02 07:04:36] [Rank 0] step:5241/10000 train_time:397281ms step_avg:75.80ms +[2025-09-02 07:04:36] [Rank 0] step:5241/10000 train_time:397281ms step_avg:75.80ms +[2025-09-02 07:04:38] [Rank 0] step:5261/10000 train_time:398882ms step_avg:75.82ms +[2025-09-02 07:04:38] [Rank 0] step:5261/10000 train_time:398882ms step_avg:75.82ms +[2025-09-02 07:04:39] [Rank 0] step:5281/10000 train_time:400483ms step_avg:75.83ms +[2025-09-02 07:04:39] [Rank 0] step:5281/10000 train_time:400483ms step_avg:75.83ms +[2025-09-02 07:04:41] [Rank 0] step:5301/10000 train_time:402093ms step_avg:75.85ms +[2025-09-02 07:04:41] [Rank 0] step:5301/10000 train_time:402093ms step_avg:75.85ms +[2025-09-02 07:04:42] [Rank 0] step:5321/10000 train_time:403694ms step_avg:75.87ms +[2025-09-02 07:04:42] [Rank 0] step:5321/10000 train_time:403694ms step_avg:75.87ms +[2025-09-02 
07:04:44] [Rank 0] step:5341/10000 train_time:405296ms step_avg:75.88ms +[2025-09-02 07:04:44] [Rank 0] step:5341/10000 train_time:405296ms step_avg:75.88ms +[2025-09-02 07:04:46] [Rank 0] step:5361/10000 train_time:406908ms step_avg:75.90ms +[2025-09-02 07:04:46] [Rank 0] step:5361/10000 train_time:406908ms step_avg:75.90ms +[2025-09-02 07:04:47] [Rank 0] step:5381/10000 train_time:408515ms step_avg:75.92ms +[2025-09-02 07:04:47] [Rank 0] step:5381/10000 train_time:408515ms step_avg:75.92ms +[2025-09-02 07:04:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:04:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:05:00] [Rank 0] PRINT: step:5400/10000 val_loss:3.8770 svd_entropy: attn_qk:H=0.7641,top10E=0.27,eRank=165.3,q75/q25=56.12 attn_vo:H=0.8500,top10E=0.13,eRank=310.6,q75/q25=36.09 mlp_w1:H=0.9006,top10E=0.15,eRank=400.4,q75/q25=4.63 mlp_w2:H=0.9702,top10E=0.04,eRank=629.9,q75/q25=2.94 vo_prod:H=0.7503,top10E=0.23,eRank=149.4,q75/q25=991.88 train_time:410201ms step_avg:75.96ms +[2025-09-02 07:05:00] [Rank 0] PRINT: step:5400/10000 val_loss:3.8770 svd_entropy: attn_qk:H=0.7641,top10E=0.27,eRank=165.3,q75/q25=56.12 attn_vo:H=0.8500,top10E=0.13,eRank=310.6,q75/q25=36.09 mlp_w1:H=0.9006,top10E=0.15,eRank=400.4,q75/q25=4.63 mlp_w2:H=0.9702,top10E=0.04,eRank=629.9,q75/q25=2.94 vo_prod:H=0.7503,top10E=0.23,eRank=149.4,q75/q25=991.88 train_time:410201ms step_avg:75.96ms +[2025-09-02 07:05:01] [Rank 0] step:5401/10000 train_time:410214ms step_avg:75.95ms +[2025-09-02 07:05:01] [Rank 0] step:5401/10000 train_time:410214ms step_avg:75.95ms +[2025-09-02 07:05:02] [Rank 0] step:5421/10000 train_time:411768ms step_avg:75.96ms +[2025-09-02 07:05:02] [Rank 0] step:5421/10000 train_time:411768ms step_avg:75.96ms +[2025-09-02 07:05:04] [Rank 0] step:5441/10000 train_time:413367ms 
step_avg:75.97ms +[2025-09-02 07:05:04] [Rank 0] step:5441/10000 train_time:413367ms step_avg:75.97ms +[2025-09-02 07:05:05] [Rank 0] step:5461/10000 train_time:414975ms step_avg:75.99ms +[2025-09-02 07:05:05] [Rank 0] step:5461/10000 train_time:414975ms step_avg:75.99ms +[2025-09-02 07:05:07] [Rank 0] step:5481/10000 train_time:416579ms step_avg:76.00ms +[2025-09-02 07:05:07] [Rank 0] step:5481/10000 train_time:416579ms step_avg:76.00ms +[2025-09-02 07:05:09] [Rank 0] step:5501/10000 train_time:418190ms step_avg:76.02ms +[2025-09-02 07:05:09] [Rank 0] step:5501/10000 train_time:418190ms step_avg:76.02ms +[2025-09-02 07:05:10] [Rank 0] step:5521/10000 train_time:419798ms step_avg:76.04ms +[2025-09-02 07:05:10] [Rank 0] step:5521/10000 train_time:419798ms step_avg:76.04ms +[2025-09-02 07:05:12] [Rank 0] step:5541/10000 train_time:421403ms step_avg:76.05ms +[2025-09-02 07:05:12] [Rank 0] step:5541/10000 train_time:421403ms step_avg:76.05ms +[2025-09-02 07:05:13] [Rank 0] step:5561/10000 train_time:423010ms step_avg:76.07ms +[2025-09-02 07:05:13] [Rank 0] step:5561/10000 train_time:423010ms step_avg:76.07ms +[2025-09-02 07:05:15] [Rank 0] step:5581/10000 train_time:424614ms step_avg:76.08ms +[2025-09-02 07:05:15] [Rank 0] step:5581/10000 train_time:424614ms step_avg:76.08ms +[2025-09-02 07:05:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:05:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:05:29] [Rank 0] PRINT: step:5600/10000 val_loss:3.8636 svd_entropy: attn_qk:H=0.7654,top10E=0.27,eRank=166.7,q75/q25=55.97 attn_vo:H=0.8512,top10E=0.13,eRank=312.4,q75/q25=35.23 mlp_w1:H=0.9019,top10E=0.15,eRank=403.8,q75/q25=4.59 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.93 vo_prod:H=0.7521,top10E=0.22,eRank=151.2,q75/q25=917.55 train_time:426302ms step_avg:76.13ms +[2025-09-02 07:05:29] [Rank 0] PRINT: step:5600/10000 val_loss:3.8636 svd_entropy: attn_qk:H=0.7654,top10E=0.27,eRank=166.7,q75/q25=55.97 attn_vo:H=0.8512,top10E=0.13,eRank=312.4,q75/q25=35.23 mlp_w1:H=0.9019,top10E=0.15,eRank=403.8,q75/q25=4.59 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.93 vo_prod:H=0.7521,top10E=0.22,eRank=151.2,q75/q25=917.55 train_time:426302ms step_avg:76.13ms +[2025-09-02 07:05:29] [Rank 0] step:5601/10000 train_time:426316ms step_avg:76.11ms +[2025-09-02 07:05:29] [Rank 0] step:5601/10000 train_time:426316ms step_avg:76.11ms +[2025-09-02 07:05:30] [Rank 0] step:5621/10000 train_time:427856ms step_avg:76.12ms +[2025-09-02 07:05:30] [Rank 0] step:5621/10000 train_time:427856ms step_avg:76.12ms +[2025-09-02 07:05:32] [Rank 0] step:5641/10000 train_time:429457ms step_avg:76.13ms +[2025-09-02 07:05:32] [Rank 0] step:5641/10000 train_time:429457ms step_avg:76.13ms +[2025-09-02 07:05:33] [Rank 0] step:5661/10000 train_time:431058ms step_avg:76.15ms +[2025-09-02 07:05:33] [Rank 0] step:5661/10000 train_time:431058ms step_avg:76.15ms +[2025-09-02 07:05:35] [Rank 0] step:5681/10000 train_time:432666ms step_avg:76.16ms +[2025-09-02 07:05:35] [Rank 0] step:5681/10000 train_time:432666ms step_avg:76.16ms +[2025-09-02 07:05:37] [Rank 0] step:5701/10000 train_time:434266ms step_avg:76.17ms +[2025-09-02 07:05:37] [Rank 0] step:5701/10000 train_time:434266ms step_avg:76.17ms +[2025-09-02 07:05:38] [Rank 0] step:5721/10000 train_time:435873ms step_avg:76.19ms +[2025-09-02 07:05:38] [Rank 0] step:5721/10000 train_time:435873ms step_avg:76.19ms +[2025-09-02 07:05:40] 
[Rank 0] step:5741/10000 train_time:437475ms step_avg:76.20ms +[2025-09-02 07:05:40] [Rank 0] step:5741/10000 train_time:437475ms step_avg:76.20ms +[2025-09-02 07:05:41] [Rank 0] step:5761/10000 train_time:439080ms step_avg:76.22ms +[2025-09-02 07:05:41] [Rank 0] step:5761/10000 train_time:439080ms step_avg:76.22ms +[2025-09-02 07:05:43] [Rank 0] step:5781/10000 train_time:440690ms step_avg:76.23ms +[2025-09-02 07:05:43] [Rank 0] step:5781/10000 train_time:440690ms step_avg:76.23ms +[2025-09-02 07:05:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:05:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:05:57] [Rank 0] PRINT: step:5800/10000 val_loss:3.8528 svd_entropy: attn_qk:H=0.7666,top10E=0.27,eRank=167.9,q75/q25=55.38 attn_vo:H=0.8522,top10E=0.13,eRank=314.1,q75/q25=34.69 mlp_w1:H=0.9031,top10E=0.14,eRank=407.0,q75/q25=4.56 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.92 vo_prod:H=0.7536,top10E=0.22,eRank=152.7,q75/q25=863.99 train_time:442378ms step_avg:76.27ms +[2025-09-02 07:05:57] [Rank 0] PRINT: step:5800/10000 val_loss:3.8528 svd_entropy: attn_qk:H=0.7666,top10E=0.27,eRank=167.9,q75/q25=55.38 attn_vo:H=0.8522,top10E=0.13,eRank=314.1,q75/q25=34.69 mlp_w1:H=0.9031,top10E=0.14,eRank=407.0,q75/q25=4.56 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.92 vo_prod:H=0.7536,top10E=0.22,eRank=152.7,q75/q25=863.99 train_time:442378ms step_avg:76.27ms +[2025-09-02 07:05:57] [Rank 0] step:5801/10000 train_time:442392ms step_avg:76.26ms +[2025-09-02 07:05:57] [Rank 0] step:5801/10000 train_time:442392ms step_avg:76.26ms +[2025-09-02 07:05:58] [Rank 0] step:5821/10000 train_time:443929ms step_avg:76.26ms +[2025-09-02 07:05:58] [Rank 0] step:5821/10000 train_time:443929ms step_avg:76.26ms +[2025-09-02 07:06:00] [Rank 0] step:5841/10000 train_time:445532ms step_avg:76.28ms 
+[2025-09-02 07:06:00] [Rank 0] step:5841/10000 train_time:445532ms step_avg:76.28ms +[2025-09-02 07:06:02] [Rank 0] step:5861/10000 train_time:447142ms step_avg:76.29ms +[2025-09-02 07:06:02] [Rank 0] step:5861/10000 train_time:447142ms step_avg:76.29ms +[2025-09-02 07:06:03] [Rank 0] step:5881/10000 train_time:448747ms step_avg:76.30ms +[2025-09-02 07:06:03] [Rank 0] step:5881/10000 train_time:448747ms step_avg:76.30ms +[2025-09-02 07:06:05] [Rank 0] step:5901/10000 train_time:450355ms step_avg:76.32ms +[2025-09-02 07:06:05] [Rank 0] step:5901/10000 train_time:450355ms step_avg:76.32ms +[2025-09-02 07:06:06] [Rank 0] step:5921/10000 train_time:451959ms step_avg:76.33ms +[2025-09-02 07:06:06] [Rank 0] step:5921/10000 train_time:451959ms step_avg:76.33ms +[2025-09-02 07:06:08] [Rank 0] step:5941/10000 train_time:453569ms step_avg:76.35ms +[2025-09-02 07:06:08] [Rank 0] step:5941/10000 train_time:453569ms step_avg:76.35ms +[2025-09-02 07:06:10] [Rank 0] step:5961/10000 train_time:455181ms step_avg:76.36ms +[2025-09-02 07:06:10] [Rank 0] step:5961/10000 train_time:455181ms step_avg:76.36ms +[2025-09-02 07:06:11] [Rank 0] step:5981/10000 train_time:456791ms step_avg:76.37ms +[2025-09-02 07:06:11] [Rank 0] step:5981/10000 train_time:456791ms step_avg:76.37ms +[2025-09-02 07:06:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:06:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:06:25] [Rank 0] PRINT: step:6000/10000 val_loss:3.8296 svd_entropy: attn_qk:H=0.7677,top10E=0.27,eRank=169.1,q75/q25=54.82 attn_vo:H=0.8532,top10E=0.13,eRank=315.7,q75/q25=33.67 mlp_w1:H=0.9043,top10E=0.14,eRank=410.3,q75/q25=4.52 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7550,top10E=0.22,eRank=154.1,q75/q25=820.50 train_time:458479ms step_avg:76.41ms +[2025-09-02 07:06:25] [Rank 0] PRINT: step:6000/10000 val_loss:3.8296 svd_entropy: attn_qk:H=0.7677,top10E=0.27,eRank=169.1,q75/q25=54.82 attn_vo:H=0.8532,top10E=0.13,eRank=315.7,q75/q25=33.67 mlp_w1:H=0.9043,top10E=0.14,eRank=410.3,q75/q25=4.52 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7550,top10E=0.22,eRank=154.1,q75/q25=820.50 train_time:458479ms step_avg:76.41ms +[2025-09-02 07:06:25] [Rank 0] step:6001/10000 train_time:458492ms step_avg:76.40ms +[2025-09-02 07:06:25] [Rank 0] step:6001/10000 train_time:458492ms step_avg:76.40ms +[2025-09-02 07:06:26] [Rank 0] step:6021/10000 train_time:460035ms step_avg:76.41ms +[2025-09-02 07:06:26] [Rank 0] step:6021/10000 train_time:460035ms step_avg:76.41ms +[2025-09-02 07:06:28] [Rank 0] step:6041/10000 train_time:461640ms step_avg:76.42ms +[2025-09-02 07:06:28] [Rank 0] step:6041/10000 train_time:461640ms step_avg:76.42ms +[2025-09-02 07:06:30] [Rank 0] step:6061/10000 train_time:463253ms step_avg:76.43ms +[2025-09-02 07:06:30] [Rank 0] step:6061/10000 train_time:463253ms step_avg:76.43ms +[2025-09-02 07:06:31] [Rank 0] step:6081/10000 train_time:464862ms step_avg:76.44ms +[2025-09-02 07:06:31] [Rank 0] step:6081/10000 train_time:464862ms step_avg:76.44ms +[2025-09-02 07:06:33] [Rank 0] step:6101/10000 train_time:466472ms step_avg:76.46ms +[2025-09-02 07:06:33] [Rank 0] step:6101/10000 train_time:466472ms step_avg:76.46ms +[2025-09-02 07:06:35] [Rank 0] step:6121/10000 train_time:468143ms step_avg:76.48ms +[2025-09-02 07:06:35] [Rank 0] step:6121/10000 train_time:468143ms step_avg:76.48ms +[2025-09-02 07:06:36] 
[Rank 0] step:6141/10000 train_time:469762ms step_avg:76.50ms +[2025-09-02 07:06:36] [Rank 0] step:6141/10000 train_time:469762ms step_avg:76.50ms +[2025-09-02 07:06:38] [Rank 0] step:6161/10000 train_time:471371ms step_avg:76.51ms +[2025-09-02 07:06:38] [Rank 0] step:6161/10000 train_time:471371ms step_avg:76.51ms +[2025-09-02 07:06:39] [Rank 0] step:6181/10000 train_time:472976ms step_avg:76.52ms +[2025-09-02 07:06:39] [Rank 0] step:6181/10000 train_time:472976ms step_avg:76.52ms +[2025-09-02 07:06:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:06:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:06:53] [Rank 0] PRINT: step:6200/10000 val_loss:3.8156 svd_entropy: attn_qk:H=0.7688,top10E=0.27,eRank=170.2,q75/q25=54.65 attn_vo:H=0.8541,top10E=0.13,eRank=317.1,q75/q25=33.10 mlp_w1:H=0.9054,top10E=0.14,eRank=413.1,q75/q25=4.49 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7561,top10E=0.22,eRank=155.3,q75/q25=778.35 train_time:474667ms step_avg:76.56ms +[2025-09-02 07:06:53] [Rank 0] PRINT: step:6200/10000 val_loss:3.8156 svd_entropy: attn_qk:H=0.7688,top10E=0.27,eRank=170.2,q75/q25=54.65 attn_vo:H=0.8541,top10E=0.13,eRank=317.1,q75/q25=33.10 mlp_w1:H=0.9054,top10E=0.14,eRank=413.1,q75/q25=4.49 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7561,top10E=0.22,eRank=155.3,q75/q25=778.35 train_time:474667ms step_avg:76.56ms +[2025-09-02 07:06:53] [Rank 0] step:6201/10000 train_time:474680ms step_avg:76.55ms +[2025-09-02 07:06:53] [Rank 0] step:6201/10000 train_time:474680ms step_avg:76.55ms +[2025-09-02 07:06:55] [Rank 0] step:6221/10000 train_time:476228ms step_avg:76.55ms +[2025-09-02 07:06:55] [Rank 0] step:6221/10000 train_time:476228ms step_avg:76.55ms +[2025-09-02 07:06:56] [Rank 0] step:6241/10000 train_time:477880ms step_avg:76.57ms 
+[2025-09-02 07:06:56] [Rank 0] step:6241/10000 train_time:477880ms step_avg:76.57ms +[2025-09-02 07:06:58] [Rank 0] step:6261/10000 train_time:479489ms step_avg:76.58ms +[2025-09-02 07:06:58] [Rank 0] step:6261/10000 train_time:479489ms step_avg:76.58ms +[2025-09-02 07:07:00] [Rank 0] step:6281/10000 train_time:481100ms step_avg:76.60ms +[2025-09-02 07:07:00] [Rank 0] step:6281/10000 train_time:481100ms step_avg:76.60ms +[2025-09-02 07:07:01] [Rank 0] step:6301/10000 train_time:482712ms step_avg:76.61ms +[2025-09-02 07:07:01] [Rank 0] step:6301/10000 train_time:482712ms step_avg:76.61ms +[2025-09-02 07:07:03] [Rank 0] step:6321/10000 train_time:484320ms step_avg:76.62ms +[2025-09-02 07:07:03] [Rank 0] step:6321/10000 train_time:484320ms step_avg:76.62ms +[2025-09-02 07:07:04] [Rank 0] step:6341/10000 train_time:485934ms step_avg:76.63ms +[2025-09-02 07:07:04] [Rank 0] step:6341/10000 train_time:485934ms step_avg:76.63ms +[2025-09-02 07:07:06] [Rank 0] step:6361/10000 train_time:487549ms step_avg:76.65ms +[2025-09-02 07:07:06] [Rank 0] step:6361/10000 train_time:487549ms step_avg:76.65ms +[2025-09-02 07:07:08] [Rank 0] step:6381/10000 train_time:489161ms step_avg:76.66ms +[2025-09-02 07:07:08] [Rank 0] step:6381/10000 train_time:489161ms step_avg:76.66ms +[2025-09-02 07:07:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:07:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:07:21] [Rank 0] PRINT: step:6400/10000 val_loss:3.7979 svd_entropy: attn_qk:H=0.7698,top10E=0.27,eRank=171.3,q75/q25=54.42 attn_vo:H=0.8548,top10E=0.13,eRank=318.4,q75/q25=32.65 mlp_w1:H=0.9064,top10E=0.14,eRank=415.7,q75/q25=4.47 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7571,top10E=0.22,eRank=156.4,q75/q25=742.53 train_time:490851ms step_avg:76.70ms +[2025-09-02 07:07:21] [Rank 0] PRINT: step:6400/10000 val_loss:3.7979 svd_entropy: attn_qk:H=0.7698,top10E=0.27,eRank=171.3,q75/q25=54.42 attn_vo:H=0.8548,top10E=0.13,eRank=318.4,q75/q25=32.65 mlp_w1:H=0.9064,top10E=0.14,eRank=415.7,q75/q25=4.47 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7571,top10E=0.22,eRank=156.4,q75/q25=742.53 train_time:490851ms step_avg:76.70ms +[2025-09-02 07:07:21] [Rank 0] step:6401/10000 train_time:490864ms step_avg:76.69ms +[2025-09-02 07:07:21] [Rank 0] step:6401/10000 train_time:490864ms step_avg:76.69ms +[2025-09-02 07:07:23] [Rank 0] step:6421/10000 train_time:492399ms step_avg:76.69ms +[2025-09-02 07:07:23] [Rank 0] step:6421/10000 train_time:492399ms step_avg:76.69ms +[2025-09-02 07:07:24] [Rank 0] step:6441/10000 train_time:494007ms step_avg:76.70ms +[2025-09-02 07:07:24] [Rank 0] step:6441/10000 train_time:494007ms step_avg:76.70ms +[2025-09-02 07:07:26] [Rank 0] step:6461/10000 train_time:495619ms step_avg:76.71ms +[2025-09-02 07:07:26] [Rank 0] step:6461/10000 train_time:495619ms step_avg:76.71ms +[2025-09-02 07:07:28] [Rank 0] step:6481/10000 train_time:497237ms step_avg:76.72ms +[2025-09-02 07:07:28] [Rank 0] step:6481/10000 train_time:497237ms step_avg:76.72ms +[2025-09-02 07:07:29] [Rank 0] step:6501/10000 train_time:498846ms step_avg:76.73ms +[2025-09-02 07:07:29] [Rank 0] step:6501/10000 train_time:498846ms step_avg:76.73ms +[2025-09-02 07:07:31] [Rank 0] step:6521/10000 train_time:500451ms step_avg:76.74ms +[2025-09-02 07:07:31] [Rank 0] step:6521/10000 train_time:500451ms step_avg:76.74ms +[2025-09-02 07:07:32] 
[Rank 0] step:6541/10000 train_time:502061ms step_avg:76.76ms +[2025-09-02 07:07:32] [Rank 0] step:6541/10000 train_time:502061ms step_avg:76.76ms +[2025-09-02 07:07:34] [Rank 0] step:6561/10000 train_time:503674ms step_avg:76.77ms +[2025-09-02 07:07:34] [Rank 0] step:6561/10000 train_time:503674ms step_avg:76.77ms +[2025-09-02 07:07:36] [Rank 0] step:6581/10000 train_time:505281ms step_avg:76.78ms +[2025-09-02 07:07:36] [Rank 0] step:6581/10000 train_time:505281ms step_avg:76.78ms +[2025-09-02 07:07:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:07:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:07:49] [Rank 0] PRINT: step:6600/10000 val_loss:3.7838 svd_entropy: attn_qk:H=0.7706,top10E=0.27,eRank=172.1,q75/q25=54.34 attn_vo:H=0.8556,top10E=0.13,eRank=319.7,q75/q25=32.14 mlp_w1:H=0.9073,top10E=0.14,eRank=418.2,q75/q25=4.44 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.92 vo_prod:H=0.7585,top10E=0.22,eRank=157.7,q75/q25=701.98 train_time:506977ms step_avg:76.81ms +[2025-09-02 07:07:49] [Rank 0] PRINT: step:6600/10000 val_loss:3.7838 svd_entropy: attn_qk:H=0.7706,top10E=0.27,eRank=172.1,q75/q25=54.34 attn_vo:H=0.8556,top10E=0.13,eRank=319.7,q75/q25=32.14 mlp_w1:H=0.9073,top10E=0.14,eRank=418.2,q75/q25=4.44 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.92 vo_prod:H=0.7585,top10E=0.22,eRank=157.7,q75/q25=701.98 train_time:506977ms step_avg:76.81ms +[2025-09-02 07:07:49] [Rank 0] step:6601/10000 train_time:506990ms step_avg:76.81ms +[2025-09-02 07:07:49] [Rank 0] step:6601/10000 train_time:506990ms step_avg:76.81ms +[2025-09-02 07:07:51] [Rank 0] step:6621/10000 train_time:508522ms step_avg:76.80ms +[2025-09-02 07:07:51] [Rank 0] step:6621/10000 train_time:508522ms step_avg:76.80ms +[2025-09-02 07:07:53] [Rank 0] step:6641/10000 train_time:510139ms step_avg:76.82ms 
+[2025-09-02 07:07:53] [Rank 0] step:6641/10000 train_time:510139ms step_avg:76.82ms +[2025-09-02 07:07:54] [Rank 0] step:6661/10000 train_time:511747ms step_avg:76.83ms +[2025-09-02 07:07:54] [Rank 0] step:6661/10000 train_time:511747ms step_avg:76.83ms +[2025-09-02 07:07:56] [Rank 0] step:6681/10000 train_time:513372ms step_avg:76.84ms +[2025-09-02 07:07:56] [Rank 0] step:6681/10000 train_time:513372ms step_avg:76.84ms +[2025-09-02 07:07:57] [Rank 0] step:6701/10000 train_time:515013ms step_avg:76.86ms +[2025-09-02 07:07:57] [Rank 0] step:6701/10000 train_time:515013ms step_avg:76.86ms +[2025-09-02 07:07:59] [Rank 0] step:6721/10000 train_time:516651ms step_avg:76.87ms +[2025-09-02 07:07:59] [Rank 0] step:6721/10000 train_time:516651ms step_avg:76.87ms +[2025-09-02 07:08:01] [Rank 0] step:6741/10000 train_time:518287ms step_avg:76.89ms +[2025-09-02 07:08:01] [Rank 0] step:6741/10000 train_time:518287ms step_avg:76.89ms +[2025-09-02 07:08:02] [Rank 0] step:6761/10000 train_time:519925ms step_avg:76.90ms +[2025-09-02 07:08:02] [Rank 0] step:6761/10000 train_time:519925ms step_avg:76.90ms +[2025-09-02 07:08:04] [Rank 0] step:6781/10000 train_time:521569ms step_avg:76.92ms +[2025-09-02 07:08:04] [Rank 0] step:6781/10000 train_time:521569ms step_avg:76.92ms +[2025-09-02 07:08:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:08:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:08:18] [Rank 0] PRINT: step:6800/10000 val_loss:3.7677 svd_entropy: attn_qk:H=0.7713,top10E=0.26,eRank=172.8,q75/q25=53.81 attn_vo:H=0.8563,top10E=0.13,eRank=320.8,q75/q25=31.63 mlp_w1:H=0.9081,top10E=0.14,eRank=420.3,q75/q25=4.42 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7595,top10E=0.22,eRank=158.8,q75/q25=678.78 train_time:523298ms step_avg:76.96ms +[2025-09-02 07:08:18] [Rank 0] PRINT: step:6800/10000 val_loss:3.7677 svd_entropy: attn_qk:H=0.7713,top10E=0.26,eRank=172.8,q75/q25=53.81 attn_vo:H=0.8563,top10E=0.13,eRank=320.8,q75/q25=31.63 mlp_w1:H=0.9081,top10E=0.14,eRank=420.3,q75/q25=4.42 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7595,top10E=0.22,eRank=158.8,q75/q25=678.78 train_time:523298ms step_avg:76.96ms +[2025-09-02 07:08:18] [Rank 0] step:6801/10000 train_time:523311ms step_avg:76.95ms +[2025-09-02 07:08:18] [Rank 0] step:6801/10000 train_time:523311ms step_avg:76.95ms +[2025-09-02 07:08:19] [Rank 0] step:6821/10000 train_time:524877ms step_avg:76.95ms +[2025-09-02 07:08:19] [Rank 0] step:6821/10000 train_time:524877ms step_avg:76.95ms +[2025-09-02 07:08:21] [Rank 0] step:6841/10000 train_time:526514ms step_avg:76.96ms +[2025-09-02 07:08:21] [Rank 0] step:6841/10000 train_time:526514ms step_avg:76.96ms +[2025-09-02 07:08:23] [Rank 0] step:6861/10000 train_time:528152ms step_avg:76.98ms +[2025-09-02 07:08:23] [Rank 0] step:6861/10000 train_time:528152ms step_avg:76.98ms +[2025-09-02 07:08:24] [Rank 0] step:6881/10000 train_time:529789ms step_avg:76.99ms +[2025-09-02 07:08:24] [Rank 0] step:6881/10000 train_time:529789ms step_avg:76.99ms +[2025-09-02 07:08:26] [Rank 0] step:6901/10000 train_time:531425ms step_avg:77.01ms +[2025-09-02 07:08:26] [Rank 0] step:6901/10000 train_time:531425ms step_avg:77.01ms +[2025-09-02 07:08:27] [Rank 0] step:6921/10000 train_time:533061ms step_avg:77.02ms +[2025-09-02 07:08:27] [Rank 0] step:6921/10000 train_time:533061ms step_avg:77.02ms +[2025-09-02 07:08:29] 
[Rank 0] step:6941/10000 train_time:534700ms step_avg:77.03ms +[2025-09-02 07:08:29] [Rank 0] step:6941/10000 train_time:534700ms step_avg:77.03ms +[2025-09-02 07:08:31] [Rank 0] step:6961/10000 train_time:536352ms step_avg:77.05ms +[2025-09-02 07:08:31] [Rank 0] step:6961/10000 train_time:536352ms step_avg:77.05ms +[2025-09-02 07:08:32] [Rank 0] step:6981/10000 train_time:537993ms step_avg:77.07ms +[2025-09-02 07:08:32] [Rank 0] step:6981/10000 train_time:537993ms step_avg:77.07ms +[2025-09-02 07:08:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:08:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:08:46] [Rank 0] PRINT: step:7000/10000 val_loss:3.7516 svd_entropy: attn_qk:H=0.7719,top10E=0.26,eRank=173.5,q75/q25=53.46 attn_vo:H=0.8570,top10E=0.13,eRank=322.0,q75/q25=31.36 mlp_w1:H=0.9088,top10E=0.14,eRank=422.3,q75/q25=4.39 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7606,top10E=0.22,eRank=160.0,q75/q25=645.82 train_time:539719ms step_avg:77.10ms +[2025-09-02 07:08:46] [Rank 0] PRINT: step:7000/10000 val_loss:3.7516 svd_entropy: attn_qk:H=0.7719,top10E=0.26,eRank=173.5,q75/q25=53.46 attn_vo:H=0.8570,top10E=0.13,eRank=322.0,q75/q25=31.36 mlp_w1:H=0.9088,top10E=0.14,eRank=422.3,q75/q25=4.39 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7606,top10E=0.22,eRank=160.0,q75/q25=645.82 train_time:539719ms step_avg:77.10ms +[2025-09-02 07:08:46] [Rank 0] step:7001/10000 train_time:539732ms step_avg:77.09ms +[2025-09-02 07:08:46] [Rank 0] step:7001/10000 train_time:539732ms step_avg:77.09ms +[2025-09-02 07:08:48] [Rank 0] step:7021/10000 train_time:541312ms step_avg:77.10ms +[2025-09-02 07:08:48] [Rank 0] step:7021/10000 train_time:541312ms step_avg:77.10ms +[2025-09-02 07:08:49] [Rank 0] step:7041/10000 train_time:542950ms step_avg:77.11ms 
+[2025-09-02 07:08:49] [Rank 0] step:7041/10000 train_time:542950ms step_avg:77.11ms +[2025-09-02 07:08:51] [Rank 0] step:7061/10000 train_time:544587ms step_avg:77.13ms +[2025-09-02 07:08:51] [Rank 0] step:7061/10000 train_time:544587ms step_avg:77.13ms +[2025-09-02 07:08:53] [Rank 0] step:7081/10000 train_time:546223ms step_avg:77.14ms +[2025-09-02 07:08:53] [Rank 0] step:7081/10000 train_time:546223ms step_avg:77.14ms +[2025-09-02 07:08:54] [Rank 0] step:7101/10000 train_time:547865ms step_avg:77.15ms +[2025-09-02 07:08:54] [Rank 0] step:7101/10000 train_time:547865ms step_avg:77.15ms +[2025-09-02 07:08:56] [Rank 0] step:7121/10000 train_time:549506ms step_avg:77.17ms +[2025-09-02 07:08:56] [Rank 0] step:7121/10000 train_time:549506ms step_avg:77.17ms +[2025-09-02 07:08:58] [Rank 0] step:7141/10000 train_time:551146ms step_avg:77.18ms +[2025-09-02 07:08:58] [Rank 0] step:7141/10000 train_time:551146ms step_avg:77.18ms +[2025-09-02 07:08:59] [Rank 0] step:7161/10000 train_time:552788ms step_avg:77.19ms +[2025-09-02 07:08:59] [Rank 0] step:7161/10000 train_time:552788ms step_avg:77.19ms +[2025-09-02 07:09:01] [Rank 0] step:7181/10000 train_time:554432ms step_avg:77.21ms +[2025-09-02 07:09:01] [Rank 0] step:7181/10000 train_time:554432ms step_avg:77.21ms +[2025-09-02 07:09:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:09:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:09:14] [Rank 0] PRINT: step:7200/10000 val_loss:3.7414 svd_entropy: attn_qk:H=0.7726,top10E=0.26,eRank=174.2,q75/q25=53.37 attn_vo:H=0.8576,top10E=0.13,eRank=323.0,q75/q25=30.91 mlp_w1:H=0.9095,top10E=0.14,eRank=424.1,q75/q25=4.38 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7615,top10E=0.22,eRank=160.9,q75/q25=612.88 train_time:556161ms step_avg:77.24ms +[2025-09-02 07:09:14] [Rank 0] PRINT: step:7200/10000 val_loss:3.7414 svd_entropy: attn_qk:H=0.7726,top10E=0.26,eRank=174.2,q75/q25=53.37 attn_vo:H=0.8576,top10E=0.13,eRank=323.0,q75/q25=30.91 mlp_w1:H=0.9095,top10E=0.14,eRank=424.1,q75/q25=4.38 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7615,top10E=0.22,eRank=160.9,q75/q25=612.88 train_time:556161ms step_avg:77.24ms +[2025-09-02 07:09:15] [Rank 0] step:7201/10000 train_time:556174ms step_avg:77.24ms +[2025-09-02 07:09:15] [Rank 0] step:7201/10000 train_time:556174ms step_avg:77.24ms +[2025-09-02 07:09:16] [Rank 0] step:7221/10000 train_time:557736ms step_avg:77.24ms +[2025-09-02 07:09:16] [Rank 0] step:7221/10000 train_time:557736ms step_avg:77.24ms +[2025-09-02 07:09:18] [Rank 0] step:7241/10000 train_time:559370ms step_avg:77.25ms +[2025-09-02 07:09:18] [Rank 0] step:7241/10000 train_time:559370ms step_avg:77.25ms +[2025-09-02 07:09:19] [Rank 0] step:7261/10000 train_time:561003ms step_avg:77.26ms +[2025-09-02 07:09:19] [Rank 0] step:7261/10000 train_time:561003ms step_avg:77.26ms +[2025-09-02 07:09:21] [Rank 0] step:7281/10000 train_time:562649ms step_avg:77.28ms +[2025-09-02 07:09:21] [Rank 0] step:7281/10000 train_time:562649ms step_avg:77.28ms +[2025-09-02 07:09:23] [Rank 0] step:7301/10000 train_time:564282ms step_avg:77.29ms +[2025-09-02 07:09:23] [Rank 0] step:7301/10000 train_time:564282ms step_avg:77.29ms +[2025-09-02 07:09:24] [Rank 0] step:7321/10000 train_time:565932ms step_avg:77.30ms +[2025-09-02 07:09:24] [Rank 0] step:7321/10000 train_time:565932ms step_avg:77.30ms +[2025-09-02 07:09:26] 
[Rank 0] step:7341/10000 train_time:567567ms step_avg:77.31ms +[2025-09-02 07:09:26] [Rank 0] step:7341/10000 train_time:567567ms step_avg:77.31ms +[2025-09-02 07:09:28] [Rank 0] step:7361/10000 train_time:569213ms step_avg:77.33ms +[2025-09-02 07:09:28] [Rank 0] step:7361/10000 train_time:569213ms step_avg:77.33ms +[2025-09-02 07:09:29] [Rank 0] step:7381/10000 train_time:570859ms step_avg:77.34ms +[2025-09-02 07:09:29] [Rank 0] step:7381/10000 train_time:570859ms step_avg:77.34ms +[2025-09-02 07:09:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:09:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:09:43] [Rank 0] PRINT: step:7400/10000 val_loss:3.7230 svd_entropy: attn_qk:H=0.7730,top10E=0.26,eRank=174.7,q75/q25=52.98 attn_vo:H=0.8581,top10E=0.13,eRank=323.8,q75/q25=30.50 mlp_w1:H=0.9101,top10E=0.14,eRank=425.7,q75/q25=4.36 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7624,top10E=0.22,eRank=161.9,q75/q25=590.67 train_time:572568ms step_avg:77.37ms +[2025-09-02 07:09:43] [Rank 0] PRINT: step:7400/10000 val_loss:3.7230 svd_entropy: attn_qk:H=0.7730,top10E=0.26,eRank=174.7,q75/q25=52.98 attn_vo:H=0.8581,top10E=0.13,eRank=323.8,q75/q25=30.50 mlp_w1:H=0.9101,top10E=0.14,eRank=425.7,q75/q25=4.36 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7624,top10E=0.22,eRank=161.9,q75/q25=590.67 train_time:572568ms step_avg:77.37ms +[2025-09-02 07:09:43] [Rank 0] step:7401/10000 train_time:572581ms step_avg:77.37ms +[2025-09-02 07:09:43] [Rank 0] step:7401/10000 train_time:572581ms step_avg:77.37ms +[2025-09-02 07:09:45] [Rank 0] step:7421/10000 train_time:574146ms step_avg:77.37ms +[2025-09-02 07:09:45] [Rank 0] step:7421/10000 train_time:574146ms step_avg:77.37ms +[2025-09-02 07:09:46] [Rank 0] step:7441/10000 train_time:575786ms step_avg:77.38ms 
+[2025-09-02 07:09:46] [Rank 0] step:7441/10000 train_time:575786ms step_avg:77.38ms +[2025-09-02 07:09:48] [Rank 0] step:7461/10000 train_time:577426ms step_avg:77.39ms +[2025-09-02 07:09:48] [Rank 0] step:7461/10000 train_time:577426ms step_avg:77.39ms +[2025-09-02 07:09:50] [Rank 0] step:7481/10000 train_time:579071ms step_avg:77.41ms +[2025-09-02 07:09:50] [Rank 0] step:7481/10000 train_time:579071ms step_avg:77.41ms +[2025-09-02 07:09:51] [Rank 0] step:7501/10000 train_time:580715ms step_avg:77.42ms +[2025-09-02 07:09:51] [Rank 0] step:7501/10000 train_time:580715ms step_avg:77.42ms +[2025-09-02 07:09:53] [Rank 0] step:7521/10000 train_time:582362ms step_avg:77.43ms +[2025-09-02 07:09:53] [Rank 0] step:7521/10000 train_time:582362ms step_avg:77.43ms +[2025-09-02 07:09:54] [Rank 0] step:7541/10000 train_time:584015ms step_avg:77.45ms +[2025-09-02 07:09:54] [Rank 0] step:7541/10000 train_time:584015ms step_avg:77.45ms +[2025-09-02 07:09:56] [Rank 0] step:7561/10000 train_time:585646ms step_avg:77.46ms +[2025-09-02 07:09:56] [Rank 0] step:7561/10000 train_time:585646ms step_avg:77.46ms +[2025-09-02 07:09:58] [Rank 0] step:7581/10000 train_time:587297ms step_avg:77.47ms +[2025-09-02 07:09:58] [Rank 0] step:7581/10000 train_time:587297ms step_avg:77.47ms +[2025-09-02 07:09:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:09:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:10:11] [Rank 0] PRINT: step:7600/10000 val_loss:3.7163 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=175.3,q75/q25=52.58 attn_vo:H=0.8587,top10E=0.13,eRank=324.7,q75/q25=30.08 mlp_w1:H=0.9106,top10E=0.14,eRank=427.1,q75/q25=4.34 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.92 vo_prod:H=0.7632,top10E=0.22,eRank=162.8,q75/q25=576.24 train_time:589034ms step_avg:77.50ms +[2025-09-02 07:10:11] [Rank 0] PRINT: step:7600/10000 val_loss:3.7163 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=175.3,q75/q25=52.58 attn_vo:H=0.8587,top10E=0.13,eRank=324.7,q75/q25=30.08 mlp_w1:H=0.9106,top10E=0.14,eRank=427.1,q75/q25=4.34 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.92 vo_prod:H=0.7632,top10E=0.22,eRank=162.8,q75/q25=576.24 train_time:589034ms step_avg:77.50ms +[2025-09-02 07:10:11] [Rank 0] step:7601/10000 train_time:589046ms step_avg:77.50ms +[2025-09-02 07:10:11] [Rank 0] step:7601/10000 train_time:589046ms step_avg:77.50ms +[2025-09-02 07:10:13] [Rank 0] step:7621/10000 train_time:590610ms step_avg:77.50ms +[2025-09-02 07:10:13] [Rank 0] step:7621/10000 train_time:590610ms step_avg:77.50ms +[2025-09-02 07:10:15] [Rank 0] step:7641/10000 train_time:592246ms step_avg:77.51ms +[2025-09-02 07:10:15] [Rank 0] step:7641/10000 train_time:592246ms step_avg:77.51ms +[2025-09-02 07:10:16] [Rank 0] step:7661/10000 train_time:593889ms step_avg:77.52ms +[2025-09-02 07:10:16] [Rank 0] step:7661/10000 train_time:593889ms step_avg:77.52ms +[2025-09-02 07:10:18] [Rank 0] step:7681/10000 train_time:595528ms step_avg:77.53ms +[2025-09-02 07:10:18] [Rank 0] step:7681/10000 train_time:595528ms step_avg:77.53ms +[2025-09-02 07:10:20] [Rank 0] step:7701/10000 train_time:597167ms step_avg:77.54ms +[2025-09-02 07:10:20] [Rank 0] step:7701/10000 train_time:597167ms step_avg:77.54ms +[2025-09-02 07:10:21] [Rank 0] step:7721/10000 train_time:598819ms step_avg:77.56ms +[2025-09-02 07:10:21] [Rank 0] step:7721/10000 train_time:598819ms step_avg:77.56ms +[2025-09-02 07:10:23] 
[Rank 0] step:7741/10000 train_time:600462ms step_avg:77.57ms +[2025-09-02 07:10:23] [Rank 0] step:7741/10000 train_time:600462ms step_avg:77.57ms +[2025-09-02 07:10:25] [Rank 0] step:7761/10000 train_time:602113ms step_avg:77.58ms +[2025-09-02 07:10:25] [Rank 0] step:7761/10000 train_time:602113ms step_avg:77.58ms +[2025-09-02 07:10:26] [Rank 0] step:7781/10000 train_time:603759ms step_avg:77.59ms +[2025-09-02 07:10:26] [Rank 0] step:7781/10000 train_time:603759ms step_avg:77.59ms +[2025-09-02 07:10:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:10:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:10:40] [Rank 0] PRINT: step:7800/10000 val_loss:3.7027 svd_entropy: attn_qk:H=0.7740,top10E=0.26,eRank=175.7,q75/q25=52.44 attn_vo:H=0.8591,top10E=0.13,eRank=325.4,q75/q25=29.85 mlp_w1:H=0.9111,top10E=0.13,eRank=428.5,q75/q25=4.32 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7639,top10E=0.21,eRank=163.5,q75/q25=564.11 train_time:605497ms step_avg:77.63ms +[2025-09-02 07:10:40] [Rank 0] PRINT: step:7800/10000 val_loss:3.7027 svd_entropy: attn_qk:H=0.7740,top10E=0.26,eRank=175.7,q75/q25=52.44 attn_vo:H=0.8591,top10E=0.13,eRank=325.4,q75/q25=29.85 mlp_w1:H=0.9111,top10E=0.13,eRank=428.5,q75/q25=4.32 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.93 vo_prod:H=0.7639,top10E=0.21,eRank=163.5,q75/q25=564.11 train_time:605497ms step_avg:77.63ms +[2025-09-02 07:10:40] [Rank 0] step:7801/10000 train_time:605510ms step_avg:77.62ms +[2025-09-02 07:10:40] [Rank 0] step:7801/10000 train_time:605510ms step_avg:77.62ms +[2025-09-02 07:10:42] [Rank 0] step:7821/10000 train_time:607072ms step_avg:77.62ms +[2025-09-02 07:10:42] [Rank 0] step:7821/10000 train_time:607072ms step_avg:77.62ms +[2025-09-02 07:10:43] [Rank 0] step:7841/10000 train_time:608710ms step_avg:77.63ms 
+[2025-09-02 07:10:43] [Rank 0] step:7841/10000 train_time:608710ms step_avg:77.63ms +[2025-09-02 07:10:45] [Rank 0] step:7861/10000 train_time:610359ms step_avg:77.64ms +[2025-09-02 07:10:45] [Rank 0] step:7861/10000 train_time:610359ms step_avg:77.64ms +[2025-09-02 07:10:46] [Rank 0] step:7881/10000 train_time:612008ms step_avg:77.66ms +[2025-09-02 07:10:46] [Rank 0] step:7881/10000 train_time:612008ms step_avg:77.66ms +[2025-09-02 07:10:48] [Rank 0] step:7901/10000 train_time:613648ms step_avg:77.67ms +[2025-09-02 07:10:48] [Rank 0] step:7901/10000 train_time:613648ms step_avg:77.67ms +[2025-09-02 07:10:50] [Rank 0] step:7921/10000 train_time:615292ms step_avg:77.68ms +[2025-09-02 07:10:50] [Rank 0] step:7921/10000 train_time:615292ms step_avg:77.68ms +[2025-09-02 07:10:51] [Rank 0] step:7941/10000 train_time:616946ms step_avg:77.69ms +[2025-09-02 07:10:51] [Rank 0] step:7941/10000 train_time:616946ms step_avg:77.69ms +[2025-09-02 07:10:53] [Rank 0] step:7961/10000 train_time:618597ms step_avg:77.70ms +[2025-09-02 07:10:53] [Rank 0] step:7961/10000 train_time:618597ms step_avg:77.70ms +[2025-09-02 07:10:55] [Rank 0] step:7981/10000 train_time:620236ms step_avg:77.71ms +[2025-09-02 07:10:55] [Rank 0] step:7981/10000 train_time:620236ms step_avg:77.71ms +[2025-09-02 07:10:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:10:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:11:08] [Rank 0] PRINT: step:8000/10000 val_loss:3.6877 svd_entropy: attn_qk:H=0.7745,top10E=0.26,eRank=176.2,q75/q25=52.28 attn_vo:H=0.8595,top10E=0.13,eRank=326.1,q75/q25=29.56 mlp_w1:H=0.9115,top10E=0.13,eRank=429.7,q75/q25=4.31 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7647,top10E=0.21,eRank=164.4,q75/q25=545.64 train_time:621971ms step_avg:77.75ms +[2025-09-02 07:11:08] [Rank 0] PRINT: step:8000/10000 val_loss:3.6877 svd_entropy: attn_qk:H=0.7745,top10E=0.26,eRank=176.2,q75/q25=52.28 attn_vo:H=0.8595,top10E=0.13,eRank=326.1,q75/q25=29.56 mlp_w1:H=0.9115,top10E=0.13,eRank=429.7,q75/q25=4.31 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7647,top10E=0.21,eRank=164.4,q75/q25=545.64 train_time:621971ms step_avg:77.75ms +[2025-09-02 07:11:08] [Rank 0] step:8001/10000 train_time:621984ms step_avg:77.74ms +[2025-09-02 07:11:08] [Rank 0] step:8001/10000 train_time:621984ms step_avg:77.74ms +[2025-09-02 07:11:10] [Rank 0] step:8021/10000 train_time:623598ms step_avg:77.75ms +[2025-09-02 07:11:10] [Rank 0] step:8021/10000 train_time:623598ms step_avg:77.75ms +[2025-09-02 07:11:12] [Rank 0] step:8041/10000 train_time:625252ms step_avg:77.76ms +[2025-09-02 07:11:12] [Rank 0] step:8041/10000 train_time:625252ms step_avg:77.76ms +[2025-09-02 07:11:13] [Rank 0] step:8061/10000 train_time:626898ms step_avg:77.77ms +[2025-09-02 07:11:13] [Rank 0] step:8061/10000 train_time:626898ms step_avg:77.77ms +[2025-09-02 07:11:15] [Rank 0] step:8081/10000 train_time:628534ms step_avg:77.78ms +[2025-09-02 07:11:15] [Rank 0] step:8081/10000 train_time:628534ms step_avg:77.78ms +[2025-09-02 07:11:17] [Rank 0] step:8101/10000 train_time:630189ms step_avg:77.79ms +[2025-09-02 07:11:17] [Rank 0] step:8101/10000 train_time:630189ms step_avg:77.79ms +[2025-09-02 07:11:18] [Rank 0] step:8121/10000 train_time:631833ms step_avg:77.80ms +[2025-09-02 07:11:18] [Rank 0] step:8121/10000 train_time:631833ms step_avg:77.80ms +[2025-09-02 07:11:20] 
[Rank 0] step:8141/10000 train_time:633647ms step_avg:77.83ms +[2025-09-02 07:11:20] [Rank 0] step:8141/10000 train_time:633647ms step_avg:77.83ms +[2025-09-02 07:11:22] [Rank 0] step:8161/10000 train_time:635308ms step_avg:77.85ms +[2025-09-02 07:11:22] [Rank 0] step:8161/10000 train_time:635308ms step_avg:77.85ms +[2025-09-02 07:11:23] [Rank 0] step:8181/10000 train_time:636981ms step_avg:77.86ms +[2025-09-02 07:11:23] [Rank 0] step:8181/10000 train_time:636981ms step_avg:77.86ms +[2025-09-02 07:11:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:11:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:11:37] [Rank 0] PRINT: step:8200/10000 val_loss:3.6770 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.5,q75/q25=52.04 attn_vo:H=0.8598,top10E=0.13,eRank=326.7,q75/q25=29.28 mlp_w1:H=0.9119,top10E=0.13,eRank=430.8,q75/q25=4.28 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7652,top10E=0.21,eRank=164.9,q75/q25=535.01 train_time:638766ms step_avg:77.90ms +[2025-09-02 07:11:37] [Rank 0] PRINT: step:8200/10000 val_loss:3.6770 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.5,q75/q25=52.04 attn_vo:H=0.8598,top10E=0.13,eRank=326.7,q75/q25=29.28 mlp_w1:H=0.9119,top10E=0.13,eRank=430.8,q75/q25=4.28 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7652,top10E=0.21,eRank=164.9,q75/q25=535.01 train_time:638766ms step_avg:77.90ms +[2025-09-02 07:11:37] [Rank 0] step:8201/10000 train_time:638778ms step_avg:77.89ms +[2025-09-02 07:11:37] [Rank 0] step:8201/10000 train_time:638778ms step_avg:77.89ms +[2025-09-02 07:11:39] [Rank 0] step:8221/10000 train_time:640393ms step_avg:77.90ms +[2025-09-02 07:11:39] [Rank 0] step:8221/10000 train_time:640393ms step_avg:77.90ms +[2025-09-02 07:11:41] [Rank 0] step:8241/10000 train_time:642074ms step_avg:77.91ms 
+[2025-09-02 07:11:41] [Rank 0] step:8241/10000 train_time:642074ms step_avg:77.91ms +[2025-09-02 07:11:42] [Rank 0] step:8261/10000 train_time:643749ms step_avg:77.93ms +[2025-09-02 07:11:42] [Rank 0] step:8261/10000 train_time:643749ms step_avg:77.93ms +[2025-09-02 07:11:44] [Rank 0] step:8281/10000 train_time:645425ms step_avg:77.94ms +[2025-09-02 07:11:44] [Rank 0] step:8281/10000 train_time:645425ms step_avg:77.94ms +[2025-09-02 07:11:46] [Rank 0] step:8301/10000 train_time:647100ms step_avg:77.95ms +[2025-09-02 07:11:46] [Rank 0] step:8301/10000 train_time:647100ms step_avg:77.95ms +[2025-09-02 07:11:47] [Rank 0] step:8321/10000 train_time:648767ms step_avg:77.97ms +[2025-09-02 07:11:47] [Rank 0] step:8321/10000 train_time:648767ms step_avg:77.97ms +[2025-09-02 07:11:49] [Rank 0] step:8341/10000 train_time:650447ms step_avg:77.98ms +[2025-09-02 07:11:49] [Rank 0] step:8341/10000 train_time:650447ms step_avg:77.98ms +[2025-09-02 07:11:51] [Rank 0] step:8361/10000 train_time:652125ms step_avg:78.00ms +[2025-09-02 07:11:51] [Rank 0] step:8361/10000 train_time:652125ms step_avg:78.00ms +[2025-09-02 07:11:52] [Rank 0] step:8381/10000 train_time:653797ms step_avg:78.01ms +[2025-09-02 07:11:52] [Rank 0] step:8381/10000 train_time:653797ms step_avg:78.01ms +[2025-09-02 07:11:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:11:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:12:06] [Rank 0] PRINT: step:8400/10000 val_loss:3.6657 svd_entropy: attn_qk:H=0.7751,top10E=0.26,eRank=176.8,q75/q25=51.96 attn_vo:H=0.8601,top10E=0.13,eRank=327.2,q75/q25=29.00 mlp_w1:H=0.9123,top10E=0.13,eRank=431.8,q75/q25=4.27 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7657,top10E=0.21,eRank=165.5,q75/q25=523.73 train_time:655559ms step_avg:78.04ms +[2025-09-02 07:12:06] [Rank 0] PRINT: step:8400/10000 val_loss:3.6657 svd_entropy: attn_qk:H=0.7751,top10E=0.26,eRank=176.8,q75/q25=51.96 attn_vo:H=0.8601,top10E=0.13,eRank=327.2,q75/q25=29.00 mlp_w1:H=0.9123,top10E=0.13,eRank=431.8,q75/q25=4.27 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7657,top10E=0.21,eRank=165.5,q75/q25=523.73 train_time:655559ms step_avg:78.04ms +[2025-09-02 07:12:06] [Rank 0] step:8401/10000 train_time:655572ms step_avg:78.04ms +[2025-09-02 07:12:06] [Rank 0] step:8401/10000 train_time:655572ms step_avg:78.04ms +[2025-09-02 07:12:08] [Rank 0] step:8421/10000 train_time:657166ms step_avg:78.04ms +[2025-09-02 07:12:08] [Rank 0] step:8421/10000 train_time:657166ms step_avg:78.04ms +[2025-09-02 07:12:09] [Rank 0] step:8441/10000 train_time:658836ms step_avg:78.05ms +[2025-09-02 07:12:09] [Rank 0] step:8441/10000 train_time:658836ms step_avg:78.05ms +[2025-09-02 07:12:11] [Rank 0] step:8461/10000 train_time:660505ms step_avg:78.06ms +[2025-09-02 07:12:11] [Rank 0] step:8461/10000 train_time:660505ms step_avg:78.06ms +[2025-09-02 07:12:13] [Rank 0] step:8481/10000 train_time:662180ms step_avg:78.08ms +[2025-09-02 07:12:13] [Rank 0] step:8481/10000 train_time:662180ms step_avg:78.08ms +[2025-09-02 07:12:14] [Rank 0] step:8501/10000 train_time:663873ms step_avg:78.09ms +[2025-09-02 07:12:14] [Rank 0] step:8501/10000 train_time:663873ms step_avg:78.09ms +[2025-09-02 07:12:16] [Rank 0] step:8521/10000 train_time:665546ms step_avg:78.11ms +[2025-09-02 07:12:16] [Rank 0] step:8521/10000 train_time:665546ms step_avg:78.11ms +[2025-09-02 07:12:18] 
[Rank 0] step:8541/10000 train_time:667227ms step_avg:78.12ms +[2025-09-02 07:12:18] [Rank 0] step:8541/10000 train_time:667227ms step_avg:78.12ms +[2025-09-02 07:12:19] [Rank 0] step:8561/10000 train_time:668903ms step_avg:78.13ms +[2025-09-02 07:12:19] [Rank 0] step:8561/10000 train_time:668903ms step_avg:78.13ms +[2025-09-02 07:12:21] [Rank 0] step:8581/10000 train_time:670577ms step_avg:78.15ms +[2025-09-02 07:12:21] [Rank 0] step:8581/10000 train_time:670577ms step_avg:78.15ms +[2025-09-02 07:12:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:12:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:12:35] [Rank 0] PRINT: step:8600/10000 val_loss:3.6566 svd_entropy: attn_qk:H=0.7754,top10E=0.26,eRank=177.2,q75/q25=51.84 attn_vo:H=0.8604,top10E=0.12,eRank=327.7,q75/q25=28.80 mlp_w1:H=0.9126,top10E=0.13,eRank=432.7,q75/q25=4.26 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7662,top10E=0.21,eRank=166.1,q75/q25=507.67 train_time:672331ms step_avg:78.18ms +[2025-09-02 07:12:35] [Rank 0] PRINT: step:8600/10000 val_loss:3.6566 svd_entropy: attn_qk:H=0.7754,top10E=0.26,eRank=177.2,q75/q25=51.84 attn_vo:H=0.8604,top10E=0.12,eRank=327.7,q75/q25=28.80 mlp_w1:H=0.9126,top10E=0.13,eRank=432.7,q75/q25=4.26 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7662,top10E=0.21,eRank=166.1,q75/q25=507.67 train_time:672331ms step_avg:78.18ms +[2025-09-02 07:12:35] [Rank 0] step:8601/10000 train_time:672344ms step_avg:78.17ms +[2025-09-02 07:12:35] [Rank 0] step:8601/10000 train_time:672344ms step_avg:78.17ms +[2025-09-02 07:12:36] [Rank 0] step:8621/10000 train_time:673952ms step_avg:78.18ms +[2025-09-02 07:12:36] [Rank 0] step:8621/10000 train_time:673952ms step_avg:78.18ms +[2025-09-02 07:12:38] [Rank 0] step:8641/10000 train_time:675621ms step_avg:78.19ms 
+[2025-09-02 07:12:38] [Rank 0] step:8641/10000 train_time:675621ms step_avg:78.19ms +[2025-09-02 07:12:40] [Rank 0] step:8661/10000 train_time:677294ms step_avg:78.20ms +[2025-09-02 07:12:40] [Rank 0] step:8661/10000 train_time:677294ms step_avg:78.20ms +[2025-09-02 07:12:41] [Rank 0] step:8681/10000 train_time:678965ms step_avg:78.21ms +[2025-09-02 07:12:41] [Rank 0] step:8681/10000 train_time:678965ms step_avg:78.21ms +[2025-09-02 07:12:43] [Rank 0] step:8701/10000 train_time:680629ms step_avg:78.22ms +[2025-09-02 07:12:43] [Rank 0] step:8701/10000 train_time:680629ms step_avg:78.22ms +[2025-09-02 07:12:45] [Rank 0] step:8721/10000 train_time:682304ms step_avg:78.24ms +[2025-09-02 07:12:45] [Rank 0] step:8721/10000 train_time:682304ms step_avg:78.24ms +[2025-09-02 07:12:46] [Rank 0] step:8741/10000 train_time:683963ms step_avg:78.25ms +[2025-09-02 07:12:46] [Rank 0] step:8741/10000 train_time:683963ms step_avg:78.25ms +[2025-09-02 07:12:48] [Rank 0] step:8761/10000 train_time:685634ms step_avg:78.26ms +[2025-09-02 07:12:48] [Rank 0] step:8761/10000 train_time:685634ms step_avg:78.26ms +[2025-09-02 07:12:50] [Rank 0] step:8781/10000 train_time:687312ms step_avg:78.27ms +[2025-09-02 07:12:50] [Rank 0] step:8781/10000 train_time:687312ms step_avg:78.27ms +[2025-09-02 07:12:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:12:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:13:03] [Rank 0] PRINT: step:8800/10000 val_loss:3.6467 svd_entropy: attn_qk:H=0.7756,top10E=0.26,eRank=177.4,q75/q25=51.62 attn_vo:H=0.8607,top10E=0.12,eRank=328.3,q75/q25=28.55 mlp_w1:H=0.9129,top10E=0.13,eRank=433.4,q75/q25=4.25 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7668,top10E=0.21,eRank=166.7,q75/q25=502.87 train_time:689074ms step_avg:78.30ms +[2025-09-02 07:13:03] [Rank 0] PRINT: step:8800/10000 val_loss:3.6467 svd_entropy: attn_qk:H=0.7756,top10E=0.26,eRank=177.4,q75/q25=51.62 attn_vo:H=0.8607,top10E=0.12,eRank=328.3,q75/q25=28.55 mlp_w1:H=0.9129,top10E=0.13,eRank=433.4,q75/q25=4.25 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7668,top10E=0.21,eRank=166.7,q75/q25=502.87 train_time:689074ms step_avg:78.30ms +[2025-09-02 07:13:03] [Rank 0] step:8801/10000 train_time:689087ms step_avg:78.30ms +[2025-09-02 07:13:03] [Rank 0] step:8801/10000 train_time:689087ms step_avg:78.30ms +[2025-09-02 07:13:05] [Rank 0] step:8821/10000 train_time:690675ms step_avg:78.30ms +[2025-09-02 07:13:05] [Rank 0] step:8821/10000 train_time:690675ms step_avg:78.30ms +[2025-09-02 07:13:07] [Rank 0] step:8841/10000 train_time:692364ms step_avg:78.31ms +[2025-09-02 07:13:07] [Rank 0] step:8841/10000 train_time:692364ms step_avg:78.31ms +[2025-09-02 07:13:08] [Rank 0] step:8861/10000 train_time:694035ms step_avg:78.32ms +[2025-09-02 07:13:08] [Rank 0] step:8861/10000 train_time:694035ms step_avg:78.32ms +[2025-09-02 07:13:10] [Rank 0] step:8881/10000 train_time:695706ms step_avg:78.34ms +[2025-09-02 07:13:10] [Rank 0] step:8881/10000 train_time:695706ms step_avg:78.34ms +[2025-09-02 07:13:12] [Rank 0] step:8901/10000 train_time:697376ms step_avg:78.35ms +[2025-09-02 07:13:12] [Rank 0] step:8901/10000 train_time:697376ms step_avg:78.35ms +[2025-09-02 07:13:14] [Rank 0] step:8921/10000 train_time:699063ms step_avg:78.36ms +[2025-09-02 07:13:14] [Rank 0] step:8921/10000 train_time:699063ms step_avg:78.36ms +[2025-09-02 07:13:15] 
[Rank 0] step:8941/10000 train_time:700741ms step_avg:78.37ms +[2025-09-02 07:13:15] [Rank 0] step:8941/10000 train_time:700741ms step_avg:78.37ms +[2025-09-02 07:13:17] [Rank 0] step:8961/10000 train_time:702411ms step_avg:78.39ms +[2025-09-02 07:13:17] [Rank 0] step:8961/10000 train_time:702411ms step_avg:78.39ms +[2025-09-02 07:13:19] [Rank 0] step:8981/10000 train_time:704083ms step_avg:78.40ms +[2025-09-02 07:13:19] [Rank 0] step:8981/10000 train_time:704083ms step_avg:78.40ms +[2025-09-02 07:13:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:13:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:13:32] [Rank 0] PRINT: step:9000/10000 val_loss:3.6379 svd_entropy: attn_qk:H=0.7759,top10E=0.26,eRank=177.7,q75/q25=51.65 attn_vo:H=0.8609,top10E=0.12,eRank=328.7,q75/q25=28.39 mlp_w1:H=0.9131,top10E=0.13,eRank=434.1,q75/q25=4.24 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7672,top10E=0.21,eRank=167.3,q75/q25=494.10 train_time:705837ms step_avg:78.43ms +[2025-09-02 07:13:32] [Rank 0] PRINT: step:9000/10000 val_loss:3.6379 svd_entropy: attn_qk:H=0.7759,top10E=0.26,eRank=177.7,q75/q25=51.65 attn_vo:H=0.8609,top10E=0.12,eRank=328.7,q75/q25=28.39 mlp_w1:H=0.9131,top10E=0.13,eRank=434.1,q75/q25=4.24 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.92 vo_prod:H=0.7672,top10E=0.21,eRank=167.3,q75/q25=494.10 train_time:705837ms step_avg:78.43ms +[2025-09-02 07:13:32] [Rank 0] step:9001/10000 train_time:705850ms step_avg:78.42ms +[2025-09-02 07:13:32] [Rank 0] step:9001/10000 train_time:705850ms step_avg:78.42ms +[2025-09-02 07:13:34] [Rank 0] step:9021/10000 train_time:707443ms step_avg:78.42ms +[2025-09-02 07:13:34] [Rank 0] step:9021/10000 train_time:707443ms step_avg:78.42ms +[2025-09-02 07:13:36] [Rank 0] step:9041/10000 train_time:709114ms step_avg:78.43ms 
+[2025-09-02 07:13:36] [Rank 0] step:9041/10000 train_time:709114ms step_avg:78.43ms +[2025-09-02 07:13:37] [Rank 0] step:9061/10000 train_time:710795ms step_avg:78.45ms +[2025-09-02 07:13:37] [Rank 0] step:9061/10000 train_time:710795ms step_avg:78.45ms +[2025-09-02 07:13:39] [Rank 0] step:9081/10000 train_time:712479ms step_avg:78.46ms +[2025-09-02 07:13:39] [Rank 0] step:9081/10000 train_time:712479ms step_avg:78.46ms +[2025-09-02 07:13:41] [Rank 0] step:9101/10000 train_time:714171ms step_avg:78.47ms +[2025-09-02 07:13:41] [Rank 0] step:9101/10000 train_time:714171ms step_avg:78.47ms +[2025-09-02 07:13:42] [Rank 0] step:9121/10000 train_time:715849ms step_avg:78.48ms +[2025-09-02 07:13:42] [Rank 0] step:9121/10000 train_time:715849ms step_avg:78.48ms +[2025-09-02 07:13:44] [Rank 0] step:9141/10000 train_time:717513ms step_avg:78.49ms +[2025-09-02 07:13:44] [Rank 0] step:9141/10000 train_time:717513ms step_avg:78.49ms +[2025-09-02 07:13:46] [Rank 0] step:9161/10000 train_time:719184ms step_avg:78.50ms +[2025-09-02 07:13:46] [Rank 0] step:9161/10000 train_time:719184ms step_avg:78.50ms +[2025-09-02 07:13:47] [Rank 0] step:9181/10000 train_time:720890ms step_avg:78.52ms +[2025-09-02 07:13:47] [Rank 0] step:9181/10000 train_time:720890ms step_avg:78.52ms +[2025-09-02 07:13:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:13:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:14:01] [Rank 0] PRINT: step:9200/10000 val_loss:3.6299 svd_entropy: attn_qk:H=0.7761,top10E=0.26,eRank=177.9,q75/q25=51.38 attn_vo:H=0.8611,top10E=0.12,eRank=329.0,q75/q25=28.28 mlp_w1:H=0.9133,top10E=0.13,eRank=434.7,q75/q25=4.23 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7676,top10E=0.21,eRank=167.7,q75/q25=485.05 train_time:722644ms step_avg:78.55ms +[2025-09-02 07:14:01] [Rank 0] PRINT: step:9200/10000 val_loss:3.6299 svd_entropy: attn_qk:H=0.7761,top10E=0.26,eRank=177.9,q75/q25=51.38 attn_vo:H=0.8611,top10E=0.12,eRank=329.0,q75/q25=28.28 mlp_w1:H=0.9133,top10E=0.13,eRank=434.7,q75/q25=4.23 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7676,top10E=0.21,eRank=167.7,q75/q25=485.05 train_time:722644ms step_avg:78.55ms +[2025-09-02 07:14:01] [Rank 0] step:9201/10000 train_time:722657ms step_avg:78.54ms +[2025-09-02 07:14:01] [Rank 0] step:9201/10000 train_time:722657ms step_avg:78.54ms +[2025-09-02 07:14:03] [Rank 0] step:9221/10000 train_time:724266ms step_avg:78.55ms +[2025-09-02 07:14:03] [Rank 0] step:9221/10000 train_time:724266ms step_avg:78.55ms +[2025-09-02 07:14:04] [Rank 0] step:9241/10000 train_time:725953ms step_avg:78.56ms +[2025-09-02 07:14:04] [Rank 0] step:9241/10000 train_time:725953ms step_avg:78.56ms +[2025-09-02 07:14:06] [Rank 0] step:9261/10000 train_time:727639ms step_avg:78.57ms +[2025-09-02 07:14:06] [Rank 0] step:9261/10000 train_time:727639ms step_avg:78.57ms +[2025-09-02 07:14:08] [Rank 0] step:9281/10000 train_time:729310ms step_avg:78.58ms +[2025-09-02 07:14:08] [Rank 0] step:9281/10000 train_time:729310ms step_avg:78.58ms +[2025-09-02 07:14:09] [Rank 0] step:9301/10000 train_time:730986ms step_avg:78.59ms +[2025-09-02 07:14:09] [Rank 0] step:9301/10000 train_time:730986ms step_avg:78.59ms +[2025-09-02 07:14:11] [Rank 0] step:9321/10000 train_time:732666ms step_avg:78.60ms +[2025-09-02 07:14:11] [Rank 0] step:9321/10000 train_time:732666ms step_avg:78.60ms +[2025-09-02 07:14:13] 
[Rank 0] step:9341/10000 train_time:734344ms step_avg:78.62ms +[2025-09-02 07:14:13] [Rank 0] step:9341/10000 train_time:734344ms step_avg:78.62ms +[2025-09-02 07:14:14] [Rank 0] step:9361/10000 train_time:736026ms step_avg:78.63ms +[2025-09-02 07:14:14] [Rank 0] step:9361/10000 train_time:736026ms step_avg:78.63ms +[2025-09-02 07:14:16] [Rank 0] step:9381/10000 train_time:737718ms step_avg:78.64ms +[2025-09-02 07:14:16] [Rank 0] step:9381/10000 train_time:737718ms step_avg:78.64ms +[2025-09-02 07:14:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:14:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:14:30] [Rank 0] PRINT: step:9400/10000 val_loss:3.6221 svd_entropy: attn_qk:H=0.7762,top10E=0.26,eRank=178.0,q75/q25=51.21 attn_vo:H=0.8613,top10E=0.12,eRank=329.3,q75/q25=28.25 mlp_w1:H=0.9135,top10E=0.13,eRank=435.2,q75/q25=4.22 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7680,top10E=0.21,eRank=168.1,q75/q25=480.77 train_time:739490ms step_avg:78.67ms +[2025-09-02 07:14:30] [Rank 0] PRINT: step:9400/10000 val_loss:3.6221 svd_entropy: attn_qk:H=0.7762,top10E=0.26,eRank=178.0,q75/q25=51.21 attn_vo:H=0.8613,top10E=0.12,eRank=329.3,q75/q25=28.25 mlp_w1:H=0.9135,top10E=0.13,eRank=435.2,q75/q25=4.22 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7680,top10E=0.21,eRank=168.1,q75/q25=480.77 train_time:739490ms step_avg:78.67ms +[2025-09-02 07:14:30] [Rank 0] step:9401/10000 train_time:739503ms step_avg:78.66ms +[2025-09-02 07:14:30] [Rank 0] step:9401/10000 train_time:739503ms step_avg:78.66ms +[2025-09-02 07:14:31] [Rank 0] step:9421/10000 train_time:741101ms step_avg:78.66ms +[2025-09-02 07:14:31] [Rank 0] step:9421/10000 train_time:741101ms step_avg:78.66ms +[2025-09-02 07:14:33] [Rank 0] step:9441/10000 train_time:742779ms step_avg:78.68ms 
+[2025-09-02 07:14:33] [Rank 0] step:9441/10000 train_time:742779ms step_avg:78.68ms +[2025-09-02 07:14:35] [Rank 0] step:9461/10000 train_time:744460ms step_avg:78.69ms +[2025-09-02 07:14:35] [Rank 0] step:9461/10000 train_time:744460ms step_avg:78.69ms +[2025-09-02 07:14:36] [Rank 0] step:9481/10000 train_time:746140ms step_avg:78.70ms +[2025-09-02 07:14:36] [Rank 0] step:9481/10000 train_time:746140ms step_avg:78.70ms +[2025-09-02 07:14:38] [Rank 0] step:9501/10000 train_time:747833ms step_avg:78.71ms +[2025-09-02 07:14:38] [Rank 0] step:9501/10000 train_time:747833ms step_avg:78.71ms +[2025-09-02 07:14:40] [Rank 0] step:9521/10000 train_time:749504ms step_avg:78.72ms +[2025-09-02 07:14:40] [Rank 0] step:9521/10000 train_time:749504ms step_avg:78.72ms +[2025-09-02 07:14:41] [Rank 0] step:9541/10000 train_time:751183ms step_avg:78.73ms +[2025-09-02 07:14:41] [Rank 0] step:9541/10000 train_time:751183ms step_avg:78.73ms +[2025-09-02 07:14:43] [Rank 0] step:9561/10000 train_time:752857ms step_avg:78.74ms +[2025-09-02 07:14:43] [Rank 0] step:9561/10000 train_time:752857ms step_avg:78.74ms +[2025-09-02 07:14:45] [Rank 0] step:9581/10000 train_time:754536ms step_avg:78.75ms +[2025-09-02 07:14:45] [Rank 0] step:9581/10000 train_time:754536ms step_avg:78.75ms +[2025-09-02 07:14:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:14:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:14:58] [Rank 0] PRINT: step:9600/10000 val_loss:3.6158 svd_entropy: attn_qk:H=0.7764,top10E=0.26,eRank=178.2,q75/q25=51.15 attn_vo:H=0.8615,top10E=0.12,eRank=329.6,q75/q25=28.18 mlp_w1:H=0.9136,top10E=0.13,eRank=435.6,q75/q25=4.21 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7683,top10E=0.21,eRank=168.5,q75/q25=475.26 train_time:756313ms step_avg:78.78ms +[2025-09-02 07:14:58] [Rank 0] PRINT: step:9600/10000 val_loss:3.6158 svd_entropy: attn_qk:H=0.7764,top10E=0.26,eRank=178.2,q75/q25=51.15 attn_vo:H=0.8615,top10E=0.12,eRank=329.6,q75/q25=28.18 mlp_w1:H=0.9136,top10E=0.13,eRank=435.6,q75/q25=4.21 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7683,top10E=0.21,eRank=168.5,q75/q25=475.26 train_time:756313ms step_avg:78.78ms +[2025-09-02 07:14:58] [Rank 0] step:9601/10000 train_time:756326ms step_avg:78.78ms +[2025-09-02 07:14:58] [Rank 0] step:9601/10000 train_time:756326ms step_avg:78.78ms +[2025-09-02 07:15:00] [Rank 0] step:9621/10000 train_time:757936ms step_avg:78.78ms +[2025-09-02 07:15:00] [Rank 0] step:9621/10000 train_time:757936ms step_avg:78.78ms +[2025-09-02 07:15:02] [Rank 0] step:9641/10000 train_time:759618ms step_avg:78.79ms +[2025-09-02 07:15:02] [Rank 0] step:9641/10000 train_time:759618ms step_avg:78.79ms +[2025-09-02 07:15:03] [Rank 0] step:9661/10000 train_time:761326ms step_avg:78.80ms +[2025-09-02 07:15:03] [Rank 0] step:9661/10000 train_time:761326ms step_avg:78.80ms +[2025-09-02 07:15:05] [Rank 0] step:9681/10000 train_time:763022ms step_avg:78.82ms +[2025-09-02 07:15:05] [Rank 0] step:9681/10000 train_time:763022ms step_avg:78.82ms +[2025-09-02 07:15:07] [Rank 0] step:9701/10000 train_time:764735ms step_avg:78.83ms +[2025-09-02 07:15:07] [Rank 0] step:9701/10000 train_time:764735ms step_avg:78.83ms +[2025-09-02 07:15:08] [Rank 0] step:9721/10000 train_time:766428ms step_avg:78.84ms +[2025-09-02 07:15:08] [Rank 0] step:9721/10000 train_time:766428ms step_avg:78.84ms +[2025-09-02 07:15:10] 
[Rank 0] step:9741/10000 train_time:768147ms step_avg:78.86ms +[2025-09-02 07:15:10] [Rank 0] step:9741/10000 train_time:768147ms step_avg:78.86ms +[2025-09-02 07:15:12] [Rank 0] step:9761/10000 train_time:769853ms step_avg:78.87ms +[2025-09-02 07:15:12] [Rank 0] step:9761/10000 train_time:769853ms step_avg:78.87ms +[2025-09-02 07:15:14] [Rank 0] step:9781/10000 train_time:771558ms step_avg:78.88ms +[2025-09-02 07:15:14] [Rank 0] step:9781/10000 train_time:771558ms step_avg:78.88ms +[2025-09-02 07:15:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:15:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:15:27] [Rank 0] PRINT: step:9800/10000 val_loss:3.6092 svd_entropy: attn_qk:H=0.7764,top10E=0.26,eRank=178.2,q75/q25=51.08 attn_vo:H=0.8616,top10E=0.12,eRank=329.8,q75/q25=28.12 mlp_w1:H=0.9137,top10E=0.13,eRank=435.9,q75/q25=4.21 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7685,top10E=0.21,eRank=168.8,q75/q25=469.37 train_time:773364ms step_avg:78.91ms +[2025-09-02 07:15:27] [Rank 0] PRINT: step:9800/10000 val_loss:3.6092 svd_entropy: attn_qk:H=0.7764,top10E=0.26,eRank=178.2,q75/q25=51.08 attn_vo:H=0.8616,top10E=0.12,eRank=329.8,q75/q25=28.12 mlp_w1:H=0.9137,top10E=0.13,eRank=435.9,q75/q25=4.21 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7685,top10E=0.21,eRank=168.8,q75/q25=469.37 train_time:773364ms step_avg:78.91ms +[2025-09-02 07:15:27] [Rank 0] step:9801/10000 train_time:773377ms step_avg:78.91ms +[2025-09-02 07:15:27] [Rank 0] step:9801/10000 train_time:773377ms step_avg:78.91ms +[2025-09-02 07:15:29] [Rank 0] step:9821/10000 train_time:775008ms step_avg:78.91ms +[2025-09-02 07:15:29] [Rank 0] step:9821/10000 train_time:775008ms step_avg:78.91ms +[2025-09-02 07:15:31] [Rank 0] step:9841/10000 train_time:776720ms step_avg:78.93ms 
+[2025-09-02 07:15:31] [Rank 0] step:9841/10000 train_time:776720ms step_avg:78.93ms +[2025-09-02 07:15:32] [Rank 0] step:9861/10000 train_time:778411ms step_avg:78.94ms +[2025-09-02 07:15:32] [Rank 0] step:9861/10000 train_time:778411ms step_avg:78.94ms +[2025-09-02 07:15:34] [Rank 0] step:9881/10000 train_time:780100ms step_avg:78.95ms +[2025-09-02 07:15:34] [Rank 0] step:9881/10000 train_time:780100ms step_avg:78.95ms +[2025-09-02 07:15:36] [Rank 0] step:9901/10000 train_time:781801ms step_avg:78.96ms +[2025-09-02 07:15:36] [Rank 0] step:9901/10000 train_time:781801ms step_avg:78.96ms +[2025-09-02 07:15:37] [Rank 0] step:9921/10000 train_time:783504ms step_avg:78.97ms +[2025-09-02 07:15:37] [Rank 0] step:9921/10000 train_time:783504ms step_avg:78.97ms +[2025-09-02 07:15:39] [Rank 0] step:9941/10000 train_time:785206ms step_avg:78.99ms +[2025-09-02 07:15:39] [Rank 0] step:9941/10000 train_time:785206ms step_avg:78.99ms +[2025-09-02 07:15:41] [Rank 0] step:9961/10000 train_time:786905ms step_avg:79.00ms +[2025-09-02 07:15:41] [Rank 0] step:9961/10000 train_time:786905ms step_avg:79.00ms +[2025-09-02 07:15:42] [Rank 0] step:9981/10000 train_time:788604ms step_avg:79.01ms +[2025-09-02 07:15:42] [Rank 0] step:9981/10000 train_time:788604ms step_avg:79.01ms +[2025-09-02 07:15:44] [Rank 0] step:10000/10000 train_time:790225ms step_avg:79.02ms +[2025-09-02 07:15:44] [Rank 0] step:10000/10000 train_time:790225ms step_avg:79.02ms +[2025-09-02 07:15:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:15:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:15:56] [Rank 0] PRINT: step:10000/10000 val_loss:3.6031 svd_entropy: attn_qk:H=0.7765,top10E=0.26,eRank=178.3,q75/q25=51.07 attn_vo:H=0.8617,top10E=0.12,eRank=330.0,q75/q25=28.04 mlp_w1:H=0.9138,top10E=0.13,eRank=436.1,q75/q25=4.21 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7687,top10E=0.21,eRank=169.0,q75/q25=468.04 train_time:790407ms step_avg:79.04ms +[2025-09-02 07:15:56] [Rank 0] PRINT: step:10000/10000 val_loss:3.6031 svd_entropy: attn_qk:H=0.7765,top10E=0.26,eRank=178.3,q75/q25=51.07 attn_vo:H=0.8617,top10E=0.12,eRank=330.0,q75/q25=28.04 mlp_w1:H=0.9138,top10E=0.13,eRank=436.1,q75/q25=4.21 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.92 vo_prod:H=0.7687,top10E=0.21,eRank=169.0,q75/q25=468.04 train_time:790407ms step_avg:79.04ms +[2025-09-02 07:15:56] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 07:15:56 2025 --- +[2025-09-02 07:15:56] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 07:15:56 2025 --- +[2025-09-02 07:15:56] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15096 MiB +[2025-09-02 07:15:56] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15096 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_44/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3e7e53be48c09a369b6723c540a0970e26b89e02 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "bfbac81f-8f89-4c9e-8b36-46748588cc4f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_44/training_log_bfbac81f-8f89-4c9e-8b36-46748588cc4f.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_44/training_log_bfbac81f-8f89-4c9e-8b36-46748588cc4f.txt new file mode 100644 index 0000000000000000000000000000000000000000..46cd13a416154f05e631eba3dd3f845a29b723c0 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_44/training_log_bfbac81f-8f89-4c9e-8b36-46748588cc4f.txt @@ -0,0 +1,2984 @@ +[2025-09-02 08:04:34] [Rank 0] PRINT: --- Script Start: Tue Sep 2 08:04:34 2025 --- +[2025-09-02 08:04:34] [Rank 0] PRINT: --- Script Start: Tue Sep 2 08:04:34 2025 --- +[2025-09-02 08:04:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 08:04:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 08:04:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 08:04:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 08:04:34] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-02 08:04:34] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-02 08:04:34] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_44 +[2025-09-02 08:04:34] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_44 +[2025-09-02 08:04:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 08:04:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 08:04:34] [Rank 0] PRINT: Constructing model... +[2025-09-02 08:04:34] [Rank 0] PRINT: Constructing model... +[2025-09-02 08:04:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 08:04:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 08:04:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 08:04:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 08:04:36] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 08:04:36] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 08:04:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 08:04:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 08:04:36] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 08:04:36] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 08:04:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 08:04:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 08:04:36] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 08:04:36] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 08:04:36] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 08:04:36] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 08:04:36] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 08:04:36] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 08:04:36] [Rank 0] PRINT: Starting warmup... +[2025-09-02 08:04:36] [Rank 0] PRINT: Starting warmup... +[2025-09-02 08:05:18] [Rank 0] PRINT: Warmup complete. +[2025-09-02 08:05:18] [Rank 0] PRINT: Warmup complete. +[2025-09-02 08:05:18] [Rank 0] PRINT: Starting training... +[2025-09-02 08:05:18] [Rank 0] PRINT: Starting training... 
+[2025-09-02 08:05:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:05:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:05:35] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.6,q75/q25=10.29 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 08:05:35] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.6,q75/q25=10.29 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 08:05:36] [Rank 0] step:21/10000 train_time:1424ms step_avg:67.83ms +[2025-09-02 08:05:36] [Rank 0] step:21/10000 train_time:1424ms step_avg:67.83ms +[2025-09-02 08:05:38] [Rank 0] step:41/10000 train_time:2878ms step_avg:70.20ms +[2025-09-02 08:05:38] [Rank 0] step:41/10000 train_time:2878ms step_avg:70.20ms +[2025-09-02 08:05:39] [Rank 0] step:61/10000 train_time:4334ms step_avg:71.04ms +[2025-09-02 08:05:39] [Rank 0] step:61/10000 train_time:4334ms step_avg:71.04ms +[2025-09-02 08:05:41] [Rank 0] step:81/10000 train_time:5789ms step_avg:71.47ms +[2025-09-02 08:05:41] [Rank 0] step:81/10000 train_time:5789ms step_avg:71.47ms +[2025-09-02 08:05:42] [Rank 0] step:101/10000 train_time:7246ms step_avg:71.74ms +[2025-09-02 08:05:42] [Rank 0] step:101/10000 train_time:7246ms step_avg:71.74ms +[2025-09-02 08:05:44] [Rank 0] step:121/10000 train_time:8702ms step_avg:71.92ms +[2025-09-02 08:05:44] [Rank 0] step:121/10000 
train_time:8702ms step_avg:71.92ms +[2025-09-02 08:05:45] [Rank 0] step:141/10000 train_time:10158ms step_avg:72.05ms +[2025-09-02 08:05:45] [Rank 0] step:141/10000 train_time:10158ms step_avg:72.05ms +[2025-09-02 08:05:46] [Rank 0] step:161/10000 train_time:11615ms step_avg:72.14ms +[2025-09-02 08:05:46] [Rank 0] step:161/10000 train_time:11615ms step_avg:72.14ms +[2025-09-02 08:05:48] [Rank 0] step:181/10000 train_time:13071ms step_avg:72.22ms +[2025-09-02 08:05:48] [Rank 0] step:181/10000 train_time:13071ms step_avg:72.22ms +[2025-09-02 08:05:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:05:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:06:01] [Rank 0] PRINT: step:200/10000 val_loss:6.2375 svd_entropy: attn_qk:H=0.6090,top10E=0.54,eRank=96.8,q75/q25=13.04 attn_vo:H=0.5181,top10E=0.57,eRank=77.3,q75/q25=inf mlp_w1:H=0.6626,top10E=0.51,eRank=96.4,q75/q25=2.95 mlp_w2:H=0.8013,top10E=0.18,eRank=209.8,q75/q25=17.15 vo_prod:H=0.3287,top10E=0.81,eRank=15.2,q75/q25=inf train_time:14603ms step_avg:73.02ms +[2025-09-02 08:06:01] [Rank 0] PRINT: step:200/10000 val_loss:6.2375 svd_entropy: attn_qk:H=0.6090,top10E=0.54,eRank=96.8,q75/q25=13.04 attn_vo:H=0.5181,top10E=0.57,eRank=77.3,q75/q25=inf mlp_w1:H=0.6626,top10E=0.51,eRank=96.4,q75/q25=2.95 mlp_w2:H=0.8013,top10E=0.18,eRank=209.8,q75/q25=17.15 vo_prod:H=0.3287,top10E=0.81,eRank=15.2,q75/q25=inf train_time:14603ms step_avg:73.02ms +[2025-09-02 08:06:01] [Rank 0] step:201/10000 train_time:14618ms step_avg:72.73ms +[2025-09-02 08:06:01] [Rank 0] step:201/10000 train_time:14618ms step_avg:72.73ms +[2025-09-02 08:06:03] [Rank 0] step:221/10000 train_time:16016ms step_avg:72.47ms +[2025-09-02 08:06:03] [Rank 0] step:221/10000 train_time:16016ms step_avg:72.47ms +[2025-09-02 08:06:04] [Rank 0] step:241/10000 train_time:17471ms 
step_avg:72.49ms +[2025-09-02 08:06:04] [Rank 0] step:241/10000 train_time:17471ms step_avg:72.49ms +[2025-09-02 08:06:06] [Rank 0] step:261/10000 train_time:18925ms step_avg:72.51ms +[2025-09-02 08:06:06] [Rank 0] step:261/10000 train_time:18925ms step_avg:72.51ms +[2025-09-02 08:06:07] [Rank 0] step:281/10000 train_time:20380ms step_avg:72.53ms +[2025-09-02 08:06:07] [Rank 0] step:281/10000 train_time:20380ms step_avg:72.53ms +[2025-09-02 08:06:09] [Rank 0] step:301/10000 train_time:21835ms step_avg:72.54ms +[2025-09-02 08:06:09] [Rank 0] step:301/10000 train_time:21835ms step_avg:72.54ms +[2025-09-02 08:06:10] [Rank 0] step:321/10000 train_time:23304ms step_avg:72.60ms +[2025-09-02 08:06:10] [Rank 0] step:321/10000 train_time:23304ms step_avg:72.60ms +[2025-09-02 08:06:12] [Rank 0] step:341/10000 train_time:24757ms step_avg:72.60ms +[2025-09-02 08:06:12] [Rank 0] step:341/10000 train_time:24757ms step_avg:72.60ms +[2025-09-02 08:06:13] [Rank 0] step:361/10000 train_time:26211ms step_avg:72.61ms +[2025-09-02 08:06:13] [Rank 0] step:361/10000 train_time:26211ms step_avg:72.61ms +[2025-09-02 08:06:14] [Rank 0] step:381/10000 train_time:27665ms step_avg:72.61ms +[2025-09-02 08:06:14] [Rank 0] step:381/10000 train_time:27665ms step_avg:72.61ms +[2025-09-02 08:06:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:06:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:06:28] [Rank 0] PRINT: step:400/10000 val_loss:5.7224 svd_entropy: attn_qk:H=0.6489,top10E=0.44,eRank=110.8,q75/q25=15.64 attn_vo:H=0.6003,top10E=0.41,eRank=105.8,q75/q25=inf mlp_w1:H=0.6857,top10E=0.41,eRank=115.4,q75/q25=4.58 mlp_w2:H=0.9285,top10E=0.07,eRank=478.9,q75/q25=6.50 vo_prod:H=0.4279,top10E=0.65,eRank=25.5,q75/q25=inf train_time:29194ms step_avg:72.98ms +[2025-09-02 08:06:28] [Rank 0] PRINT: step:400/10000 val_loss:5.7224 svd_entropy: attn_qk:H=0.6489,top10E=0.44,eRank=110.8,q75/q25=15.64 attn_vo:H=0.6003,top10E=0.41,eRank=105.8,q75/q25=inf mlp_w1:H=0.6857,top10E=0.41,eRank=115.4,q75/q25=4.58 mlp_w2:H=0.9285,top10E=0.07,eRank=478.9,q75/q25=6.50 vo_prod:H=0.4279,top10E=0.65,eRank=25.5,q75/q25=inf train_time:29194ms step_avg:72.98ms +[2025-09-02 08:06:28] [Rank 0] step:401/10000 train_time:29209ms step_avg:72.84ms +[2025-09-02 08:06:28] [Rank 0] step:401/10000 train_time:29209ms step_avg:72.84ms +[2025-09-02 08:06:29] [Rank 0] step:421/10000 train_time:30602ms step_avg:72.69ms +[2025-09-02 08:06:29] [Rank 0] step:421/10000 train_time:30602ms step_avg:72.69ms +[2025-09-02 08:06:31] [Rank 0] step:441/10000 train_time:32056ms step_avg:72.69ms +[2025-09-02 08:06:31] [Rank 0] step:441/10000 train_time:32056ms step_avg:72.69ms +[2025-09-02 08:06:32] [Rank 0] step:461/10000 train_time:33510ms step_avg:72.69ms +[2025-09-02 08:06:32] [Rank 0] step:461/10000 train_time:33510ms step_avg:72.69ms +[2025-09-02 08:06:34] [Rank 0] step:481/10000 train_time:34963ms step_avg:72.69ms +[2025-09-02 08:06:34] [Rank 0] step:481/10000 train_time:34963ms step_avg:72.69ms +[2025-09-02 08:06:35] [Rank 0] step:501/10000 train_time:36416ms step_avg:72.69ms +[2025-09-02 08:06:35] [Rank 0] step:501/10000 train_time:36416ms step_avg:72.69ms +[2025-09-02 08:06:37] [Rank 0] step:521/10000 train_time:37870ms step_avg:72.69ms +[2025-09-02 08:06:37] [Rank 0] step:521/10000 train_time:37870ms step_avg:72.69ms +[2025-09-02 08:06:38] [Rank 0] step:541/10000 train_time:39324ms 
step_avg:72.69ms +[2025-09-02 08:06:38] [Rank 0] step:541/10000 train_time:39324ms step_avg:72.69ms +[2025-09-02 08:06:39] [Rank 0] step:561/10000 train_time:40779ms step_avg:72.69ms +[2025-09-02 08:06:39] [Rank 0] step:561/10000 train_time:40779ms step_avg:72.69ms +[2025-09-02 08:06:41] [Rank 0] step:581/10000 train_time:42234ms step_avg:72.69ms +[2025-09-02 08:06:41] [Rank 0] step:581/10000 train_time:42234ms step_avg:72.69ms +[2025-09-02 08:06:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:06:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:06:54] [Rank 0] PRINT: step:600/10000 val_loss:5.4269 svd_entropy: attn_qk:H=0.6762,top10E=0.39,eRank=122.5,q75/q25=20.17 attn_vo:H=0.6478,top10E=0.34,eRank=130.3,q75/q25=inf mlp_w1:H=0.7283,top10E=0.35,eRank=145.3,q75/q25=6.36 mlp_w2:H=0.9487,top10E=0.05,eRank=546.7,q75/q25=4.50 vo_prod:H=0.4840,top10E=0.53,eRank=35.3,q75/q25=inf train_time:43762ms step_avg:72.94ms +[2025-09-02 08:06:54] [Rank 0] PRINT: step:600/10000 val_loss:5.4269 svd_entropy: attn_qk:H=0.6762,top10E=0.39,eRank=122.5,q75/q25=20.17 attn_vo:H=0.6478,top10E=0.34,eRank=130.3,q75/q25=inf mlp_w1:H=0.7283,top10E=0.35,eRank=145.3,q75/q25=6.36 mlp_w2:H=0.9487,top10E=0.05,eRank=546.7,q75/q25=4.50 vo_prod:H=0.4840,top10E=0.53,eRank=35.3,q75/q25=inf train_time:43762ms step_avg:72.94ms +[2025-09-02 08:06:54] [Rank 0] step:601/10000 train_time:43777ms step_avg:72.84ms +[2025-09-02 08:06:54] [Rank 0] step:601/10000 train_time:43777ms step_avg:72.84ms +[2025-09-02 08:06:56] [Rank 0] step:621/10000 train_time:45169ms step_avg:72.74ms +[2025-09-02 08:06:56] [Rank 0] step:621/10000 train_time:45169ms step_avg:72.74ms +[2025-09-02 08:06:57] [Rank 0] step:641/10000 train_time:46622ms step_avg:72.73ms +[2025-09-02 08:06:57] [Rank 0] step:641/10000 train_time:46622ms step_avg:72.73ms 
+[2025-09-02 08:06:59] [Rank 0] step:661/10000 train_time:48076ms step_avg:72.73ms +[2025-09-02 08:06:59] [Rank 0] step:661/10000 train_time:48076ms step_avg:72.73ms +[2025-09-02 08:07:00] [Rank 0] step:681/10000 train_time:49529ms step_avg:72.73ms +[2025-09-02 08:07:00] [Rank 0] step:681/10000 train_time:49529ms step_avg:72.73ms +[2025-09-02 08:07:02] [Rank 0] step:701/10000 train_time:51042ms step_avg:72.81ms +[2025-09-02 08:07:02] [Rank 0] step:701/10000 train_time:51042ms step_avg:72.81ms +[2025-09-02 08:07:03] [Rank 0] step:721/10000 train_time:52496ms step_avg:72.81ms +[2025-09-02 08:07:03] [Rank 0] step:721/10000 train_time:52496ms step_avg:72.81ms +[2025-09-02 08:07:05] [Rank 0] step:741/10000 train_time:53951ms step_avg:72.81ms +[2025-09-02 08:07:05] [Rank 0] step:741/10000 train_time:53951ms step_avg:72.81ms +[2025-09-02 08:07:06] [Rank 0] step:761/10000 train_time:55417ms step_avg:72.82ms +[2025-09-02 08:07:06] [Rank 0] step:761/10000 train_time:55417ms step_avg:72.82ms +[2025-09-02 08:07:07] [Rank 0] step:781/10000 train_time:56895ms step_avg:72.85ms +[2025-09-02 08:07:07] [Rank 0] step:781/10000 train_time:56895ms step_avg:72.85ms +[2025-09-02 08:07:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:07:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:07:21] [Rank 0] PRINT: step:800/10000 val_loss:5.1849 svd_entropy: attn_qk:H=0.6953,top10E=0.36,eRank=131.7,q75/q25=26.67 attn_vo:H=0.6794,top10E=0.29,eRank=150.9,q75/q25=inf mlp_w1:H=0.7595,top10E=0.31,eRank=172.2,q75/q25=7.20 mlp_w2:H=0.9555,top10E=0.05,eRank=571.8,q75/q25=3.96 vo_prod:H=0.5198,top10E=0.46,eRank=44.2,q75/q25=inf train_time:58439ms step_avg:73.05ms +[2025-09-02 08:07:21] [Rank 0] PRINT: step:800/10000 val_loss:5.1849 svd_entropy: attn_qk:H=0.6953,top10E=0.36,eRank=131.7,q75/q25=26.67 attn_vo:H=0.6794,top10E=0.29,eRank=150.9,q75/q25=inf mlp_w1:H=0.7595,top10E=0.31,eRank=172.2,q75/q25=7.20 mlp_w2:H=0.9555,top10E=0.05,eRank=571.8,q75/q25=3.96 vo_prod:H=0.5198,top10E=0.46,eRank=44.2,q75/q25=inf train_time:58439ms step_avg:73.05ms +[2025-09-02 08:07:21] [Rank 0] step:801/10000 train_time:58454ms step_avg:72.98ms +[2025-09-02 08:07:21] [Rank 0] step:801/10000 train_time:58454ms step_avg:72.98ms +[2025-09-02 08:07:22] [Rank 0] step:821/10000 train_time:59861ms step_avg:72.91ms +[2025-09-02 08:07:22] [Rank 0] step:821/10000 train_time:59861ms step_avg:72.91ms +[2025-09-02 08:07:24] [Rank 0] step:841/10000 train_time:61327ms step_avg:72.92ms +[2025-09-02 08:07:24] [Rank 0] step:841/10000 train_time:61327ms step_avg:72.92ms +[2025-09-02 08:07:25] [Rank 0] step:861/10000 train_time:62793ms step_avg:72.93ms +[2025-09-02 08:07:25] [Rank 0] step:861/10000 train_time:62793ms step_avg:72.93ms +[2025-09-02 08:07:27] [Rank 0] step:881/10000 train_time:64260ms step_avg:72.94ms +[2025-09-02 08:07:27] [Rank 0] step:881/10000 train_time:64260ms step_avg:72.94ms +[2025-09-02 08:07:28] [Rank 0] step:901/10000 train_time:65727ms step_avg:72.95ms +[2025-09-02 08:07:28] [Rank 0] step:901/10000 train_time:65727ms step_avg:72.95ms +[2025-09-02 08:07:30] [Rank 0] step:921/10000 train_time:67195ms step_avg:72.96ms +[2025-09-02 08:07:30] [Rank 0] step:921/10000 train_time:67195ms step_avg:72.96ms +[2025-09-02 08:07:31] [Rank 0] step:941/10000 train_time:68662ms 
step_avg:72.97ms +[2025-09-02 08:07:31] [Rank 0] step:941/10000 train_time:68662ms step_avg:72.97ms +[2025-09-02 08:07:33] [Rank 0] step:961/10000 train_time:70131ms step_avg:72.98ms +[2025-09-02 08:07:33] [Rank 0] step:961/10000 train_time:70131ms step_avg:72.98ms +[2025-09-02 08:07:34] [Rank 0] step:981/10000 train_time:71598ms step_avg:72.98ms +[2025-09-02 08:07:34] [Rank 0] step:981/10000 train_time:71598ms step_avg:72.98ms +[2025-09-02 08:07:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:07:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:07:47] [Rank 0] PRINT: step:1000/10000 val_loss:4.9942 svd_entropy: attn_qk:H=0.7097,top10E=0.33,eRank=139.6,q75/q25=33.00 attn_vo:H=0.7017,top10E=0.26,eRank=168.4,q75/q25=inf mlp_w1:H=0.7845,top10E=0.28,eRank=197.9,q75/q25=7.45 mlp_w2:H=0.9601,top10E=0.04,eRank=589.4,q75/q25=3.61 vo_prod:H=0.5435,top10E=0.42,eRank=51.3,q75/q25=inf train_time:73140ms step_avg:73.14ms +[2025-09-02 08:07:47] [Rank 0] PRINT: step:1000/10000 val_loss:4.9942 svd_entropy: attn_qk:H=0.7097,top10E=0.33,eRank=139.6,q75/q25=33.00 attn_vo:H=0.7017,top10E=0.26,eRank=168.4,q75/q25=inf mlp_w1:H=0.7845,top10E=0.28,eRank=197.9,q75/q25=7.45 mlp_w2:H=0.9601,top10E=0.04,eRank=589.4,q75/q25=3.61 vo_prod:H=0.5435,top10E=0.42,eRank=51.3,q75/q25=inf train_time:73140ms step_avg:73.14ms +[2025-09-02 08:07:48] [Rank 0] step:1001/10000 train_time:73156ms step_avg:73.08ms +[2025-09-02 08:07:48] [Rank 0] step:1001/10000 train_time:73156ms step_avg:73.08ms +[2025-09-02 08:07:49] [Rank 0] step:1021/10000 train_time:74565ms step_avg:73.03ms +[2025-09-02 08:07:49] [Rank 0] step:1021/10000 train_time:74565ms step_avg:73.03ms +[2025-09-02 08:07:50] [Rank 0] step:1041/10000 train_time:76032ms step_avg:73.04ms +[2025-09-02 08:07:50] [Rank 0] step:1041/10000 train_time:76032ms 
step_avg:73.04ms +[2025-09-02 08:07:52] [Rank 0] step:1061/10000 train_time:77500ms step_avg:73.04ms +[2025-09-02 08:07:52] [Rank 0] step:1061/10000 train_time:77500ms step_avg:73.04ms +[2025-09-02 08:07:53] [Rank 0] step:1081/10000 train_time:78973ms step_avg:73.06ms +[2025-09-02 08:07:53] [Rank 0] step:1081/10000 train_time:78973ms step_avg:73.06ms +[2025-09-02 08:07:55] [Rank 0] step:1101/10000 train_time:80439ms step_avg:73.06ms +[2025-09-02 08:07:55] [Rank 0] step:1101/10000 train_time:80439ms step_avg:73.06ms +[2025-09-02 08:07:56] [Rank 0] step:1121/10000 train_time:81907ms step_avg:73.07ms +[2025-09-02 08:07:56] [Rank 0] step:1121/10000 train_time:81907ms step_avg:73.07ms +[2025-09-02 08:07:58] [Rank 0] step:1141/10000 train_time:83374ms step_avg:73.07ms +[2025-09-02 08:07:58] [Rank 0] step:1141/10000 train_time:83374ms step_avg:73.07ms +[2025-09-02 08:07:59] [Rank 0] step:1161/10000 train_time:84843ms step_avg:73.08ms +[2025-09-02 08:07:59] [Rank 0] step:1161/10000 train_time:84843ms step_avg:73.08ms +[2025-09-02 08:08:01] [Rank 0] step:1181/10000 train_time:86311ms step_avg:73.08ms +[2025-09-02 08:08:01] [Rank 0] step:1181/10000 train_time:86311ms step_avg:73.08ms +[2025-09-02 08:08:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:08:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:08:14] [Rank 0] PRINT: step:1200/10000 val_loss:4.8173 svd_entropy: attn_qk:H=0.7205,top10E=0.32,eRank=146.4,q75/q25=39.21 attn_vo:H=0.7196,top10E=0.24,eRank=184.2,q75/q25=inf mlp_w1:H=0.8046,top10E=0.26,eRank=222.3,q75/q25=7.36 mlp_w2:H=0.9631,top10E=0.04,eRank=601.2,q75/q25=3.39 vo_prod:H=0.5627,top10E=0.38,eRank=58.3,q75/q25=inf train_time:87854ms step_avg:73.21ms +[2025-09-02 08:08:14] [Rank 0] PRINT: step:1200/10000 val_loss:4.8173 svd_entropy: attn_qk:H=0.7205,top10E=0.32,eRank=146.4,q75/q25=39.21 attn_vo:H=0.7196,top10E=0.24,eRank=184.2,q75/q25=inf mlp_w1:H=0.8046,top10E=0.26,eRank=222.3,q75/q25=7.36 mlp_w2:H=0.9631,top10E=0.04,eRank=601.2,q75/q25=3.39 vo_prod:H=0.5627,top10E=0.38,eRank=58.3,q75/q25=inf train_time:87854ms step_avg:73.21ms +[2025-09-02 08:08:14] [Rank 0] step:1201/10000 train_time:87870ms step_avg:73.16ms +[2025-09-02 08:08:14] [Rank 0] step:1201/10000 train_time:87870ms step_avg:73.16ms +[2025-09-02 08:08:15] [Rank 0] step:1221/10000 train_time:89279ms step_avg:73.12ms +[2025-09-02 08:08:15] [Rank 0] step:1221/10000 train_time:89279ms step_avg:73.12ms +[2025-09-02 08:08:17] [Rank 0] step:1241/10000 train_time:90746ms step_avg:73.12ms +[2025-09-02 08:08:17] [Rank 0] step:1241/10000 train_time:90746ms step_avg:73.12ms +[2025-09-02 08:08:18] [Rank 0] step:1261/10000 train_time:92213ms step_avg:73.13ms +[2025-09-02 08:08:18] [Rank 0] step:1261/10000 train_time:92213ms step_avg:73.13ms +[2025-09-02 08:08:20] [Rank 0] step:1281/10000 train_time:93680ms step_avg:73.13ms +[2025-09-02 08:08:20] [Rank 0] step:1281/10000 train_time:93680ms step_avg:73.13ms +[2025-09-02 08:08:21] [Rank 0] step:1301/10000 train_time:95149ms step_avg:73.14ms +[2025-09-02 08:08:21] [Rank 0] step:1301/10000 train_time:95149ms step_avg:73.14ms +[2025-09-02 08:08:23] [Rank 0] step:1321/10000 train_time:96616ms step_avg:73.14ms +[2025-09-02 08:08:23] [Rank 0] step:1321/10000 train_time:96616ms step_avg:73.14ms +[2025-09-02 08:08:24] [Rank 0] step:1341/10000 
train_time:98085ms step_avg:73.14ms +[2025-09-02 08:08:24] [Rank 0] step:1341/10000 train_time:98085ms step_avg:73.14ms +[2025-09-02 08:08:26] [Rank 0] step:1361/10000 train_time:99552ms step_avg:73.15ms +[2025-09-02 08:08:26] [Rank 0] step:1361/10000 train_time:99552ms step_avg:73.15ms +[2025-09-02 08:08:27] [Rank 0] step:1381/10000 train_time:101022ms step_avg:73.15ms +[2025-09-02 08:08:27] [Rank 0] step:1381/10000 train_time:101022ms step_avg:73.15ms +[2025-09-02 08:08:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:08:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:08:40] [Rank 0] PRINT: step:1400/10000 val_loss:4.7070 svd_entropy: attn_qk:H=0.7291,top10E=0.31,eRank=152.1,q75/q25=44.49 attn_vo:H=0.7339,top10E=0.22,eRank=198.1,q75/q25=inf mlp_w1:H=0.8201,top10E=0.24,eRank=243.5,q75/q25=7.17 mlp_w2:H=0.9652,top10E=0.04,eRank=609.5,q75/q25=3.26 vo_prod:H=0.5781,top10E=0.36,eRank=64.7,q75/q25=inf train_time:102565ms step_avg:73.26ms +[2025-09-02 08:08:40] [Rank 0] PRINT: step:1400/10000 val_loss:4.7070 svd_entropy: attn_qk:H=0.7291,top10E=0.31,eRank=152.1,q75/q25=44.49 attn_vo:H=0.7339,top10E=0.22,eRank=198.1,q75/q25=inf mlp_w1:H=0.8201,top10E=0.24,eRank=243.5,q75/q25=7.17 mlp_w2:H=0.9652,top10E=0.04,eRank=609.5,q75/q25=3.26 vo_prod:H=0.5781,top10E=0.36,eRank=64.7,q75/q25=inf train_time:102565ms step_avg:73.26ms +[2025-09-02 08:08:41] [Rank 0] step:1401/10000 train_time:102581ms step_avg:73.22ms +[2025-09-02 08:08:41] [Rank 0] step:1401/10000 train_time:102581ms step_avg:73.22ms +[2025-09-02 08:08:42] [Rank 0] step:1421/10000 train_time:103988ms step_avg:73.18ms +[2025-09-02 08:08:42] [Rank 0] step:1421/10000 train_time:103988ms step_avg:73.18ms +[2025-09-02 08:08:43] [Rank 0] step:1441/10000 train_time:105454ms step_avg:73.18ms +[2025-09-02 08:08:43] [Rank 0] 
step:1441/10000 train_time:105454ms step_avg:73.18ms +[2025-09-02 08:08:45] [Rank 0] step:1461/10000 train_time:106922ms step_avg:73.18ms +[2025-09-02 08:08:45] [Rank 0] step:1461/10000 train_time:106922ms step_avg:73.18ms +[2025-09-02 08:08:46] [Rank 0] step:1481/10000 train_time:108389ms step_avg:73.19ms +[2025-09-02 08:08:46] [Rank 0] step:1481/10000 train_time:108389ms step_avg:73.19ms +[2025-09-02 08:08:48] [Rank 0] step:1501/10000 train_time:109867ms step_avg:73.20ms +[2025-09-02 08:08:48] [Rank 0] step:1501/10000 train_time:109867ms step_avg:73.20ms +[2025-09-02 08:08:49] [Rank 0] step:1521/10000 train_time:111345ms step_avg:73.21ms +[2025-09-02 08:08:49] [Rank 0] step:1521/10000 train_time:111345ms step_avg:73.21ms +[2025-09-02 08:08:51] [Rank 0] step:1541/10000 train_time:112825ms step_avg:73.22ms +[2025-09-02 08:08:51] [Rank 0] step:1541/10000 train_time:112825ms step_avg:73.22ms +[2025-09-02 08:08:52] [Rank 0] step:1561/10000 train_time:114303ms step_avg:73.22ms +[2025-09-02 08:08:52] [Rank 0] step:1561/10000 train_time:114303ms step_avg:73.22ms +[2025-09-02 08:08:54] [Rank 0] step:1581/10000 train_time:115782ms step_avg:73.23ms +[2025-09-02 08:08:54] [Rank 0] step:1581/10000 train_time:115782ms step_avg:73.23ms +[2025-09-02 08:08:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:08:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:09:07] [Rank 0] PRINT: step:1600/10000 val_loss:4.5814 svd_entropy: attn_qk:H=0.7357,top10E=0.30,eRank=156.6,q75/q25=48.67 attn_vo:H=0.7453,top10E=0.21,eRank=209.9,q75/q25=inf mlp_w1:H=0.8325,top10E=0.22,eRank=262.1,q75/q25=6.96 mlp_w2:H=0.9667,top10E=0.04,eRank=615.6,q75/q25=3.16 vo_prod:H=0.5912,top10E=0.33,eRank=70.7,q75/q25=inf train_time:117338ms step_avg:73.34ms +[2025-09-02 08:09:07] [Rank 0] PRINT: step:1600/10000 val_loss:4.5814 svd_entropy: attn_qk:H=0.7357,top10E=0.30,eRank=156.6,q75/q25=48.67 attn_vo:H=0.7453,top10E=0.21,eRank=209.9,q75/q25=inf mlp_w1:H=0.8325,top10E=0.22,eRank=262.1,q75/q25=6.96 mlp_w2:H=0.9667,top10E=0.04,eRank=615.6,q75/q25=3.16 vo_prod:H=0.5912,top10E=0.33,eRank=70.7,q75/q25=inf train_time:117338ms step_avg:73.34ms +[2025-09-02 08:09:07] [Rank 0] step:1601/10000 train_time:117353ms step_avg:73.30ms +[2025-09-02 08:09:07] [Rank 0] step:1601/10000 train_time:117353ms step_avg:73.30ms +[2025-09-02 08:09:09] [Rank 0] step:1621/10000 train_time:118817ms step_avg:73.30ms +[2025-09-02 08:09:09] [Rank 0] step:1621/10000 train_time:118817ms step_avg:73.30ms +[2025-09-02 08:09:10] [Rank 0] step:1641/10000 train_time:120295ms step_avg:73.31ms +[2025-09-02 08:09:10] [Rank 0] step:1641/10000 train_time:120295ms step_avg:73.31ms +[2025-09-02 08:09:12] [Rank 0] step:1661/10000 train_time:121775ms step_avg:73.31ms +[2025-09-02 08:09:12] [Rank 0] step:1661/10000 train_time:121775ms step_avg:73.31ms +[2025-09-02 08:09:13] [Rank 0] step:1681/10000 train_time:123254ms step_avg:73.32ms +[2025-09-02 08:09:13] [Rank 0] step:1681/10000 train_time:123254ms step_avg:73.32ms +[2025-09-02 08:09:14] [Rank 0] step:1701/10000 train_time:124732ms step_avg:73.33ms +[2025-09-02 08:09:14] [Rank 0] step:1701/10000 train_time:124732ms step_avg:73.33ms +[2025-09-02 08:09:16] [Rank 0] step:1721/10000 train_time:126211ms step_avg:73.34ms +[2025-09-02 08:09:16] [Rank 0] step:1721/10000 train_time:126211ms step_avg:73.34ms +[2025-09-02 08:09:17] [Rank 0] 
step:1741/10000 train_time:127691ms step_avg:73.34ms +[2025-09-02 08:09:17] [Rank 0] step:1741/10000 train_time:127691ms step_avg:73.34ms +[2025-09-02 08:09:19] [Rank 0] step:1761/10000 train_time:129171ms step_avg:73.35ms +[2025-09-02 08:09:19] [Rank 0] step:1761/10000 train_time:129171ms step_avg:73.35ms +[2025-09-02 08:09:20] [Rank 0] step:1781/10000 train_time:130651ms step_avg:73.36ms +[2025-09-02 08:09:20] [Rank 0] step:1781/10000 train_time:130651ms step_avg:73.36ms +[2025-09-02 08:09:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:09:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:09:33] [Rank 0] PRINT: step:1800/10000 val_loss:4.4889 svd_entropy: attn_qk:H=0.7415,top10E=0.29,eRank=160.8,q75/q25=51.53 attn_vo:H=0.7546,top10E=0.20,eRank=220.3,q75/q25=inf mlp_w1:H=0.8425,top10E=0.21,eRank=278.4,q75/q25=6.69 mlp_w2:H=0.9677,top10E=0.04,eRank=619.9,q75/q25=3.10 vo_prod:H=0.6018,top10E=0.32,eRank=76.1,q75/q25=inf train_time:132209ms step_avg:73.45ms +[2025-09-02 08:09:33] [Rank 0] PRINT: step:1800/10000 val_loss:4.4889 svd_entropy: attn_qk:H=0.7415,top10E=0.29,eRank=160.8,q75/q25=51.53 attn_vo:H=0.7546,top10E=0.20,eRank=220.3,q75/q25=inf mlp_w1:H=0.8425,top10E=0.21,eRank=278.4,q75/q25=6.69 mlp_w2:H=0.9677,top10E=0.04,eRank=619.9,q75/q25=3.10 vo_prod:H=0.6018,top10E=0.32,eRank=76.1,q75/q25=inf train_time:132209ms step_avg:73.45ms +[2025-09-02 08:09:34] [Rank 0] step:1801/10000 train_time:132224ms step_avg:73.42ms +[2025-09-02 08:09:34] [Rank 0] step:1801/10000 train_time:132224ms step_avg:73.42ms +[2025-09-02 08:09:35] [Rank 0] step:1821/10000 train_time:133634ms step_avg:73.38ms +[2025-09-02 08:09:35] [Rank 0] step:1821/10000 train_time:133634ms step_avg:73.38ms +[2025-09-02 08:09:37] [Rank 0] step:1841/10000 train_time:135111ms step_avg:73.39ms +[2025-09-02 08:09:37] 
[Rank 0] step:1841/10000 train_time:135111ms step_avg:73.39ms +[2025-09-02 08:09:38] [Rank 0] step:1861/10000 train_time:136590ms step_avg:73.40ms +[2025-09-02 08:09:38] [Rank 0] step:1861/10000 train_time:136590ms step_avg:73.40ms +[2025-09-02 08:09:39] [Rank 0] step:1881/10000 train_time:138069ms step_avg:73.40ms +[2025-09-02 08:09:39] [Rank 0] step:1881/10000 train_time:138069ms step_avg:73.40ms +[2025-09-02 08:09:41] [Rank 0] step:1901/10000 train_time:139549ms step_avg:73.41ms +[2025-09-02 08:09:41] [Rank 0] step:1901/10000 train_time:139549ms step_avg:73.41ms +[2025-09-02 08:09:42] [Rank 0] step:1921/10000 train_time:141028ms step_avg:73.41ms +[2025-09-02 08:09:42] [Rank 0] step:1921/10000 train_time:141028ms step_avg:73.41ms +[2025-09-02 08:09:44] [Rank 0] step:1941/10000 train_time:142509ms step_avg:73.42ms +[2025-09-02 08:09:44] [Rank 0] step:1941/10000 train_time:142509ms step_avg:73.42ms +[2025-09-02 08:09:45] [Rank 0] step:1961/10000 train_time:143988ms step_avg:73.43ms +[2025-09-02 08:09:45] [Rank 0] step:1961/10000 train_time:143988ms step_avg:73.43ms +[2025-09-02 08:09:47] [Rank 0] step:1981/10000 train_time:145468ms step_avg:73.43ms +[2025-09-02 08:09:47] [Rank 0] step:1981/10000 train_time:145468ms step_avg:73.43ms +[2025-09-02 08:09:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:09:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:10:00] [Rank 0] PRINT: step:2000/10000 val_loss:4.4314 svd_entropy: attn_qk:H=0.7464,top10E=0.28,eRank=164.5,q75/q25=53.97 attn_vo:H=0.7623,top10E=0.19,eRank=229.2,q75/q25=inf mlp_w1:H=0.8505,top10E=0.20,eRank=292.5,q75/q25=6.45 mlp_w2:H=0.9685,top10E=0.04,eRank=623.1,q75/q25=3.04 vo_prod:H=0.6109,top10E=0.30,eRank=81.2,q75/q25=inf train_time:147023ms step_avg:73.51ms +[2025-09-02 08:10:00] [Rank 0] PRINT: step:2000/10000 val_loss:4.4314 svd_entropy: attn_qk:H=0.7464,top10E=0.28,eRank=164.5,q75/q25=53.97 attn_vo:H=0.7623,top10E=0.19,eRank=229.2,q75/q25=inf mlp_w1:H=0.8505,top10E=0.20,eRank=292.5,q75/q25=6.45 mlp_w2:H=0.9685,top10E=0.04,eRank=623.1,q75/q25=3.04 vo_prod:H=0.6109,top10E=0.30,eRank=81.2,q75/q25=inf train_time:147023ms step_avg:73.51ms +[2025-09-02 08:10:00] [Rank 0] step:2001/10000 train_time:147039ms step_avg:73.48ms +[2025-09-02 08:10:00] [Rank 0] step:2001/10000 train_time:147039ms step_avg:73.48ms +[2025-09-02 08:10:01] [Rank 0] step:2021/10000 train_time:148467ms step_avg:73.46ms +[2025-09-02 08:10:01] [Rank 0] step:2021/10000 train_time:148467ms step_avg:73.46ms +[2025-09-02 08:10:03] [Rank 0] step:2041/10000 train_time:150127ms step_avg:73.56ms +[2025-09-02 08:10:03] [Rank 0] step:2041/10000 train_time:150127ms step_avg:73.56ms +[2025-09-02 08:10:05] [Rank 0] step:2061/10000 train_time:151604ms step_avg:73.56ms +[2025-09-02 08:10:05] [Rank 0] step:2061/10000 train_time:151604ms step_avg:73.56ms +[2025-09-02 08:10:06] [Rank 0] step:2081/10000 train_time:153081ms step_avg:73.56ms +[2025-09-02 08:10:06] [Rank 0] step:2081/10000 train_time:153081ms step_avg:73.56ms +[2025-09-02 08:10:07] [Rank 0] step:2101/10000 train_time:154560ms step_avg:73.57ms +[2025-09-02 08:10:07] [Rank 0] step:2101/10000 train_time:154560ms step_avg:73.57ms +[2025-09-02 08:10:09] [Rank 0] step:2121/10000 train_time:156038ms step_avg:73.57ms +[2025-09-02 08:10:09] [Rank 0] step:2121/10000 train_time:156038ms step_avg:73.57ms +[2025-09-02 08:10:10] [Rank 0] 
step:2141/10000 train_time:157518ms step_avg:73.57ms +[2025-09-02 08:10:10] [Rank 0] step:2141/10000 train_time:157518ms step_avg:73.57ms +[2025-09-02 08:10:12] [Rank 0] step:2161/10000 train_time:159049ms step_avg:73.60ms +[2025-09-02 08:10:12] [Rank 0] step:2161/10000 train_time:159049ms step_avg:73.60ms +[2025-09-02 08:10:13] [Rank 0] step:2181/10000 train_time:160529ms step_avg:73.60ms +[2025-09-02 08:10:13] [Rank 0] step:2181/10000 train_time:160529ms step_avg:73.60ms +[2025-09-02 08:10:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:10:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:10:27] [Rank 0] PRINT: step:2200/10000 val_loss:4.3662 svd_entropy: attn_qk:H=0.7506,top10E=0.28,eRank=167.8,q75/q25=55.69 attn_vo:H=0.7686,top10E=0.18,eRank=236.7,q75/q25=inf mlp_w1:H=0.8571,top10E=0.20,eRank=304.4,q75/q25=6.24 mlp_w2:H=0.9691,top10E=0.04,eRank=625.6,q75/q25=3.01 vo_prod:H=0.6184,top10E=0.29,eRank=85.8,q75/q25=inf train_time:162084ms step_avg:73.67ms +[2025-09-02 08:10:27] [Rank 0] PRINT: step:2200/10000 val_loss:4.3662 svd_entropy: attn_qk:H=0.7506,top10E=0.28,eRank=167.8,q75/q25=55.69 attn_vo:H=0.7686,top10E=0.18,eRank=236.7,q75/q25=inf mlp_w1:H=0.8571,top10E=0.20,eRank=304.4,q75/q25=6.24 mlp_w2:H=0.9691,top10E=0.04,eRank=625.6,q75/q25=3.01 vo_prod:H=0.6184,top10E=0.29,eRank=85.8,q75/q25=inf train_time:162084ms step_avg:73.67ms +[2025-09-02 08:10:27] [Rank 0] step:2201/10000 train_time:162099ms step_avg:73.65ms +[2025-09-02 08:10:27] [Rank 0] step:2201/10000 train_time:162099ms step_avg:73.65ms +[2025-09-02 08:10:28] [Rank 0] step:2221/10000 train_time:163502ms step_avg:73.62ms +[2025-09-02 08:10:28] [Rank 0] step:2221/10000 train_time:163502ms step_avg:73.62ms +[2025-09-02 08:10:30] [Rank 0] step:2241/10000 train_time:165015ms step_avg:73.63ms +[2025-09-02 08:10:30] 
[Rank 0] step:2241/10000 train_time:165015ms step_avg:73.63ms +[2025-09-02 08:10:31] [Rank 0] step:2261/10000 train_time:166536ms step_avg:73.66ms +[2025-09-02 08:10:31] [Rank 0] step:2261/10000 train_time:166536ms step_avg:73.66ms +[2025-09-02 08:10:33] [Rank 0] step:2281/10000 train_time:168059ms step_avg:73.68ms +[2025-09-02 08:10:33] [Rank 0] step:2281/10000 train_time:168059ms step_avg:73.68ms +[2025-09-02 08:10:34] [Rank 0] step:2301/10000 train_time:169581ms step_avg:73.70ms +[2025-09-02 08:10:34] [Rank 0] step:2301/10000 train_time:169581ms step_avg:73.70ms +[2025-09-02 08:10:36] [Rank 0] step:2321/10000 train_time:171104ms step_avg:73.72ms +[2025-09-02 08:10:36] [Rank 0] step:2321/10000 train_time:171104ms step_avg:73.72ms +[2025-09-02 08:10:37] [Rank 0] step:2341/10000 train_time:172627ms step_avg:73.74ms +[2025-09-02 08:10:37] [Rank 0] step:2341/10000 train_time:172627ms step_avg:73.74ms +[2025-09-02 08:10:39] [Rank 0] step:2361/10000 train_time:174151ms step_avg:73.76ms +[2025-09-02 08:10:39] [Rank 0] step:2361/10000 train_time:174151ms step_avg:73.76ms +[2025-09-02 08:10:40] [Rank 0] step:2381/10000 train_time:175676ms step_avg:73.78ms +[2025-09-02 08:10:40] [Rank 0] step:2381/10000 train_time:175676ms step_avg:73.78ms +[2025-09-02 08:10:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:10:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:10:53] [Rank 0] PRINT: step:2400/10000 val_loss:4.2921 svd_entropy: attn_qk:H=0.7533,top10E=0.28,eRank=169.8,q75/q25=56.09 attn_vo:H=0.7740,top10E=0.17,eRank=243.3,q75/q25=inf mlp_w1:H=0.8628,top10E=0.19,eRank=315.4,q75/q25=6.06 mlp_w2:H=0.9695,top10E=0.04,eRank=627.4,q75/q25=2.98 vo_prod:H=0.6252,top10E=0.28,eRank=90.2,q75/q25=inf train_time:177278ms step_avg:73.87ms +[2025-09-02 08:10:53] [Rank 0] PRINT: step:2400/10000 val_loss:4.2921 svd_entropy: attn_qk:H=0.7533,top10E=0.28,eRank=169.8,q75/q25=56.09 attn_vo:H=0.7740,top10E=0.17,eRank=243.3,q75/q25=inf mlp_w1:H=0.8628,top10E=0.19,eRank=315.4,q75/q25=6.06 mlp_w2:H=0.9695,top10E=0.04,eRank=627.4,q75/q25=2.98 vo_prod:H=0.6252,top10E=0.28,eRank=90.2,q75/q25=inf train_time:177278ms step_avg:73.87ms +[2025-09-02 08:10:54] [Rank 0] step:2401/10000 train_time:177293ms step_avg:73.84ms +[2025-09-02 08:10:54] [Rank 0] step:2401/10000 train_time:177293ms step_avg:73.84ms +[2025-09-02 08:10:55] [Rank 0] step:2421/10000 train_time:178760ms step_avg:73.84ms +[2025-09-02 08:10:55] [Rank 0] step:2421/10000 train_time:178760ms step_avg:73.84ms +[2025-09-02 08:10:57] [Rank 0] step:2441/10000 train_time:180284ms step_avg:73.86ms +[2025-09-02 08:10:57] [Rank 0] step:2441/10000 train_time:180284ms step_avg:73.86ms +[2025-09-02 08:10:58] [Rank 0] step:2461/10000 train_time:181807ms step_avg:73.88ms +[2025-09-02 08:10:58] [Rank 0] step:2461/10000 train_time:181807ms step_avg:73.88ms +[2025-09-02 08:11:00] [Rank 0] step:2481/10000 train_time:183330ms step_avg:73.89ms +[2025-09-02 08:11:00] [Rank 0] step:2481/10000 train_time:183330ms step_avg:73.89ms +[2025-09-02 08:11:01] [Rank 0] step:2501/10000 train_time:184857ms step_avg:73.91ms +[2025-09-02 08:11:01] [Rank 0] step:2501/10000 train_time:184857ms step_avg:73.91ms +[2025-09-02 08:11:03] [Rank 0] step:2521/10000 train_time:186384ms step_avg:73.93ms +[2025-09-02 08:11:03] [Rank 0] step:2521/10000 train_time:186384ms step_avg:73.93ms +[2025-09-02 08:11:04] [Rank 0] 
step:2541/10000 train_time:187910ms step_avg:73.95ms +[2025-09-02 08:11:04] [Rank 0] step:2541/10000 train_time:187910ms step_avg:73.95ms +[2025-09-02 08:11:06] [Rank 0] step:2561/10000 train_time:189436ms step_avg:73.97ms +[2025-09-02 08:11:06] [Rank 0] step:2561/10000 train_time:189436ms step_avg:73.97ms +[2025-09-02 08:11:07] [Rank 0] step:2581/10000 train_time:190962ms step_avg:73.99ms +[2025-09-02 08:11:07] [Rank 0] step:2581/10000 train_time:190962ms step_avg:73.99ms +[2025-09-02 08:11:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:11:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:11:21] [Rank 0] PRINT: step:2600/10000 val_loss:4.2470 svd_entropy: attn_qk:H=0.7563,top10E=0.27,eRank=172.4,q75/q25=56.95 attn_vo:H=0.7788,top10E=0.17,eRank=249.4,q75/q25=inf mlp_w1:H=0.8676,top10E=0.18,eRank=325.1,q75/q25=5.88 mlp_w2:H=0.9699,top10E=0.04,eRank=628.8,q75/q25=2.96 vo_prod:H=0.6315,top10E=0.27,eRank=94.5,q75/q25=inf train_time:192568ms step_avg:74.06ms +[2025-09-02 08:11:21] [Rank 0] PRINT: step:2600/10000 val_loss:4.2470 svd_entropy: attn_qk:H=0.7563,top10E=0.27,eRank=172.4,q75/q25=56.95 attn_vo:H=0.7788,top10E=0.17,eRank=249.4,q75/q25=inf mlp_w1:H=0.8676,top10E=0.18,eRank=325.1,q75/q25=5.88 mlp_w2:H=0.9699,top10E=0.04,eRank=628.8,q75/q25=2.96 vo_prod:H=0.6315,top10E=0.27,eRank=94.5,q75/q25=inf train_time:192568ms step_avg:74.06ms +[2025-09-02 08:11:21] [Rank 0] step:2601/10000 train_time:192583ms step_avg:74.04ms +[2025-09-02 08:11:21] [Rank 0] step:2601/10000 train_time:192583ms step_avg:74.04ms +[2025-09-02 08:11:22] [Rank 0] step:2621/10000 train_time:194048ms step_avg:74.04ms +[2025-09-02 08:11:22] [Rank 0] step:2621/10000 train_time:194048ms step_avg:74.04ms +[2025-09-02 08:11:24] [Rank 0] step:2641/10000 train_time:195570ms step_avg:74.05ms +[2025-09-02 08:11:24] 
[Rank 0] step:2641/10000 train_time:195570ms step_avg:74.05ms +[2025-09-02 08:11:25] [Rank 0] step:2661/10000 train_time:197094ms step_avg:74.07ms +[2025-09-02 08:11:25] [Rank 0] step:2661/10000 train_time:197094ms step_avg:74.07ms +[2025-09-02 08:11:27] [Rank 0] step:2681/10000 train_time:198616ms step_avg:74.08ms +[2025-09-02 08:11:27] [Rank 0] step:2681/10000 train_time:198616ms step_avg:74.08ms +[2025-09-02 08:11:28] [Rank 0] step:2701/10000 train_time:200141ms step_avg:74.10ms +[2025-09-02 08:11:28] [Rank 0] step:2701/10000 train_time:200141ms step_avg:74.10ms +[2025-09-02 08:11:30] [Rank 0] step:2721/10000 train_time:201664ms step_avg:74.11ms +[2025-09-02 08:11:30] [Rank 0] step:2721/10000 train_time:201664ms step_avg:74.11ms +[2025-09-02 08:11:31] [Rank 0] step:2741/10000 train_time:203186ms step_avg:74.13ms +[2025-09-02 08:11:31] [Rank 0] step:2741/10000 train_time:203186ms step_avg:74.13ms +[2025-09-02 08:11:33] [Rank 0] step:2761/10000 train_time:204710ms step_avg:74.14ms +[2025-09-02 08:11:33] [Rank 0] step:2761/10000 train_time:204710ms step_avg:74.14ms +[2025-09-02 08:11:34] [Rank 0] step:2781/10000 train_time:206233ms step_avg:74.16ms +[2025-09-02 08:11:34] [Rank 0] step:2781/10000 train_time:206233ms step_avg:74.16ms +[2025-09-02 08:11:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:11:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:11:48] [Rank 0] PRINT: step:2800/10000 val_loss:4.2078 svd_entropy: attn_qk:H=0.7592,top10E=0.27,eRank=174.8,q75/q25=57.47 attn_vo:H=0.7830,top10E=0.16,eRank=254.9,q75/q25=inf mlp_w1:H=0.8719,top10E=0.18,eRank=333.9,q75/q25=5.72 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.94 vo_prod:H=0.6371,top10E=0.26,eRank=98.4,q75/q25=inf train_time:207835ms step_avg:74.23ms +[2025-09-02 08:11:48] [Rank 0] PRINT: step:2800/10000 val_loss:4.2078 svd_entropy: attn_qk:H=0.7592,top10E=0.27,eRank=174.8,q75/q25=57.47 attn_vo:H=0.7830,top10E=0.16,eRank=254.9,q75/q25=inf mlp_w1:H=0.8719,top10E=0.18,eRank=333.9,q75/q25=5.72 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.94 vo_prod:H=0.6371,top10E=0.26,eRank=98.4,q75/q25=inf train_time:207835ms step_avg:74.23ms +[2025-09-02 08:11:48] [Rank 0] step:2801/10000 train_time:207850ms step_avg:74.21ms +[2025-09-02 08:11:48] [Rank 0] step:2801/10000 train_time:207850ms step_avg:74.21ms +[2025-09-02 08:11:49] [Rank 0] step:2821/10000 train_time:209295ms step_avg:74.19ms +[2025-09-02 08:11:49] [Rank 0] step:2821/10000 train_time:209295ms step_avg:74.19ms +[2025-09-02 08:11:51] [Rank 0] step:2841/10000 train_time:210818ms step_avg:74.21ms +[2025-09-02 08:11:51] [Rank 0] step:2841/10000 train_time:210818ms step_avg:74.21ms +[2025-09-02 08:11:52] [Rank 0] step:2861/10000 train_time:212340ms step_avg:74.22ms +[2025-09-02 08:11:52] [Rank 0] step:2861/10000 train_time:212340ms step_avg:74.22ms +[2025-09-02 08:11:54] [Rank 0] step:2881/10000 train_time:213863ms step_avg:74.23ms +[2025-09-02 08:11:54] [Rank 0] step:2881/10000 train_time:213863ms step_avg:74.23ms +[2025-09-02 08:11:55] [Rank 0] step:2901/10000 train_time:215387ms step_avg:74.25ms +[2025-09-02 08:11:55] [Rank 0] step:2901/10000 train_time:215387ms step_avg:74.25ms +[2025-09-02 08:11:57] [Rank 0] step:2921/10000 train_time:216911ms step_avg:74.26ms +[2025-09-02 08:11:57] [Rank 0] step:2921/10000 train_time:216911ms step_avg:74.26ms +[2025-09-02 08:11:58] [Rank 0] 
step:2941/10000 train_time:218434ms step_avg:74.27ms +[2025-09-02 08:11:58] [Rank 0] step:2941/10000 train_time:218434ms step_avg:74.27ms +[2025-09-02 08:12:00] [Rank 0] step:2961/10000 train_time:219958ms step_avg:74.29ms +[2025-09-02 08:12:00] [Rank 0] step:2961/10000 train_time:219958ms step_avg:74.29ms +[2025-09-02 08:12:01] [Rank 0] step:2981/10000 train_time:221488ms step_avg:74.30ms +[2025-09-02 08:12:01] [Rank 0] step:2981/10000 train_time:221488ms step_avg:74.30ms +[2025-09-02 08:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:12:15] [Rank 0] PRINT: step:3000/10000 val_loss:4.1657 svd_entropy: attn_qk:H=0.7618,top10E=0.27,eRank=177.0,q75/q25=57.61 attn_vo:H=0.7868,top10E=0.16,eRank=259.9,q75/q25=inf mlp_w1:H=0.8756,top10E=0.18,eRank=341.7,q75/q25=5.58 mlp_w2:H=0.9703,top10E=0.04,eRank=630.6,q75/q25=2.93 vo_prod:H=0.6421,top10E=0.26,eRank=102.1,q75/q25=inf train_time:223098ms step_avg:74.37ms +[2025-09-02 08:12:15] [Rank 0] PRINT: step:3000/10000 val_loss:4.1657 svd_entropy: attn_qk:H=0.7618,top10E=0.27,eRank=177.0,q75/q25=57.61 attn_vo:H=0.7868,top10E=0.16,eRank=259.9,q75/q25=inf mlp_w1:H=0.8756,top10E=0.18,eRank=341.7,q75/q25=5.58 mlp_w2:H=0.9703,top10E=0.04,eRank=630.6,q75/q25=2.93 vo_prod:H=0.6421,top10E=0.26,eRank=102.1,q75/q25=inf train_time:223098ms step_avg:74.37ms +[2025-09-02 08:12:15] [Rank 0] step:3001/10000 train_time:223113ms step_avg:74.35ms +[2025-09-02 08:12:15] [Rank 0] step:3001/10000 train_time:223113ms step_avg:74.35ms +[2025-09-02 08:12:16] [Rank 0] step:3021/10000 train_time:224571ms step_avg:74.34ms +[2025-09-02 08:12:16] [Rank 0] step:3021/10000 train_time:224571ms step_avg:74.34ms +[2025-09-02 08:12:18] [Rank 0] step:3041/10000 train_time:226100ms step_avg:74.35ms +[2025-09-02 
08:12:18] [Rank 0] step:3041/10000 train_time:226100ms step_avg:74.35ms +[2025-09-02 08:12:19] [Rank 0] step:3061/10000 train_time:227711ms step_avg:74.39ms +[2025-09-02 08:12:19] [Rank 0] step:3061/10000 train_time:227711ms step_avg:74.39ms +[2025-09-02 08:12:21] [Rank 0] step:3081/10000 train_time:229239ms step_avg:74.40ms +[2025-09-02 08:12:21] [Rank 0] step:3081/10000 train_time:229239ms step_avg:74.40ms +[2025-09-02 08:12:22] [Rank 0] step:3101/10000 train_time:230772ms step_avg:74.42ms +[2025-09-02 08:12:22] [Rank 0] step:3101/10000 train_time:230772ms step_avg:74.42ms +[2025-09-02 08:12:24] [Rank 0] step:3121/10000 train_time:232303ms step_avg:74.43ms +[2025-09-02 08:12:24] [Rank 0] step:3121/10000 train_time:232303ms step_avg:74.43ms +[2025-09-02 08:12:25] [Rank 0] step:3141/10000 train_time:233833ms step_avg:74.45ms +[2025-09-02 08:12:25] [Rank 0] step:3141/10000 train_time:233833ms step_avg:74.45ms +[2025-09-02 08:12:27] [Rank 0] step:3161/10000 train_time:235366ms step_avg:74.46ms +[2025-09-02 08:12:27] [Rank 0] step:3161/10000 train_time:235366ms step_avg:74.46ms +[2025-09-02 08:12:28] [Rank 0] step:3181/10000 train_time:236898ms step_avg:74.47ms +[2025-09-02 08:12:28] [Rank 0] step:3181/10000 train_time:236898ms step_avg:74.47ms +[2025-09-02 08:12:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:12:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:12:42] [Rank 0] PRINT: step:3200/10000 val_loss:4.1330 svd_entropy: attn_qk:H=0.7641,top10E=0.26,eRank=179.0,q75/q25=57.82 attn_vo:H=0.7900,top10E=0.15,eRank=264.2,q75/q25=inf mlp_w1:H=0.8789,top10E=0.17,eRank=349.0,q75/q25=5.45 mlp_w2:H=0.9705,top10E=0.04,eRank=631.3,q75/q25=2.93 vo_prod:H=0.6464,top10E=0.25,eRank=105.4,q75/q25=inf train_time:238512ms step_avg:74.54ms +[2025-09-02 08:12:42] [Rank 0] PRINT: step:3200/10000 val_loss:4.1330 svd_entropy: attn_qk:H=0.7641,top10E=0.26,eRank=179.0,q75/q25=57.82 attn_vo:H=0.7900,top10E=0.15,eRank=264.2,q75/q25=inf mlp_w1:H=0.8789,top10E=0.17,eRank=349.0,q75/q25=5.45 mlp_w2:H=0.9705,top10E=0.04,eRank=631.3,q75/q25=2.93 vo_prod:H=0.6464,top10E=0.25,eRank=105.4,q75/q25=inf train_time:238512ms step_avg:74.54ms +[2025-09-02 08:12:42] [Rank 0] step:3201/10000 train_time:238527ms step_avg:74.52ms +[2025-09-02 08:12:42] [Rank 0] step:3201/10000 train_time:238527ms step_avg:74.52ms +[2025-09-02 08:12:43] [Rank 0] step:3221/10000 train_time:239996ms step_avg:74.51ms +[2025-09-02 08:12:43] [Rank 0] step:3221/10000 train_time:239996ms step_avg:74.51ms +[2025-09-02 08:12:45] [Rank 0] step:3241/10000 train_time:241527ms step_avg:74.52ms +[2025-09-02 08:12:45] [Rank 0] step:3241/10000 train_time:241527ms step_avg:74.52ms +[2025-09-02 08:12:46] [Rank 0] step:3261/10000 train_time:243058ms step_avg:74.53ms +[2025-09-02 08:12:46] [Rank 0] step:3261/10000 train_time:243058ms step_avg:74.53ms +[2025-09-02 08:12:48] [Rank 0] step:3281/10000 train_time:244589ms step_avg:74.55ms +[2025-09-02 08:12:48] [Rank 0] step:3281/10000 train_time:244589ms step_avg:74.55ms +[2025-09-02 08:12:49] [Rank 0] step:3301/10000 train_time:246122ms step_avg:74.56ms +[2025-09-02 08:12:49] [Rank 0] step:3301/10000 train_time:246122ms step_avg:74.56ms +[2025-09-02 08:12:51] [Rank 0] step:3321/10000 train_time:247653ms step_avg:74.57ms +[2025-09-02 08:12:51] [Rank 0] step:3321/10000 train_time:247653ms step_avg:74.57ms +[2025-09-02 08:12:53] [Rank 0] 
step:3341/10000 train_time:249185ms step_avg:74.58ms +[2025-09-02 08:12:53] [Rank 0] step:3341/10000 train_time:249185ms step_avg:74.58ms +[2025-09-02 08:12:54] [Rank 0] step:3361/10000 train_time:250717ms step_avg:74.60ms +[2025-09-02 08:12:54] [Rank 0] step:3361/10000 train_time:250717ms step_avg:74.60ms +[2025-09-02 08:12:56] [Rank 0] step:3381/10000 train_time:252251ms step_avg:74.61ms +[2025-09-02 08:12:56] [Rank 0] step:3381/10000 train_time:252251ms step_avg:74.61ms +[2025-09-02 08:12:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:12:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:13:09] [Rank 0] PRINT: step:3400/10000 val_loss:4.0941 svd_entropy: attn_qk:H=0.7664,top10E=0.26,eRank=181.1,q75/q25=57.75 attn_vo:H=0.7931,top10E=0.15,eRank=268.6,q75/q25=inf mlp_w1:H=0.8820,top10E=0.17,eRank=355.9,q75/q25=5.34 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.91 vo_prod:H=0.6510,top10E=0.24,eRank=109.1,q75/q25=inf train_time:253862ms step_avg:74.67ms +[2025-09-02 08:13:09] [Rank 0] PRINT: step:3400/10000 val_loss:4.0941 svd_entropy: attn_qk:H=0.7664,top10E=0.26,eRank=181.1,q75/q25=57.75 attn_vo:H=0.7931,top10E=0.15,eRank=268.6,q75/q25=inf mlp_w1:H=0.8820,top10E=0.17,eRank=355.9,q75/q25=5.34 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.91 vo_prod:H=0.6510,top10E=0.24,eRank=109.1,q75/q25=inf train_time:253862ms step_avg:74.67ms +[2025-09-02 08:13:09] [Rank 0] step:3401/10000 train_time:253877ms step_avg:74.65ms +[2025-09-02 08:13:09] [Rank 0] step:3401/10000 train_time:253877ms step_avg:74.65ms +[2025-09-02 08:13:10] [Rank 0] step:3421/10000 train_time:255345ms step_avg:74.64ms +[2025-09-02 08:13:10] [Rank 0] step:3421/10000 train_time:255345ms step_avg:74.64ms +[2025-09-02 08:13:12] [Rank 0] step:3441/10000 train_time:256876ms step_avg:74.65ms +[2025-09-02 
08:13:12] [Rank 0] step:3441/10000 train_time:256876ms step_avg:74.65ms +[2025-09-02 08:13:13] [Rank 0] step:3461/10000 train_time:258410ms step_avg:74.66ms +[2025-09-02 08:13:13] [Rank 0] step:3461/10000 train_time:258410ms step_avg:74.66ms +[2025-09-02 08:13:15] [Rank 0] step:3481/10000 train_time:259943ms step_avg:74.67ms +[2025-09-02 08:13:15] [Rank 0] step:3481/10000 train_time:259943ms step_avg:74.67ms +[2025-09-02 08:13:17] [Rank 0] step:3501/10000 train_time:261477ms step_avg:74.69ms +[2025-09-02 08:13:17] [Rank 0] step:3501/10000 train_time:261477ms step_avg:74.69ms +[2025-09-02 08:13:18] [Rank 0] step:3521/10000 train_time:263011ms step_avg:74.70ms +[2025-09-02 08:13:18] [Rank 0] step:3521/10000 train_time:263011ms step_avg:74.70ms +[2025-09-02 08:13:20] [Rank 0] step:3541/10000 train_time:264545ms step_avg:74.71ms +[2025-09-02 08:13:20] [Rank 0] step:3541/10000 train_time:264545ms step_avg:74.71ms +[2025-09-02 08:13:21] [Rank 0] step:3561/10000 train_time:266079ms step_avg:74.72ms +[2025-09-02 08:13:21] [Rank 0] step:3561/10000 train_time:266079ms step_avg:74.72ms +[2025-09-02 08:13:23] [Rank 0] step:3581/10000 train_time:267692ms step_avg:74.75ms +[2025-09-02 08:13:23] [Rank 0] step:3581/10000 train_time:267692ms step_avg:74.75ms +[2025-09-02 08:13:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:13:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:13:36] [Rank 0] PRINT: step:3600/10000 val_loss:4.0806 svd_entropy: attn_qk:H=0.7514,top10E=0.29,eRank=153.0,q75/q25=58.32 attn_vo:H=0.8330,top10E=0.16,eRank=285.8,q75/q25=45.26 mlp_w1:H=0.8846,top10E=0.17,eRank=361.8,q75/q25=5.24 mlp_w2:H=0.9707,top10E=0.04,eRank=632.1,q75/q25=2.91 vo_prod:H=0.7239,top10E=0.27,eRank=126.5,q75/q25=1961.89 train_time:269306ms step_avg:74.81ms +[2025-09-02 08:13:36] [Rank 0] PRINT: step:3600/10000 val_loss:4.0806 svd_entropy: attn_qk:H=0.7514,top10E=0.29,eRank=153.0,q75/q25=58.32 attn_vo:H=0.8330,top10E=0.16,eRank=285.8,q75/q25=45.26 mlp_w1:H=0.8846,top10E=0.17,eRank=361.8,q75/q25=5.24 mlp_w2:H=0.9707,top10E=0.04,eRank=632.1,q75/q25=2.91 vo_prod:H=0.7239,top10E=0.27,eRank=126.5,q75/q25=1961.89 train_time:269306ms step_avg:74.81ms +[2025-09-02 08:13:36] [Rank 0] step:3601/10000 train_time:269321ms step_avg:74.79ms +[2025-09-02 08:13:36] [Rank 0] step:3601/10000 train_time:269321ms step_avg:74.79ms +[2025-09-02 08:13:38] [Rank 0] step:3621/10000 train_time:270799ms step_avg:74.79ms +[2025-09-02 08:13:38] [Rank 0] step:3621/10000 train_time:270799ms step_avg:74.79ms +[2025-09-02 08:13:39] [Rank 0] step:3641/10000 train_time:272330ms step_avg:74.80ms +[2025-09-02 08:13:39] [Rank 0] step:3641/10000 train_time:272330ms step_avg:74.80ms +[2025-09-02 08:13:41] [Rank 0] step:3661/10000 train_time:273862ms step_avg:74.81ms +[2025-09-02 08:13:41] [Rank 0] step:3661/10000 train_time:273862ms step_avg:74.81ms +[2025-09-02 08:13:42] [Rank 0] step:3681/10000 train_time:275393ms step_avg:74.81ms +[2025-09-02 08:13:42] [Rank 0] step:3681/10000 train_time:275393ms step_avg:74.81ms +[2025-09-02 08:13:44] [Rank 0] step:3701/10000 train_time:276926ms step_avg:74.82ms +[2025-09-02 08:13:44] [Rank 0] step:3701/10000 train_time:276926ms step_avg:74.82ms +[2025-09-02 08:13:45] [Rank 0] step:3721/10000 train_time:278486ms step_avg:74.84ms +[2025-09-02 08:13:45] [Rank 0] step:3721/10000 train_time:278486ms step_avg:74.84ms +[2025-09-02 
08:13:47] [Rank 0] step:3741/10000 train_time:280053ms step_avg:74.86ms +[2025-09-02 08:13:47] [Rank 0] step:3741/10000 train_time:280053ms step_avg:74.86ms +[2025-09-02 08:13:48] [Rank 0] step:3761/10000 train_time:281620ms step_avg:74.88ms +[2025-09-02 08:13:48] [Rank 0] step:3761/10000 train_time:281620ms step_avg:74.88ms +[2025-09-02 08:13:50] [Rank 0] step:3781/10000 train_time:283189ms step_avg:74.90ms +[2025-09-02 08:13:50] [Rank 0] step:3781/10000 train_time:283189ms step_avg:74.90ms +[2025-09-02 08:13:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:13:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:14:03] [Rank 0] PRINT: step:3800/10000 val_loss:4.0395 svd_entropy: attn_qk:H=0.7543,top10E=0.28,eRank=155.6,q75/q25=58.89 attn_vo:H=0.8357,top10E=0.15,eRank=289.5,q75/q25=44.25 mlp_w1:H=0.8871,top10E=0.16,eRank=367.4,q75/q25=5.15 mlp_w2:H=0.9708,top10E=0.04,eRank=632.4,q75/q25=2.90 vo_prod:H=0.7270,top10E=0.26,eRank=129.1,q75/q25=1794.30 train_time:284840ms step_avg:74.96ms +[2025-09-02 08:14:03] [Rank 0] PRINT: step:3800/10000 val_loss:4.0395 svd_entropy: attn_qk:H=0.7543,top10E=0.28,eRank=155.6,q75/q25=58.89 attn_vo:H=0.8357,top10E=0.15,eRank=289.5,q75/q25=44.25 mlp_w1:H=0.8871,top10E=0.16,eRank=367.4,q75/q25=5.15 mlp_w2:H=0.9708,top10E=0.04,eRank=632.4,q75/q25=2.90 vo_prod:H=0.7270,top10E=0.26,eRank=129.1,q75/q25=1794.30 train_time:284840ms step_avg:74.96ms +[2025-09-02 08:14:03] [Rank 0] step:3801/10000 train_time:284855ms step_avg:74.94ms +[2025-09-02 08:14:03] [Rank 0] step:3801/10000 train_time:284855ms step_avg:74.94ms +[2025-09-02 08:14:05] [Rank 0] step:3821/10000 train_time:286360ms step_avg:74.94ms +[2025-09-02 08:14:05] [Rank 0] step:3821/10000 train_time:286360ms step_avg:74.94ms +[2025-09-02 08:14:06] [Rank 0] step:3841/10000 train_time:287929ms 
step_avg:74.96ms +[2025-09-02 08:14:06] [Rank 0] step:3841/10000 train_time:287929ms step_avg:74.96ms +[2025-09-02 08:14:08] [Rank 0] step:3861/10000 train_time:289498ms step_avg:74.98ms +[2025-09-02 08:14:08] [Rank 0] step:3861/10000 train_time:289498ms step_avg:74.98ms +[2025-09-02 08:14:10] [Rank 0] step:3881/10000 train_time:291064ms step_avg:75.00ms +[2025-09-02 08:14:10] [Rank 0] step:3881/10000 train_time:291064ms step_avg:75.00ms +[2025-09-02 08:14:11] [Rank 0] step:3901/10000 train_time:292633ms step_avg:75.01ms +[2025-09-02 08:14:11] [Rank 0] step:3901/10000 train_time:292633ms step_avg:75.01ms +[2025-09-02 08:14:13] [Rank 0] step:3921/10000 train_time:294201ms step_avg:75.03ms +[2025-09-02 08:14:13] [Rank 0] step:3921/10000 train_time:294201ms step_avg:75.03ms +[2025-09-02 08:14:14] [Rank 0] step:3941/10000 train_time:295770ms step_avg:75.05ms +[2025-09-02 08:14:14] [Rank 0] step:3941/10000 train_time:295770ms step_avg:75.05ms +[2025-09-02 08:14:16] [Rank 0] step:3961/10000 train_time:297336ms step_avg:75.07ms +[2025-09-02 08:14:16] [Rank 0] step:3961/10000 train_time:297336ms step_avg:75.07ms +[2025-09-02 08:14:17] [Rank 0] step:3981/10000 train_time:298904ms step_avg:75.08ms +[2025-09-02 08:14:17] [Rank 0] step:3981/10000 train_time:298904ms step_avg:75.08ms +[2025-09-02 08:14:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:14:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:14:31] [Rank 0] PRINT: step:4000/10000 val_loss:4.0141 svd_entropy: attn_qk:H=0.7561,top10E=0.28,eRank=157.2,q75/q25=58.73 attn_vo:H=0.8377,top10E=0.15,eRank=292.3,q75/q25=43.20 mlp_w1:H=0.8894,top10E=0.16,eRank=372.8,q75/q25=5.06 mlp_w2:H=0.9708,top10E=0.04,eRank=632.7,q75/q25=2.90 vo_prod:H=0.7291,top10E=0.26,eRank=130.7,q75/q25=1633.87 train_time:300551ms step_avg:75.14ms +[2025-09-02 08:14:31] [Rank 0] PRINT: step:4000/10000 val_loss:4.0141 svd_entropy: attn_qk:H=0.7561,top10E=0.28,eRank=157.2,q75/q25=58.73 attn_vo:H=0.8377,top10E=0.15,eRank=292.3,q75/q25=43.20 mlp_w1:H=0.8894,top10E=0.16,eRank=372.8,q75/q25=5.06 mlp_w2:H=0.9708,top10E=0.04,eRank=632.7,q75/q25=2.90 vo_prod:H=0.7291,top10E=0.26,eRank=130.7,q75/q25=1633.87 train_time:300551ms step_avg:75.14ms +[2025-09-02 08:14:31] [Rank 0] step:4001/10000 train_time:300565ms step_avg:75.12ms +[2025-09-02 08:14:31] [Rank 0] step:4001/10000 train_time:300565ms step_avg:75.12ms +[2025-09-02 08:14:32] [Rank 0] step:4021/10000 train_time:302075ms step_avg:75.12ms +[2025-09-02 08:14:32] [Rank 0] step:4021/10000 train_time:302075ms step_avg:75.12ms +[2025-09-02 08:14:34] [Rank 0] step:4041/10000 train_time:303643ms step_avg:75.14ms +[2025-09-02 08:14:34] [Rank 0] step:4041/10000 train_time:303643ms step_avg:75.14ms +[2025-09-02 08:14:36] [Rank 0] step:4061/10000 train_time:305209ms step_avg:75.16ms +[2025-09-02 08:14:36] [Rank 0] step:4061/10000 train_time:305209ms step_avg:75.16ms +[2025-09-02 08:14:37] [Rank 0] step:4081/10000 train_time:306950ms step_avg:75.21ms +[2025-09-02 08:14:37] [Rank 0] step:4081/10000 train_time:306950ms step_avg:75.21ms +[2025-09-02 08:14:39] [Rank 0] step:4101/10000 train_time:308518ms step_avg:75.23ms +[2025-09-02 08:14:39] [Rank 0] step:4101/10000 train_time:308518ms step_avg:75.23ms +[2025-09-02 08:14:40] [Rank 0] step:4121/10000 train_time:310085ms step_avg:75.25ms +[2025-09-02 08:14:40] [Rank 0] step:4121/10000 train_time:310085ms step_avg:75.25ms +[2025-09-02 
08:14:42] [Rank 0] step:4141/10000 train_time:311654ms step_avg:75.26ms +[2025-09-02 08:14:42] [Rank 0] step:4141/10000 train_time:311654ms step_avg:75.26ms +[2025-09-02 08:14:44] [Rank 0] step:4161/10000 train_time:313221ms step_avg:75.28ms +[2025-09-02 08:14:44] [Rank 0] step:4161/10000 train_time:313221ms step_avg:75.28ms +[2025-09-02 08:14:45] [Rank 0] step:4181/10000 train_time:314793ms step_avg:75.29ms +[2025-09-02 08:14:45] [Rank 0] step:4181/10000 train_time:314793ms step_avg:75.29ms +[2025-09-02 08:14:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:14:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:14:58] [Rank 0] PRINT: step:4200/10000 val_loss:3.9932 svd_entropy: attn_qk:H=0.7577,top10E=0.28,eRank=158.8,q75/q25=58.69 attn_vo:H=0.8397,top10E=0.15,eRank=295.4,q75/q25=41.82 mlp_w1:H=0.8914,top10E=0.16,eRank=377.8,q75/q25=4.99 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.89 vo_prod:H=0.7322,top10E=0.25,eRank=133.5,q75/q25=1519.93 train_time:316442ms step_avg:75.34ms +[2025-09-02 08:14:58] [Rank 0] PRINT: step:4200/10000 val_loss:3.9932 svd_entropy: attn_qk:H=0.7577,top10E=0.28,eRank=158.8,q75/q25=58.69 attn_vo:H=0.8397,top10E=0.15,eRank=295.4,q75/q25=41.82 mlp_w1:H=0.8914,top10E=0.16,eRank=377.8,q75/q25=4.99 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.89 vo_prod:H=0.7322,top10E=0.25,eRank=133.5,q75/q25=1519.93 train_time:316442ms step_avg:75.34ms +[2025-09-02 08:14:58] [Rank 0] step:4201/10000 train_time:316457ms step_avg:75.33ms +[2025-09-02 08:14:58] [Rank 0] step:4201/10000 train_time:316457ms step_avg:75.33ms +[2025-09-02 08:15:00] [Rank 0] step:4221/10000 train_time:317963ms step_avg:75.33ms +[2025-09-02 08:15:00] [Rank 0] step:4221/10000 train_time:317963ms step_avg:75.33ms +[2025-09-02 08:15:02] [Rank 0] step:4241/10000 train_time:319533ms 
step_avg:75.34ms +[2025-09-02 08:15:02] [Rank 0] step:4241/10000 train_time:319533ms step_avg:75.34ms +[2025-09-02 08:15:03] [Rank 0] step:4261/10000 train_time:321103ms step_avg:75.36ms +[2025-09-02 08:15:03] [Rank 0] step:4261/10000 train_time:321103ms step_avg:75.36ms +[2025-09-02 08:15:05] [Rank 0] step:4281/10000 train_time:322672ms step_avg:75.37ms +[2025-09-02 08:15:05] [Rank 0] step:4281/10000 train_time:322672ms step_avg:75.37ms +[2025-09-02 08:15:06] [Rank 0] step:4301/10000 train_time:324243ms step_avg:75.39ms +[2025-09-02 08:15:06] [Rank 0] step:4301/10000 train_time:324243ms step_avg:75.39ms +[2025-09-02 08:15:08] [Rank 0] step:4321/10000 train_time:325813ms step_avg:75.40ms +[2025-09-02 08:15:08] [Rank 0] step:4321/10000 train_time:325813ms step_avg:75.40ms +[2025-09-02 08:15:09] [Rank 0] step:4341/10000 train_time:327381ms step_avg:75.42ms +[2025-09-02 08:15:09] [Rank 0] step:4341/10000 train_time:327381ms step_avg:75.42ms +[2025-09-02 08:15:11] [Rank 0] step:4361/10000 train_time:328950ms step_avg:75.43ms +[2025-09-02 08:15:11] [Rank 0] step:4361/10000 train_time:328950ms step_avg:75.43ms +[2025-09-02 08:15:13] [Rank 0] step:4381/10000 train_time:330516ms step_avg:75.44ms +[2025-09-02 08:15:13] [Rank 0] step:4381/10000 train_time:330516ms step_avg:75.44ms +[2025-09-02 08:15:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:15:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:15:26] [Rank 0] PRINT: step:4400/10000 val_loss:3.9723 svd_entropy: attn_qk:H=0.7592,top10E=0.28,eRank=160.3,q75/q25=57.54 attn_vo:H=0.8415,top10E=0.15,eRank=298.1,q75/q25=40.74 mlp_w1:H=0.8933,top10E=0.16,eRank=382.5,q75/q25=4.93 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.89 vo_prod:H=0.7346,top10E=0.25,eRank=135.7,q75/q25=1389.76 train_time:332167ms step_avg:75.49ms +[2025-09-02 08:15:26] [Rank 0] PRINT: step:4400/10000 val_loss:3.9723 svd_entropy: attn_qk:H=0.7592,top10E=0.28,eRank=160.3,q75/q25=57.54 attn_vo:H=0.8415,top10E=0.15,eRank=298.1,q75/q25=40.74 mlp_w1:H=0.8933,top10E=0.16,eRank=382.5,q75/q25=4.93 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.89 vo_prod:H=0.7346,top10E=0.25,eRank=135.7,q75/q25=1389.76 train_time:332167ms step_avg:75.49ms +[2025-09-02 08:15:26] [Rank 0] step:4401/10000 train_time:332182ms step_avg:75.48ms +[2025-09-02 08:15:26] [Rank 0] step:4401/10000 train_time:332182ms step_avg:75.48ms +[2025-09-02 08:15:27] [Rank 0] step:4421/10000 train_time:333692ms step_avg:75.48ms +[2025-09-02 08:15:27] [Rank 0] step:4421/10000 train_time:333692ms step_avg:75.48ms +[2025-09-02 08:15:29] [Rank 0] step:4441/10000 train_time:335302ms step_avg:75.50ms +[2025-09-02 08:15:29] [Rank 0] step:4441/10000 train_time:335302ms step_avg:75.50ms +[2025-09-02 08:15:31] [Rank 0] step:4461/10000 train_time:336876ms step_avg:75.52ms +[2025-09-02 08:15:31] [Rank 0] step:4461/10000 train_time:336876ms step_avg:75.52ms +[2025-09-02 08:15:32] [Rank 0] step:4481/10000 train_time:338452ms step_avg:75.53ms +[2025-09-02 08:15:32] [Rank 0] step:4481/10000 train_time:338452ms step_avg:75.53ms +[2025-09-02 08:15:34] [Rank 0] step:4501/10000 train_time:340026ms step_avg:75.54ms +[2025-09-02 08:15:34] [Rank 0] step:4501/10000 train_time:340026ms step_avg:75.54ms +[2025-09-02 08:15:35] [Rank 0] step:4521/10000 train_time:341600ms step_avg:75.56ms +[2025-09-02 08:15:35] [Rank 0] step:4521/10000 train_time:341600ms step_avg:75.56ms +[2025-09-02 
08:15:37] [Rank 0] step:4541/10000 train_time:343178ms step_avg:75.57ms +[2025-09-02 08:15:37] [Rank 0] step:4541/10000 train_time:343178ms step_avg:75.57ms +[2025-09-02 08:15:39] [Rank 0] step:4561/10000 train_time:344753ms step_avg:75.59ms +[2025-09-02 08:15:39] [Rank 0] step:4561/10000 train_time:344753ms step_avg:75.59ms +[2025-09-02 08:15:40] [Rank 0] step:4581/10000 train_time:346331ms step_avg:75.60ms +[2025-09-02 08:15:40] [Rank 0] step:4581/10000 train_time:346331ms step_avg:75.60ms +[2025-09-02 08:15:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:15:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:15:53] [Rank 0] PRINT: step:4600/10000 val_loss:3.9452 svd_entropy: attn_qk:H=0.7609,top10E=0.27,eRank=161.9,q75/q25=57.71 attn_vo:H=0.8433,top10E=0.14,eRank=300.8,q75/q25=39.70 mlp_w1:H=0.8952,top10E=0.15,eRank=387.1,q75/q25=4.88 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.89 vo_prod:H=0.7371,top10E=0.25,eRank=138.0,q75/q25=1299.08 train_time:347988ms step_avg:75.65ms +[2025-09-02 08:15:53] [Rank 0] PRINT: step:4600/10000 val_loss:3.9452 svd_entropy: attn_qk:H=0.7609,top10E=0.27,eRank=161.9,q75/q25=57.71 attn_vo:H=0.8433,top10E=0.14,eRank=300.8,q75/q25=39.70 mlp_w1:H=0.8952,top10E=0.15,eRank=387.1,q75/q25=4.88 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.89 vo_prod:H=0.7371,top10E=0.25,eRank=138.0,q75/q25=1299.08 train_time:347988ms step_avg:75.65ms +[2025-09-02 08:15:53] [Rank 0] step:4601/10000 train_time:348003ms step_avg:75.64ms +[2025-09-02 08:15:53] [Rank 0] step:4601/10000 train_time:348003ms step_avg:75.64ms +[2025-09-02 08:15:55] [Rank 0] step:4621/10000 train_time:349503ms step_avg:75.63ms +[2025-09-02 08:15:55] [Rank 0] step:4621/10000 train_time:349503ms step_avg:75.63ms +[2025-09-02 08:15:57] [Rank 0] step:4641/10000 train_time:351077ms 
step_avg:75.65ms +[2025-09-02 08:15:57] [Rank 0] step:4641/10000 train_time:351077ms step_avg:75.65ms +[2025-09-02 08:15:58] [Rank 0] step:4661/10000 train_time:352653ms step_avg:75.66ms +[2025-09-02 08:15:58] [Rank 0] step:4661/10000 train_time:352653ms step_avg:75.66ms +[2025-09-02 08:16:00] [Rank 0] step:4681/10000 train_time:354228ms step_avg:75.67ms +[2025-09-02 08:16:00] [Rank 0] step:4681/10000 train_time:354228ms step_avg:75.67ms +[2025-09-02 08:16:01] [Rank 0] step:4701/10000 train_time:355803ms step_avg:75.69ms +[2025-09-02 08:16:01] [Rank 0] step:4701/10000 train_time:355803ms step_avg:75.69ms +[2025-09-02 08:16:03] [Rank 0] step:4721/10000 train_time:357379ms step_avg:75.70ms +[2025-09-02 08:16:03] [Rank 0] step:4721/10000 train_time:357379ms step_avg:75.70ms +[2025-09-02 08:16:04] [Rank 0] step:4741/10000 train_time:358956ms step_avg:75.71ms +[2025-09-02 08:16:04] [Rank 0] step:4741/10000 train_time:358956ms step_avg:75.71ms +[2025-09-02 08:16:06] [Rank 0] step:4761/10000 train_time:360533ms step_avg:75.73ms +[2025-09-02 08:16:06] [Rank 0] step:4761/10000 train_time:360533ms step_avg:75.73ms +[2025-09-02 08:16:08] [Rank 0] step:4781/10000 train_time:362108ms step_avg:75.74ms +[2025-09-02 08:16:08] [Rank 0] step:4781/10000 train_time:362108ms step_avg:75.74ms +[2025-09-02 08:16:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:16:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:16:21] [Rank 0] PRINT: step:4800/10000 val_loss:3.9335 svd_entropy: attn_qk:H=0.7623,top10E=0.27,eRank=163.3,q75/q25=57.65 attn_vo:H=0.8449,top10E=0.14,eRank=303.4,q75/q25=38.72 mlp_w1:H=0.8969,top10E=0.15,eRank=391.3,q75/q25=4.81 mlp_w2:H=0.9710,top10E=0.04,eRank=633.3,q75/q25=2.88 vo_prod:H=0.7398,top10E=0.24,eRank=140.5,q75/q25=1213.80 train_time:363768ms step_avg:75.78ms +[2025-09-02 08:16:21] [Rank 0] PRINT: step:4800/10000 val_loss:3.9335 svd_entropy: attn_qk:H=0.7623,top10E=0.27,eRank=163.3,q75/q25=57.65 attn_vo:H=0.8449,top10E=0.14,eRank=303.4,q75/q25=38.72 mlp_w1:H=0.8969,top10E=0.15,eRank=391.3,q75/q25=4.81 mlp_w2:H=0.9710,top10E=0.04,eRank=633.3,q75/q25=2.88 vo_prod:H=0.7398,top10E=0.24,eRank=140.5,q75/q25=1213.80 train_time:363768ms step_avg:75.78ms +[2025-09-02 08:16:21] [Rank 0] step:4801/10000 train_time:363783ms step_avg:75.77ms +[2025-09-02 08:16:21] [Rank 0] step:4801/10000 train_time:363783ms step_avg:75.77ms +[2025-09-02 08:16:23] [Rank 0] step:4821/10000 train_time:365299ms step_avg:75.77ms +[2025-09-02 08:16:23] [Rank 0] step:4821/10000 train_time:365299ms step_avg:75.77ms +[2025-09-02 08:16:24] [Rank 0] step:4841/10000 train_time:366871ms step_avg:75.78ms +[2025-09-02 08:16:24] [Rank 0] step:4841/10000 train_time:366871ms step_avg:75.78ms +[2025-09-02 08:16:26] [Rank 0] step:4861/10000 train_time:368447ms step_avg:75.80ms +[2025-09-02 08:16:26] [Rank 0] step:4861/10000 train_time:368447ms step_avg:75.80ms +[2025-09-02 08:16:27] [Rank 0] step:4881/10000 train_time:370021ms step_avg:75.81ms +[2025-09-02 08:16:27] [Rank 0] step:4881/10000 train_time:370021ms step_avg:75.81ms +[2025-09-02 08:16:29] [Rank 0] step:4901/10000 train_time:371592ms step_avg:75.82ms +[2025-09-02 08:16:29] [Rank 0] step:4901/10000 train_time:371592ms step_avg:75.82ms +[2025-09-02 08:16:30] [Rank 0] step:4921/10000 train_time:373169ms step_avg:75.83ms +[2025-09-02 08:16:30] [Rank 0] step:4921/10000 train_time:373169ms step_avg:75.83ms +[2025-09-02 
08:16:32] [Rank 0] step:4941/10000 train_time:374798ms step_avg:75.85ms +[2025-09-02 08:16:32] [Rank 0] step:4941/10000 train_time:374798ms step_avg:75.85ms +[2025-09-02 08:16:34] [Rank 0] step:4961/10000 train_time:376373ms step_avg:75.87ms +[2025-09-02 08:16:34] [Rank 0] step:4961/10000 train_time:376373ms step_avg:75.87ms +[2025-09-02 08:16:35] [Rank 0] step:4981/10000 train_time:377949ms step_avg:75.88ms +[2025-09-02 08:16:35] [Rank 0] step:4981/10000 train_time:377949ms step_avg:75.88ms +[2025-09-02 08:16:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:16:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:16:49] [Rank 0] PRINT: step:5000/10000 val_loss:3.9122 svd_entropy: attn_qk:H=0.7637,top10E=0.27,eRank=164.7,q75/q25=57.28 attn_vo:H=0.8464,top10E=0.14,eRank=305.7,q75/q25=38.02 mlp_w1:H=0.8985,top10E=0.15,eRank=395.3,q75/q25=4.75 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.88 vo_prod:H=0.7420,top10E=0.24,eRank=142.6,q75/q25=1110.11 train_time:379603ms step_avg:75.92ms +[2025-09-02 08:16:49] [Rank 0] PRINT: step:5000/10000 val_loss:3.9122 svd_entropy: attn_qk:H=0.7637,top10E=0.27,eRank=164.7,q75/q25=57.28 attn_vo:H=0.8464,top10E=0.14,eRank=305.7,q75/q25=38.02 mlp_w1:H=0.8985,top10E=0.15,eRank=395.3,q75/q25=4.75 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.88 vo_prod:H=0.7420,top10E=0.24,eRank=142.6,q75/q25=1110.11 train_time:379603ms step_avg:75.92ms +[2025-09-02 08:16:49] [Rank 0] step:5001/10000 train_time:379618ms step_avg:75.91ms +[2025-09-02 08:16:49] [Rank 0] step:5001/10000 train_time:379618ms step_avg:75.91ms +[2025-09-02 08:16:50] [Rank 0] step:5021/10000 train_time:381118ms step_avg:75.90ms +[2025-09-02 08:16:50] [Rank 0] step:5021/10000 train_time:381118ms step_avg:75.90ms +[2025-09-02 08:16:52] [Rank 0] step:5041/10000 train_time:382692ms 
step_avg:75.92ms +[2025-09-02 08:16:52] [Rank 0] step:5041/10000 train_time:382692ms step_avg:75.92ms +[2025-09-02 08:16:53] [Rank 0] step:5061/10000 train_time:384262ms step_avg:75.93ms +[2025-09-02 08:16:53] [Rank 0] step:5061/10000 train_time:384262ms step_avg:75.93ms +[2025-09-02 08:16:55] [Rank 0] step:5081/10000 train_time:385836ms step_avg:75.94ms +[2025-09-02 08:16:55] [Rank 0] step:5081/10000 train_time:385836ms step_avg:75.94ms +[2025-09-02 08:16:56] [Rank 0] step:5101/10000 train_time:387413ms step_avg:75.95ms +[2025-09-02 08:16:56] [Rank 0] step:5101/10000 train_time:387413ms step_avg:75.95ms +[2025-09-02 08:16:58] [Rank 0] step:5121/10000 train_time:388987ms step_avg:75.96ms +[2025-09-02 08:16:58] [Rank 0] step:5121/10000 train_time:388987ms step_avg:75.96ms +[2025-09-02 08:17:00] [Rank 0] step:5141/10000 train_time:390563ms step_avg:75.97ms +[2025-09-02 08:17:00] [Rank 0] step:5141/10000 train_time:390563ms step_avg:75.97ms +[2025-09-02 08:17:01] [Rank 0] step:5161/10000 train_time:392139ms step_avg:75.98ms +[2025-09-02 08:17:01] [Rank 0] step:5161/10000 train_time:392139ms step_avg:75.98ms +[2025-09-02 08:17:03] [Rank 0] step:5181/10000 train_time:393717ms step_avg:75.99ms +[2025-09-02 08:17:03] [Rank 0] step:5181/10000 train_time:393717ms step_avg:75.99ms +[2025-09-02 08:17:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:17:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:17:16] [Rank 0] PRINT: step:5200/10000 val_loss:3.8947 svd_entropy: attn_qk:H=0.7648,top10E=0.27,eRank=165.9,q75/q25=56.77 attn_vo:H=0.8477,top10E=0.14,eRank=307.8,q75/q25=37.29 mlp_w1:H=0.9000,top10E=0.15,eRank=399.2,q75/q25=4.71 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.88 vo_prod:H=0.7441,top10E=0.24,eRank=144.6,q75/q25=1032.33 train_time:395399ms step_avg:76.04ms +[2025-09-02 08:17:16] [Rank 0] PRINT: step:5200/10000 val_loss:3.8947 svd_entropy: attn_qk:H=0.7648,top10E=0.27,eRank=165.9,q75/q25=56.77 attn_vo:H=0.8477,top10E=0.14,eRank=307.8,q75/q25=37.29 mlp_w1:H=0.9000,top10E=0.15,eRank=399.2,q75/q25=4.71 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.88 vo_prod:H=0.7441,top10E=0.24,eRank=144.6,q75/q25=1032.33 train_time:395399ms step_avg:76.04ms +[2025-09-02 08:17:16] [Rank 0] step:5201/10000 train_time:395414ms step_avg:76.03ms +[2025-09-02 08:17:16] [Rank 0] step:5201/10000 train_time:395414ms step_avg:76.03ms +[2025-09-02 08:17:18] [Rank 0] step:5221/10000 train_time:396944ms step_avg:76.03ms +[2025-09-02 08:17:18] [Rank 0] step:5221/10000 train_time:396944ms step_avg:76.03ms +[2025-09-02 08:17:19] [Rank 0] step:5241/10000 train_time:398551ms step_avg:76.04ms +[2025-09-02 08:17:19] [Rank 0] step:5241/10000 train_time:398551ms step_avg:76.04ms +[2025-09-02 08:17:21] [Rank 0] step:5261/10000 train_time:400156ms step_avg:76.06ms +[2025-09-02 08:17:21] [Rank 0] step:5261/10000 train_time:400156ms step_avg:76.06ms +[2025-09-02 08:17:23] [Rank 0] step:5281/10000 train_time:401762ms step_avg:76.08ms +[2025-09-02 08:17:23] [Rank 0] step:5281/10000 train_time:401762ms step_avg:76.08ms +[2025-09-02 08:17:24] [Rank 0] step:5301/10000 train_time:403377ms step_avg:76.09ms +[2025-09-02 08:17:24] [Rank 0] step:5301/10000 train_time:403377ms step_avg:76.09ms +[2025-09-02 08:17:26] [Rank 0] step:5321/10000 train_time:404983ms step_avg:76.11ms +[2025-09-02 08:17:26] [Rank 0] step:5321/10000 train_time:404983ms step_avg:76.11ms +[2025-09-02 
08:17:27] [Rank 0] step:5341/10000 train_time:406589ms step_avg:76.13ms +[2025-09-02 08:17:27] [Rank 0] step:5341/10000 train_time:406589ms step_avg:76.13ms +[2025-09-02 08:17:29] [Rank 0] step:5361/10000 train_time:408201ms step_avg:76.14ms +[2025-09-02 08:17:29] [Rank 0] step:5361/10000 train_time:408201ms step_avg:76.14ms +[2025-09-02 08:17:31] [Rank 0] step:5381/10000 train_time:409812ms step_avg:76.16ms +[2025-09-02 08:17:31] [Rank 0] step:5381/10000 train_time:409812ms step_avg:76.16ms +[2025-09-02 08:17:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:17:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:17:44] [Rank 0] PRINT: step:5400/10000 val_loss:3.8762 svd_entropy: attn_qk:H=0.7660,top10E=0.27,eRank=167.1,q75/q25=56.31 attn_vo:H=0.8490,top10E=0.14,eRank=309.8,q75/q25=36.46 mlp_w1:H=0.9014,top10E=0.15,eRank=402.8,q75/q25=4.66 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.88 vo_prod:H=0.7459,top10E=0.24,eRank=146.4,q75/q25=988.53 train_time:411501ms step_avg:76.20ms +[2025-09-02 08:17:44] [Rank 0] PRINT: step:5400/10000 val_loss:3.8762 svd_entropy: attn_qk:H=0.7660,top10E=0.27,eRank=167.1,q75/q25=56.31 attn_vo:H=0.8490,top10E=0.14,eRank=309.8,q75/q25=36.46 mlp_w1:H=0.9014,top10E=0.15,eRank=402.8,q75/q25=4.66 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.88 vo_prod:H=0.7459,top10E=0.24,eRank=146.4,q75/q25=988.53 train_time:411501ms step_avg:76.20ms +[2025-09-02 08:17:44] [Rank 0] step:5401/10000 train_time:411516ms step_avg:76.19ms +[2025-09-02 08:17:44] [Rank 0] step:5401/10000 train_time:411516ms step_avg:76.19ms +[2025-09-02 08:17:46] [Rank 0] step:5421/10000 train_time:413052ms step_avg:76.19ms +[2025-09-02 08:17:46] [Rank 0] step:5421/10000 train_time:413052ms step_avg:76.19ms +[2025-09-02 08:17:47] [Rank 0] step:5441/10000 train_time:414657ms 
step_avg:76.21ms +[2025-09-02 08:17:47] [Rank 0] step:5441/10000 train_time:414657ms step_avg:76.21ms +[2025-09-02 08:17:49] [Rank 0] step:5461/10000 train_time:416268ms step_avg:76.23ms +[2025-09-02 08:17:49] [Rank 0] step:5461/10000 train_time:416268ms step_avg:76.23ms +[2025-09-02 08:17:50] [Rank 0] step:5481/10000 train_time:417880ms step_avg:76.24ms +[2025-09-02 08:17:50] [Rank 0] step:5481/10000 train_time:417880ms step_avg:76.24ms +[2025-09-02 08:17:52] [Rank 0] step:5501/10000 train_time:419494ms step_avg:76.26ms +[2025-09-02 08:17:52] [Rank 0] step:5501/10000 train_time:419494ms step_avg:76.26ms +[2025-09-02 08:17:54] [Rank 0] step:5521/10000 train_time:421109ms step_avg:76.27ms +[2025-09-02 08:17:54] [Rank 0] step:5521/10000 train_time:421109ms step_avg:76.27ms +[2025-09-02 08:17:55] [Rank 0] step:5541/10000 train_time:422718ms step_avg:76.29ms +[2025-09-02 08:17:55] [Rank 0] step:5541/10000 train_time:422718ms step_avg:76.29ms +[2025-09-02 08:17:57] [Rank 0] step:5561/10000 train_time:424332ms step_avg:76.31ms +[2025-09-02 08:17:57] [Rank 0] step:5561/10000 train_time:424332ms step_avg:76.31ms +[2025-09-02 08:17:59] [Rank 0] step:5581/10000 train_time:425942ms step_avg:76.32ms +[2025-09-02 08:17:59] [Rank 0] step:5581/10000 train_time:425942ms step_avg:76.32ms +[2025-09-02 08:18:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:18:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:18:12] [Rank 0] PRINT: step:5600/10000 val_loss:3.8638 svd_entropy: attn_qk:H=0.7671,top10E=0.27,eRank=168.2,q75/q25=55.94 attn_vo:H=0.8501,top10E=0.14,eRank=311.6,q75/q25=35.67 mlp_w1:H=0.9027,top10E=0.15,eRank=406.2,q75/q25=4.63 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7476,top10E=0.23,eRank=148.0,q75/q25=927.54 train_time:427637ms step_avg:76.36ms +[2025-09-02 08:18:12] [Rank 0] PRINT: step:5600/10000 val_loss:3.8638 svd_entropy: attn_qk:H=0.7671,top10E=0.27,eRank=168.2,q75/q25=55.94 attn_vo:H=0.8501,top10E=0.14,eRank=311.6,q75/q25=35.67 mlp_w1:H=0.9027,top10E=0.15,eRank=406.2,q75/q25=4.63 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7476,top10E=0.23,eRank=148.0,q75/q25=927.54 train_time:427637ms step_avg:76.36ms +[2025-09-02 08:18:12] [Rank 0] step:5601/10000 train_time:427652ms step_avg:76.35ms +[2025-09-02 08:18:12] [Rank 0] step:5601/10000 train_time:427652ms step_avg:76.35ms +[2025-09-02 08:18:14] [Rank 0] step:5621/10000 train_time:429177ms step_avg:76.35ms +[2025-09-02 08:18:14] [Rank 0] step:5621/10000 train_time:429177ms step_avg:76.35ms +[2025-09-02 08:18:15] [Rank 0] step:5641/10000 train_time:430784ms step_avg:76.37ms +[2025-09-02 08:18:15] [Rank 0] step:5641/10000 train_time:430784ms step_avg:76.37ms +[2025-09-02 08:18:17] [Rank 0] step:5661/10000 train_time:432386ms step_avg:76.38ms +[2025-09-02 08:18:17] [Rank 0] step:5661/10000 train_time:432386ms step_avg:76.38ms +[2025-09-02 08:18:18] [Rank 0] step:5681/10000 train_time:433997ms step_avg:76.39ms +[2025-09-02 08:18:18] [Rank 0] step:5681/10000 train_time:433997ms step_avg:76.39ms +[2025-09-02 08:18:20] [Rank 0] step:5701/10000 train_time:435603ms step_avg:76.41ms +[2025-09-02 08:18:20] [Rank 0] step:5701/10000 train_time:435603ms step_avg:76.41ms +[2025-09-02 08:18:22] [Rank 0] step:5721/10000 train_time:437211ms step_avg:76.42ms +[2025-09-02 08:18:22] [Rank 0] step:5721/10000 train_time:437211ms step_avg:76.42ms +[2025-09-02 08:18:23] 
[Rank 0] step:5741/10000 train_time:438817ms step_avg:76.44ms +[2025-09-02 08:18:23] [Rank 0] step:5741/10000 train_time:438817ms step_avg:76.44ms +[2025-09-02 08:18:25] [Rank 0] step:5761/10000 train_time:440425ms step_avg:76.45ms +[2025-09-02 08:18:25] [Rank 0] step:5761/10000 train_time:440425ms step_avg:76.45ms +[2025-09-02 08:18:26] [Rank 0] step:5781/10000 train_time:442036ms step_avg:76.46ms +[2025-09-02 08:18:26] [Rank 0] step:5781/10000 train_time:442036ms step_avg:76.46ms +[2025-09-02 08:18:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:18:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:18:40] [Rank 0] PRINT: step:5800/10000 val_loss:3.8543 svd_entropy: attn_qk:H=0.7682,top10E=0.27,eRank=169.4,q75/q25=55.51 attn_vo:H=0.8512,top10E=0.14,eRank=313.3,q75/q25=34.77 mlp_w1:H=0.9039,top10E=0.14,eRank=409.4,q75/q25=4.60 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7491,top10E=0.23,eRank=149.6,q75/q25=867.76 train_time:443725ms step_avg:76.50ms +[2025-09-02 08:18:40] [Rank 0] PRINT: step:5800/10000 val_loss:3.8543 svd_entropy: attn_qk:H=0.7682,top10E=0.27,eRank=169.4,q75/q25=55.51 attn_vo:H=0.8512,top10E=0.14,eRank=313.3,q75/q25=34.77 mlp_w1:H=0.9039,top10E=0.14,eRank=409.4,q75/q25=4.60 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7491,top10E=0.23,eRank=149.6,q75/q25=867.76 train_time:443725ms step_avg:76.50ms +[2025-09-02 08:18:40] [Rank 0] step:5801/10000 train_time:443740ms step_avg:76.49ms +[2025-09-02 08:18:40] [Rank 0] step:5801/10000 train_time:443740ms step_avg:76.49ms +[2025-09-02 08:18:41] [Rank 0] step:5821/10000 train_time:445266ms step_avg:76.49ms +[2025-09-02 08:18:41] [Rank 0] step:5821/10000 train_time:445266ms step_avg:76.49ms +[2025-09-02 08:18:43] [Rank 0] step:5841/10000 train_time:446870ms step_avg:76.51ms 
+[2025-09-02 08:18:43] [Rank 0] step:5841/10000 train_time:446870ms step_avg:76.51ms +[2025-09-02 08:18:45] [Rank 0] step:5861/10000 train_time:448478ms step_avg:76.52ms +[2025-09-02 08:18:45] [Rank 0] step:5861/10000 train_time:448478ms step_avg:76.52ms +[2025-09-02 08:18:46] [Rank 0] step:5881/10000 train_time:450084ms step_avg:76.53ms +[2025-09-02 08:18:46] [Rank 0] step:5881/10000 train_time:450084ms step_avg:76.53ms +[2025-09-02 08:18:48] [Rank 0] step:5901/10000 train_time:451691ms step_avg:76.54ms +[2025-09-02 08:18:48] [Rank 0] step:5901/10000 train_time:451691ms step_avg:76.54ms +[2025-09-02 08:18:49] [Rank 0] step:5921/10000 train_time:453298ms step_avg:76.56ms +[2025-09-02 08:18:49] [Rank 0] step:5921/10000 train_time:453298ms step_avg:76.56ms +[2025-09-02 08:18:51] [Rank 0] step:5941/10000 train_time:454912ms step_avg:76.57ms +[2025-09-02 08:18:51] [Rank 0] step:5941/10000 train_time:454912ms step_avg:76.57ms +[2025-09-02 08:18:53] [Rank 0] step:5961/10000 train_time:456522ms step_avg:76.58ms +[2025-09-02 08:18:53] [Rank 0] step:5961/10000 train_time:456522ms step_avg:76.58ms +[2025-09-02 08:18:54] [Rank 0] step:5981/10000 train_time:458134ms step_avg:76.60ms +[2025-09-02 08:18:54] [Rank 0] step:5981/10000 train_time:458134ms step_avg:76.60ms +[2025-09-02 08:18:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:18:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:19:08] [Rank 0] PRINT: step:6000/10000 val_loss:3.8312 svd_entropy: attn_qk:H=0.7693,top10E=0.26,eRank=170.6,q75/q25=55.32 attn_vo:H=0.8521,top10E=0.13,eRank=314.9,q75/q25=34.30 mlp_w1:H=0.9051,top10E=0.14,eRank=412.4,q75/q25=4.56 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7504,top10E=0.23,eRank=150.9,q75/q25=821.94 train_time:459824ms step_avg:76.64ms +[2025-09-02 08:19:08] [Rank 0] PRINT: step:6000/10000 val_loss:3.8312 svd_entropy: attn_qk:H=0.7693,top10E=0.26,eRank=170.6,q75/q25=55.32 attn_vo:H=0.8521,top10E=0.13,eRank=314.9,q75/q25=34.30 mlp_w1:H=0.9051,top10E=0.14,eRank=412.4,q75/q25=4.56 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7504,top10E=0.23,eRank=150.9,q75/q25=821.94 train_time:459824ms step_avg:76.64ms +[2025-09-02 08:19:08] [Rank 0] step:6001/10000 train_time:459839ms step_avg:76.63ms +[2025-09-02 08:19:08] [Rank 0] step:6001/10000 train_time:459839ms step_avg:76.63ms +[2025-09-02 08:19:09] [Rank 0] step:6021/10000 train_time:461367ms step_avg:76.63ms +[2025-09-02 08:19:09] [Rank 0] step:6021/10000 train_time:461367ms step_avg:76.63ms +[2025-09-02 08:19:11] [Rank 0] step:6041/10000 train_time:462976ms step_avg:76.64ms +[2025-09-02 08:19:11] [Rank 0] step:6041/10000 train_time:462976ms step_avg:76.64ms +[2025-09-02 08:19:13] [Rank 0] step:6061/10000 train_time:464590ms step_avg:76.65ms +[2025-09-02 08:19:13] [Rank 0] step:6061/10000 train_time:464590ms step_avg:76.65ms +[2025-09-02 08:19:14] [Rank 0] step:6081/10000 train_time:466198ms step_avg:76.66ms +[2025-09-02 08:19:14] [Rank 0] step:6081/10000 train_time:466198ms step_avg:76.66ms +[2025-09-02 08:19:16] [Rank 0] step:6101/10000 train_time:467815ms step_avg:76.68ms +[2025-09-02 08:19:16] [Rank 0] step:6101/10000 train_time:467815ms step_avg:76.68ms +[2025-09-02 08:19:18] [Rank 0] step:6121/10000 train_time:469689ms step_avg:76.73ms +[2025-09-02 08:19:18] [Rank 0] step:6121/10000 train_time:469689ms step_avg:76.73ms +[2025-09-02 08:19:19] 
[Rank 0] step:6141/10000 train_time:471309ms step_avg:76.75ms +[2025-09-02 08:19:19] [Rank 0] step:6141/10000 train_time:471309ms step_avg:76.75ms +[2025-09-02 08:19:21] [Rank 0] step:6161/10000 train_time:472919ms step_avg:76.76ms +[2025-09-02 08:19:21] [Rank 0] step:6161/10000 train_time:472919ms step_avg:76.76ms +[2025-09-02 08:19:22] [Rank 0] step:6181/10000 train_time:474529ms step_avg:76.77ms +[2025-09-02 08:19:22] [Rank 0] step:6181/10000 train_time:474529ms step_avg:76.77ms +[2025-09-02 08:19:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:19:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:19:36] [Rank 0] PRINT: step:6200/10000 val_loss:3.8148 svd_entropy: attn_qk:H=0.7704,top10E=0.26,eRank=171.6,q75/q25=54.96 attn_vo:H=0.8532,top10E=0.13,eRank=316.6,q75/q25=33.62 mlp_w1:H=0.9061,top10E=0.14,eRank=415.2,q75/q25=4.53 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7522,top10E=0.23,eRank=152.6,q75/q25=774.67 train_time:476233ms step_avg:76.81ms +[2025-09-02 08:19:36] [Rank 0] PRINT: step:6200/10000 val_loss:3.8148 svd_entropy: attn_qk:H=0.7704,top10E=0.26,eRank=171.6,q75/q25=54.96 attn_vo:H=0.8532,top10E=0.13,eRank=316.6,q75/q25=33.62 mlp_w1:H=0.9061,top10E=0.14,eRank=415.2,q75/q25=4.53 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7522,top10E=0.23,eRank=152.6,q75/q25=774.67 train_time:476233ms step_avg:76.81ms +[2025-09-02 08:19:36] [Rank 0] step:6201/10000 train_time:476247ms step_avg:76.80ms +[2025-09-02 08:19:36] [Rank 0] step:6201/10000 train_time:476247ms step_avg:76.80ms +[2025-09-02 08:19:37] [Rank 0] step:6221/10000 train_time:477790ms step_avg:76.80ms +[2025-09-02 08:19:37] [Rank 0] step:6221/10000 train_time:477790ms step_avg:76.80ms +[2025-09-02 08:19:39] [Rank 0] step:6241/10000 train_time:479402ms step_avg:76.81ms 
+[2025-09-02 08:19:39] [Rank 0] step:6241/10000 train_time:479402ms step_avg:76.81ms +[2025-09-02 08:19:41] [Rank 0] step:6261/10000 train_time:481012ms step_avg:76.83ms +[2025-09-02 08:19:41] [Rank 0] step:6261/10000 train_time:481012ms step_avg:76.83ms +[2025-09-02 08:19:42] [Rank 0] step:6281/10000 train_time:482652ms step_avg:76.84ms +[2025-09-02 08:19:42] [Rank 0] step:6281/10000 train_time:482652ms step_avg:76.84ms +[2025-09-02 08:19:44] [Rank 0] step:6301/10000 train_time:484268ms step_avg:76.86ms +[2025-09-02 08:19:44] [Rank 0] step:6301/10000 train_time:484268ms step_avg:76.86ms +[2025-09-02 08:19:46] [Rank 0] step:6321/10000 train_time:485881ms step_avg:76.87ms +[2025-09-02 08:19:46] [Rank 0] step:6321/10000 train_time:485881ms step_avg:76.87ms +[2025-09-02 08:19:47] [Rank 0] step:6341/10000 train_time:487497ms step_avg:76.88ms +[2025-09-02 08:19:47] [Rank 0] step:6341/10000 train_time:487497ms step_avg:76.88ms +[2025-09-02 08:19:49] [Rank 0] step:6361/10000 train_time:489116ms step_avg:76.89ms +[2025-09-02 08:19:49] [Rank 0] step:6361/10000 train_time:489116ms step_avg:76.89ms +[2025-09-02 08:19:50] [Rank 0] step:6381/10000 train_time:490733ms step_avg:76.91ms +[2025-09-02 08:19:50] [Rank 0] step:6381/10000 train_time:490733ms step_avg:76.91ms +[2025-09-02 08:19:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:19:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:20:04] [Rank 0] PRINT: step:6400/10000 val_loss:3.7984 svd_entropy: attn_qk:H=0.7712,top10E=0.26,eRank=172.5,q75/q25=54.56 attn_vo:H=0.8541,top10E=0.13,eRank=318.1,q75/q25=32.86 mlp_w1:H=0.9071,top10E=0.14,eRank=417.8,q75/q25=4.50 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.86 vo_prod:H=0.7535,top10E=0.23,eRank=153.9,q75/q25=727.85 train_time:492426ms step_avg:76.94ms +[2025-09-02 08:20:04] [Rank 0] PRINT: step:6400/10000 val_loss:3.7984 svd_entropy: attn_qk:H=0.7712,top10E=0.26,eRank=172.5,q75/q25=54.56 attn_vo:H=0.8541,top10E=0.13,eRank=318.1,q75/q25=32.86 mlp_w1:H=0.9071,top10E=0.14,eRank=417.8,q75/q25=4.50 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.86 vo_prod:H=0.7535,top10E=0.23,eRank=153.9,q75/q25=727.85 train_time:492426ms step_avg:76.94ms +[2025-09-02 08:20:04] [Rank 0] step:6401/10000 train_time:492441ms step_avg:76.93ms +[2025-09-02 08:20:04] [Rank 0] step:6401/10000 train_time:492441ms step_avg:76.93ms +[2025-09-02 08:20:05] [Rank 0] step:6421/10000 train_time:493974ms step_avg:76.93ms +[2025-09-02 08:20:05] [Rank 0] step:6421/10000 train_time:493974ms step_avg:76.93ms +[2025-09-02 08:20:07] [Rank 0] step:6441/10000 train_time:495592ms step_avg:76.94ms +[2025-09-02 08:20:07] [Rank 0] step:6441/10000 train_time:495592ms step_avg:76.94ms +[2025-09-02 08:20:09] [Rank 0] step:6461/10000 train_time:497209ms step_avg:76.96ms +[2025-09-02 08:20:09] [Rank 0] step:6461/10000 train_time:497209ms step_avg:76.96ms +[2025-09-02 08:20:10] [Rank 0] step:6481/10000 train_time:498836ms step_avg:76.97ms +[2025-09-02 08:20:10] [Rank 0] step:6481/10000 train_time:498836ms step_avg:76.97ms +[2025-09-02 08:20:12] [Rank 0] step:6501/10000 train_time:500449ms step_avg:76.98ms +[2025-09-02 08:20:12] [Rank 0] step:6501/10000 train_time:500449ms step_avg:76.98ms +[2025-09-02 08:20:14] [Rank 0] step:6521/10000 train_time:502060ms step_avg:76.99ms +[2025-09-02 08:20:14] [Rank 0] step:6521/10000 train_time:502060ms step_avg:76.99ms +[2025-09-02 08:20:15] 
[Rank 0] step:6541/10000 train_time:503674ms step_avg:77.00ms +[2025-09-02 08:20:15] [Rank 0] step:6541/10000 train_time:503674ms step_avg:77.00ms +[2025-09-02 08:20:17] [Rank 0] step:6561/10000 train_time:505292ms step_avg:77.01ms +[2025-09-02 08:20:17] [Rank 0] step:6561/10000 train_time:505292ms step_avg:77.01ms +[2025-09-02 08:20:18] [Rank 0] step:6581/10000 train_time:506909ms step_avg:77.03ms +[2025-09-02 08:20:18] [Rank 0] step:6581/10000 train_time:506909ms step_avg:77.03ms +[2025-09-02 08:20:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:20:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:20:32] [Rank 0] PRINT: step:6600/10000 val_loss:3.7838 svd_entropy: attn_qk:H=0.7720,top10E=0.26,eRank=173.4,q75/q25=54.21 attn_vo:H=0.8549,top10E=0.13,eRank=319.4,q75/q25=32.34 mlp_w1:H=0.9079,top10E=0.14,eRank=420.1,q75/q25=4.48 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.86 vo_prod:H=0.7549,top10E=0.23,eRank=155.3,q75/q25=700.95 train_time:508610ms step_avg:77.06ms +[2025-09-02 08:20:32] [Rank 0] PRINT: step:6600/10000 val_loss:3.7838 svd_entropy: attn_qk:H=0.7720,top10E=0.26,eRank=173.4,q75/q25=54.21 attn_vo:H=0.8549,top10E=0.13,eRank=319.4,q75/q25=32.34 mlp_w1:H=0.9079,top10E=0.14,eRank=420.1,q75/q25=4.48 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.86 vo_prod:H=0.7549,top10E=0.23,eRank=155.3,q75/q25=700.95 train_time:508610ms step_avg:77.06ms +[2025-09-02 08:20:32] [Rank 0] step:6601/10000 train_time:508625ms step_avg:77.05ms +[2025-09-02 08:20:32] [Rank 0] step:6601/10000 train_time:508625ms step_avg:77.05ms +[2025-09-02 08:20:33] [Rank 0] step:6621/10000 train_time:510170ms step_avg:77.05ms +[2025-09-02 08:20:33] [Rank 0] step:6621/10000 train_time:510170ms step_avg:77.05ms +[2025-09-02 08:20:35] [Rank 0] step:6641/10000 train_time:511789ms step_avg:77.07ms 
+[2025-09-02 08:20:35] [Rank 0] step:6641/10000 train_time:511789ms step_avg:77.07ms +[2025-09-02 08:20:37] [Rank 0] step:6661/10000 train_time:513402ms step_avg:77.08ms +[2025-09-02 08:20:37] [Rank 0] step:6661/10000 train_time:513402ms step_avg:77.08ms +[2025-09-02 08:20:38] [Rank 0] step:6681/10000 train_time:515035ms step_avg:77.09ms +[2025-09-02 08:20:38] [Rank 0] step:6681/10000 train_time:515035ms step_avg:77.09ms +[2025-09-02 08:20:40] [Rank 0] step:6701/10000 train_time:516685ms step_avg:77.11ms +[2025-09-02 08:20:40] [Rank 0] step:6701/10000 train_time:516685ms step_avg:77.11ms +[2025-09-02 08:20:42] [Rank 0] step:6721/10000 train_time:518329ms step_avg:77.12ms +[2025-09-02 08:20:42] [Rank 0] step:6721/10000 train_time:518329ms step_avg:77.12ms +[2025-09-02 08:20:43] [Rank 0] step:6741/10000 train_time:519972ms step_avg:77.14ms +[2025-09-02 08:20:43] [Rank 0] step:6741/10000 train_time:519972ms step_avg:77.14ms +[2025-09-02 08:20:45] [Rank 0] step:6761/10000 train_time:521616ms step_avg:77.15ms +[2025-09-02 08:20:45] [Rank 0] step:6761/10000 train_time:521616ms step_avg:77.15ms +[2025-09-02 08:20:46] [Rank 0] step:6781/10000 train_time:523261ms step_avg:77.17ms +[2025-09-02 08:20:46] [Rank 0] step:6781/10000 train_time:523261ms step_avg:77.17ms +[2025-09-02 08:20:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:20:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:21:00] [Rank 0] PRINT: step:6800/10000 val_loss:3.7680 svd_entropy: attn_qk:H=0.7726,top10E=0.26,eRank=174.1,q75/q25=53.91 attn_vo:H=0.8557,top10E=0.13,eRank=320.7,q75/q25=32.11 mlp_w1:H=0.9087,top10E=0.14,eRank=422.2,q75/q25=4.45 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7563,top10E=0.23,eRank=156.7,q75/q25=676.44 train_time:524992ms step_avg:77.20ms +[2025-09-02 08:21:00] [Rank 0] PRINT: step:6800/10000 val_loss:3.7680 svd_entropy: attn_qk:H=0.7726,top10E=0.26,eRank=174.1,q75/q25=53.91 attn_vo:H=0.8557,top10E=0.13,eRank=320.7,q75/q25=32.11 mlp_w1:H=0.9087,top10E=0.14,eRank=422.2,q75/q25=4.45 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7563,top10E=0.23,eRank=156.7,q75/q25=676.44 train_time:524992ms step_avg:77.20ms +[2025-09-02 08:21:00] [Rank 0] step:6801/10000 train_time:525007ms step_avg:77.20ms +[2025-09-02 08:21:00] [Rank 0] step:6801/10000 train_time:525007ms step_avg:77.20ms +[2025-09-02 08:21:02] [Rank 0] step:6821/10000 train_time:526574ms step_avg:77.20ms +[2025-09-02 08:21:02] [Rank 0] step:6821/10000 train_time:526574ms step_avg:77.20ms +[2025-09-02 08:21:03] [Rank 0] step:6841/10000 train_time:528210ms step_avg:77.21ms +[2025-09-02 08:21:03] [Rank 0] step:6841/10000 train_time:528210ms step_avg:77.21ms +[2025-09-02 08:21:05] [Rank 0] step:6861/10000 train_time:529850ms step_avg:77.23ms +[2025-09-02 08:21:05] [Rank 0] step:6861/10000 train_time:529850ms step_avg:77.23ms +[2025-09-02 08:21:06] [Rank 0] step:6881/10000 train_time:531492ms step_avg:77.24ms +[2025-09-02 08:21:06] [Rank 0] step:6881/10000 train_time:531492ms step_avg:77.24ms +[2025-09-02 08:21:08] [Rank 0] step:6901/10000 train_time:533134ms step_avg:77.25ms +[2025-09-02 08:21:08] [Rank 0] step:6901/10000 train_time:533134ms step_avg:77.25ms +[2025-09-02 08:21:10] [Rank 0] step:6921/10000 train_time:534772ms step_avg:77.27ms +[2025-09-02 08:21:10] [Rank 0] step:6921/10000 train_time:534772ms step_avg:77.27ms +[2025-09-02 08:21:11] 
[Rank 0] step:6941/10000 train_time:536415ms step_avg:77.28ms +[2025-09-02 08:21:11] [Rank 0] step:6941/10000 train_time:536415ms step_avg:77.28ms +[2025-09-02 08:21:13] [Rank 0] step:6961/10000 train_time:538072ms step_avg:77.30ms +[2025-09-02 08:21:13] [Rank 0] step:6961/10000 train_time:538072ms step_avg:77.30ms +[2025-09-02 08:21:15] [Rank 0] step:6981/10000 train_time:539717ms step_avg:77.31ms +[2025-09-02 08:21:15] [Rank 0] step:6981/10000 train_time:539717ms step_avg:77.31ms +[2025-09-02 08:21:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:21:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:21:28] [Rank 0] PRINT: step:7000/10000 val_loss:3.7527 svd_entropy: attn_qk:H=0.7733,top10E=0.26,eRank=174.8,q75/q25=53.60 attn_vo:H=0.8564,top10E=0.13,eRank=321.9,q75/q25=31.45 mlp_w1:H=0.9094,top10E=0.14,eRank=424.2,q75/q25=4.42 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7572,top10E=0.22,eRank=157.7,q75/q25=650.87 train_time:541444ms step_avg:77.35ms +[2025-09-02 08:21:28] [Rank 0] PRINT: step:7000/10000 val_loss:3.7527 svd_entropy: attn_qk:H=0.7733,top10E=0.26,eRank=174.8,q75/q25=53.60 attn_vo:H=0.8564,top10E=0.13,eRank=321.9,q75/q25=31.45 mlp_w1:H=0.9094,top10E=0.14,eRank=424.2,q75/q25=4.42 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7572,top10E=0.22,eRank=157.7,q75/q25=650.87 train_time:541444ms step_avg:77.35ms +[2025-09-02 08:21:28] [Rank 0] step:7001/10000 train_time:541460ms step_avg:77.34ms +[2025-09-02 08:21:28] [Rank 0] step:7001/10000 train_time:541460ms step_avg:77.34ms +[2025-09-02 08:21:30] [Rank 0] step:7021/10000 train_time:543030ms step_avg:77.34ms +[2025-09-02 08:21:30] [Rank 0] step:7021/10000 train_time:543030ms step_avg:77.34ms +[2025-09-02 08:21:31] [Rank 0] step:7041/10000 train_time:544669ms step_avg:77.36ms 
+[2025-09-02 08:21:31] [Rank 0] step:7041/10000 train_time:544669ms step_avg:77.36ms +[2025-09-02 08:21:33] [Rank 0] step:7061/10000 train_time:546308ms step_avg:77.37ms +[2025-09-02 08:21:33] [Rank 0] step:7061/10000 train_time:546308ms step_avg:77.37ms +[2025-09-02 08:21:35] [Rank 0] step:7081/10000 train_time:547945ms step_avg:77.38ms +[2025-09-02 08:21:35] [Rank 0] step:7081/10000 train_time:547945ms step_avg:77.38ms +[2025-09-02 08:21:36] [Rank 0] step:7101/10000 train_time:549585ms step_avg:77.40ms +[2025-09-02 08:21:36] [Rank 0] step:7101/10000 train_time:549585ms step_avg:77.40ms +[2025-09-02 08:21:38] [Rank 0] step:7121/10000 train_time:551226ms step_avg:77.41ms +[2025-09-02 08:21:38] [Rank 0] step:7121/10000 train_time:551226ms step_avg:77.41ms +[2025-09-02 08:21:40] [Rank 0] step:7141/10000 train_time:552867ms step_avg:77.42ms +[2025-09-02 08:21:40] [Rank 0] step:7141/10000 train_time:552867ms step_avg:77.42ms +[2025-09-02 08:21:41] [Rank 0] step:7161/10000 train_time:554509ms step_avg:77.43ms +[2025-09-02 08:21:41] [Rank 0] step:7161/10000 train_time:554509ms step_avg:77.43ms +[2025-09-02 08:21:43] [Rank 0] step:7181/10000 train_time:556154ms step_avg:77.45ms +[2025-09-02 08:21:43] [Rank 0] step:7181/10000 train_time:556154ms step_avg:77.45ms +[2025-09-02 08:21:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:21:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:21:56] [Rank 0] PRINT: step:7200/10000 val_loss:3.7434 svd_entropy: attn_qk:H=0.7738,top10E=0.26,eRank=175.4,q75/q25=53.23 attn_vo:H=0.8570,top10E=0.13,eRank=323.0,q75/q25=30.97 mlp_w1:H=0.9100,top10E=0.14,eRank=425.8,q75/q25=4.40 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7583,top10E=0.22,eRank=158.8,q75/q25=626.22 train_time:557881ms step_avg:77.48ms +[2025-09-02 08:21:56] [Rank 0] PRINT: step:7200/10000 val_loss:3.7434 svd_entropy: attn_qk:H=0.7738,top10E=0.26,eRank=175.4,q75/q25=53.23 attn_vo:H=0.8570,top10E=0.13,eRank=323.0,q75/q25=30.97 mlp_w1:H=0.9100,top10E=0.14,eRank=425.8,q75/q25=4.40 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7583,top10E=0.22,eRank=158.8,q75/q25=626.22 train_time:557881ms step_avg:77.48ms +[2025-09-02 08:21:56] [Rank 0] step:7201/10000 train_time:557896ms step_avg:77.47ms +[2025-09-02 08:21:56] [Rank 0] step:7201/10000 train_time:557896ms step_avg:77.47ms +[2025-09-02 08:21:58] [Rank 0] step:7221/10000 train_time:559466ms step_avg:77.48ms +[2025-09-02 08:21:58] [Rank 0] step:7221/10000 train_time:559466ms step_avg:77.48ms +[2025-09-02 08:22:00] [Rank 0] step:7241/10000 train_time:561106ms step_avg:77.49ms +[2025-09-02 08:22:00] [Rank 0] step:7241/10000 train_time:561106ms step_avg:77.49ms +[2025-09-02 08:22:01] [Rank 0] step:7261/10000 train_time:562743ms step_avg:77.50ms +[2025-09-02 08:22:01] [Rank 0] step:7261/10000 train_time:562743ms step_avg:77.50ms +[2025-09-02 08:22:03] [Rank 0] step:7281/10000 train_time:564395ms step_avg:77.52ms +[2025-09-02 08:22:03] [Rank 0] step:7281/10000 train_time:564395ms step_avg:77.52ms +[2025-09-02 08:22:05] [Rank 0] step:7301/10000 train_time:566033ms step_avg:77.53ms +[2025-09-02 08:22:05] [Rank 0] step:7301/10000 train_time:566033ms step_avg:77.53ms +[2025-09-02 08:22:06] [Rank 0] step:7321/10000 train_time:567688ms step_avg:77.54ms +[2025-09-02 08:22:06] [Rank 0] step:7321/10000 train_time:567688ms step_avg:77.54ms +[2025-09-02 08:22:08] 
[Rank 0] step:7341/10000 train_time:569331ms step_avg:77.55ms +[2025-09-02 08:22:08] [Rank 0] step:7341/10000 train_time:569331ms step_avg:77.55ms +[2025-09-02 08:22:09] [Rank 0] step:7361/10000 train_time:570980ms step_avg:77.57ms +[2025-09-02 08:22:09] [Rank 0] step:7361/10000 train_time:570980ms step_avg:77.57ms +[2025-09-02 08:22:11] [Rank 0] step:7381/10000 train_time:572628ms step_avg:77.58ms +[2025-09-02 08:22:11] [Rank 0] step:7381/10000 train_time:572628ms step_avg:77.58ms +[2025-09-02 08:22:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:22:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:22:24] [Rank 0] PRINT: step:7400/10000 val_loss:3.7238 svd_entropy: attn_qk:H=0.7743,top10E=0.26,eRank=176.0,q75/q25=53.18 attn_vo:H=0.8575,top10E=0.13,eRank=323.8,q75/q25=30.60 mlp_w1:H=0.9106,top10E=0.14,eRank=427.3,q75/q25=4.38 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7591,top10E=0.22,eRank=159.7,q75/q25=596.85 train_time:574344ms step_avg:77.61ms +[2025-09-02 08:22:24] [Rank 0] PRINT: step:7400/10000 val_loss:3.7238 svd_entropy: attn_qk:H=0.7743,top10E=0.26,eRank=176.0,q75/q25=53.18 attn_vo:H=0.8575,top10E=0.13,eRank=323.8,q75/q25=30.60 mlp_w1:H=0.9106,top10E=0.14,eRank=427.3,q75/q25=4.38 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7591,top10E=0.22,eRank=159.7,q75/q25=596.85 train_time:574344ms step_avg:77.61ms +[2025-09-02 08:22:25] [Rank 0] step:7401/10000 train_time:574360ms step_avg:77.61ms +[2025-09-02 08:22:25] [Rank 0] step:7401/10000 train_time:574360ms step_avg:77.61ms +[2025-09-02 08:22:26] [Rank 0] step:7421/10000 train_time:575929ms step_avg:77.61ms +[2025-09-02 08:22:26] [Rank 0] step:7421/10000 train_time:575929ms step_avg:77.61ms +[2025-09-02 08:22:28] [Rank 0] step:7441/10000 train_time:577569ms step_avg:77.62ms 
+[2025-09-02 08:22:28] [Rank 0] step:7441/10000 train_time:577569ms step_avg:77.62ms +[2025-09-02 08:22:29] [Rank 0] step:7461/10000 train_time:579212ms step_avg:77.63ms +[2025-09-02 08:22:29] [Rank 0] step:7461/10000 train_time:579212ms step_avg:77.63ms +[2025-09-02 08:22:31] [Rank 0] step:7481/10000 train_time:580865ms step_avg:77.65ms +[2025-09-02 08:22:31] [Rank 0] step:7481/10000 train_time:580865ms step_avg:77.65ms +[2025-09-02 08:22:33] [Rank 0] step:7501/10000 train_time:582516ms step_avg:77.66ms +[2025-09-02 08:22:33] [Rank 0] step:7501/10000 train_time:582516ms step_avg:77.66ms +[2025-09-02 08:22:34] [Rank 0] step:7521/10000 train_time:584166ms step_avg:77.67ms +[2025-09-02 08:22:34] [Rank 0] step:7521/10000 train_time:584166ms step_avg:77.67ms +[2025-09-02 08:22:36] [Rank 0] step:7541/10000 train_time:585824ms step_avg:77.69ms +[2025-09-02 08:22:36] [Rank 0] step:7541/10000 train_time:585824ms step_avg:77.69ms +[2025-09-02 08:22:38] [Rank 0] step:7561/10000 train_time:587462ms step_avg:77.70ms +[2025-09-02 08:22:38] [Rank 0] step:7561/10000 train_time:587462ms step_avg:77.70ms +[2025-09-02 08:22:39] [Rank 0] step:7581/10000 train_time:589119ms step_avg:77.71ms +[2025-09-02 08:22:39] [Rank 0] step:7581/10000 train_time:589119ms step_avg:77.71ms +[2025-09-02 08:22:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:22:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:22:53] [Rank 0] PRINT: step:7600/10000 val_loss:3.7169 svd_entropy: attn_qk:H=0.7749,top10E=0.26,eRank=176.6,q75/q25=52.70 attn_vo:H=0.8581,top10E=0.13,eRank=324.8,q75/q25=30.19 mlp_w1:H=0.9111,top10E=0.14,eRank=428.8,q75/q25=4.37 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7601,top10E=0.22,eRank=160.7,q75/q25=578.14 train_time:590861ms step_avg:77.74ms +[2025-09-02 08:22:53] [Rank 0] PRINT: step:7600/10000 val_loss:3.7169 svd_entropy: attn_qk:H=0.7749,top10E=0.26,eRank=176.6,q75/q25=52.70 attn_vo:H=0.8581,top10E=0.13,eRank=324.8,q75/q25=30.19 mlp_w1:H=0.9111,top10E=0.14,eRank=428.8,q75/q25=4.37 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7601,top10E=0.22,eRank=160.7,q75/q25=578.14 train_time:590861ms step_avg:77.74ms +[2025-09-02 08:22:53] [Rank 0] step:7601/10000 train_time:590876ms step_avg:77.74ms +[2025-09-02 08:22:53] [Rank 0] step:7601/10000 train_time:590876ms step_avg:77.74ms +[2025-09-02 08:22:55] [Rank 0] step:7621/10000 train_time:592452ms step_avg:77.74ms +[2025-09-02 08:22:55] [Rank 0] step:7621/10000 train_time:592452ms step_avg:77.74ms +[2025-09-02 08:22:56] [Rank 0] step:7641/10000 train_time:594094ms step_avg:77.75ms +[2025-09-02 08:22:56] [Rank 0] step:7641/10000 train_time:594094ms step_avg:77.75ms +[2025-09-02 08:22:58] [Rank 0] step:7661/10000 train_time:595744ms step_avg:77.76ms +[2025-09-02 08:22:58] [Rank 0] step:7661/10000 train_time:595744ms step_avg:77.76ms +[2025-09-02 08:23:00] [Rank 0] step:7681/10000 train_time:597384ms step_avg:77.77ms +[2025-09-02 08:23:00] [Rank 0] step:7681/10000 train_time:597384ms step_avg:77.77ms +[2025-09-02 08:23:01] [Rank 0] step:7701/10000 train_time:599033ms step_avg:77.79ms +[2025-09-02 08:23:01] [Rank 0] step:7701/10000 train_time:599033ms step_avg:77.79ms +[2025-09-02 08:23:03] [Rank 0] step:7721/10000 train_time:600691ms step_avg:77.80ms +[2025-09-02 08:23:03] [Rank 0] step:7721/10000 train_time:600691ms step_avg:77.80ms +[2025-09-02 08:23:05] 
[Rank 0] step:7741/10000 train_time:602339ms step_avg:77.81ms +[2025-09-02 08:23:05] [Rank 0] step:7741/10000 train_time:602339ms step_avg:77.81ms +[2025-09-02 08:23:06] [Rank 0] step:7761/10000 train_time:603993ms step_avg:77.82ms +[2025-09-02 08:23:06] [Rank 0] step:7761/10000 train_time:603993ms step_avg:77.82ms +[2025-09-02 08:23:08] [Rank 0] step:7781/10000 train_time:605647ms step_avg:77.84ms +[2025-09-02 08:23:08] [Rank 0] step:7781/10000 train_time:605647ms step_avg:77.84ms +[2025-09-02 08:23:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:23:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:23:22] [Rank 0] PRINT: step:7800/10000 val_loss:3.7032 svd_entropy: attn_qk:H=0.7753,top10E=0.26,eRank=177.0,q75/q25=52.51 attn_vo:H=0.8586,top10E=0.13,eRank=325.6,q75/q25=29.96 mlp_w1:H=0.9116,top10E=0.13,eRank=430.1,q75/q25=4.35 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7609,top10E=0.22,eRank=161.5,q75/q25=560.91 train_time:607389ms step_avg:77.87ms +[2025-09-02 08:23:22] [Rank 0] PRINT: step:7800/10000 val_loss:3.7032 svd_entropy: attn_qk:H=0.7753,top10E=0.26,eRank=177.0,q75/q25=52.51 attn_vo:H=0.8586,top10E=0.13,eRank=325.6,q75/q25=29.96 mlp_w1:H=0.9116,top10E=0.13,eRank=430.1,q75/q25=4.35 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7609,top10E=0.22,eRank=161.5,q75/q25=560.91 train_time:607389ms step_avg:77.87ms +[2025-09-02 08:23:22] [Rank 0] step:7801/10000 train_time:607404ms step_avg:77.86ms +[2025-09-02 08:23:22] [Rank 0] step:7801/10000 train_time:607404ms step_avg:77.86ms +[2025-09-02 08:23:23] [Rank 0] step:7821/10000 train_time:608972ms step_avg:77.86ms +[2025-09-02 08:23:23] [Rank 0] step:7821/10000 train_time:608972ms step_avg:77.86ms +[2025-09-02 08:23:25] [Rank 0] step:7841/10000 train_time:610612ms step_avg:77.87ms 
+[2025-09-02 08:23:25] [Rank 0] step:7841/10000 train_time:610612ms step_avg:77.87ms +[2025-09-02 08:23:27] [Rank 0] step:7861/10000 train_time:612263ms step_avg:77.89ms +[2025-09-02 08:23:27] [Rank 0] step:7861/10000 train_time:612263ms step_avg:77.89ms +[2025-09-02 08:23:28] [Rank 0] step:7881/10000 train_time:613914ms step_avg:77.90ms +[2025-09-02 08:23:28] [Rank 0] step:7881/10000 train_time:613914ms step_avg:77.90ms +[2025-09-02 08:23:30] [Rank 0] step:7901/10000 train_time:615561ms step_avg:77.91ms +[2025-09-02 08:23:30] [Rank 0] step:7901/10000 train_time:615561ms step_avg:77.91ms +[2025-09-02 08:23:32] [Rank 0] step:7921/10000 train_time:617211ms step_avg:77.92ms +[2025-09-02 08:23:32] [Rank 0] step:7921/10000 train_time:617211ms step_avg:77.92ms +[2025-09-02 08:23:33] [Rank 0] step:7941/10000 train_time:618867ms step_avg:77.93ms +[2025-09-02 08:23:33] [Rank 0] step:7941/10000 train_time:618867ms step_avg:77.93ms +[2025-09-02 08:23:35] [Rank 0] step:7961/10000 train_time:620518ms step_avg:77.94ms +[2025-09-02 08:23:35] [Rank 0] step:7961/10000 train_time:620518ms step_avg:77.94ms +[2025-09-02 08:23:37] [Rank 0] step:7981/10000 train_time:622160ms step_avg:77.96ms +[2025-09-02 08:23:37] [Rank 0] step:7981/10000 train_time:622160ms step_avg:77.96ms +[2025-09-02 08:23:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:23:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:23:50] [Rank 0] PRINT: step:8000/10000 val_loss:3.6874 svd_entropy: attn_qk:H=0.7757,top10E=0.26,eRank=177.4,q75/q25=52.05 attn_vo:H=0.8590,top10E=0.13,eRank=326.4,q75/q25=29.64 mlp_w1:H=0.9120,top10E=0.13,eRank=431.2,q75/q25=4.35 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.85 vo_prod:H=0.7617,top10E=0.22,eRank=162.4,q75/q25=541.60 train_time:623894ms step_avg:77.99ms +[2025-09-02 08:23:50] [Rank 0] PRINT: step:8000/10000 val_loss:3.6874 svd_entropy: attn_qk:H=0.7757,top10E=0.26,eRank=177.4,q75/q25=52.05 attn_vo:H=0.8590,top10E=0.13,eRank=326.4,q75/q25=29.64 mlp_w1:H=0.9120,top10E=0.13,eRank=431.2,q75/q25=4.35 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.85 vo_prod:H=0.7617,top10E=0.22,eRank=162.4,q75/q25=541.60 train_time:623894ms step_avg:77.99ms +[2025-09-02 08:23:50] [Rank 0] step:8001/10000 train_time:623909ms step_avg:77.98ms +[2025-09-02 08:23:50] [Rank 0] step:8001/10000 train_time:623909ms step_avg:77.98ms +[2025-09-02 08:23:52] [Rank 0] step:8021/10000 train_time:625467ms step_avg:77.98ms +[2025-09-02 08:23:52] [Rank 0] step:8021/10000 train_time:625467ms step_avg:77.98ms +[2025-09-02 08:23:54] [Rank 0] step:8041/10000 train_time:627118ms step_avg:77.99ms +[2025-09-02 08:23:54] [Rank 0] step:8041/10000 train_time:627118ms step_avg:77.99ms +[2025-09-02 08:23:55] [Rank 0] step:8061/10000 train_time:628763ms step_avg:78.00ms +[2025-09-02 08:23:55] [Rank 0] step:8061/10000 train_time:628763ms step_avg:78.00ms +[2025-09-02 08:23:57] [Rank 0] step:8081/10000 train_time:630396ms step_avg:78.01ms +[2025-09-02 08:23:57] [Rank 0] step:8081/10000 train_time:630396ms step_avg:78.01ms +[2025-09-02 08:23:58] [Rank 0] step:8101/10000 train_time:632049ms step_avg:78.02ms +[2025-09-02 08:23:58] [Rank 0] step:8101/10000 train_time:632049ms step_avg:78.02ms +[2025-09-02 08:24:00] [Rank 0] step:8121/10000 train_time:633693ms step_avg:78.03ms +[2025-09-02 08:24:00] [Rank 0] step:8121/10000 train_time:633693ms step_avg:78.03ms +[2025-09-02 08:24:02] 
[Rank 0] step:8141/10000 train_time:635507ms step_avg:78.06ms +[2025-09-02 08:24:02] [Rank 0] step:8141/10000 train_time:635507ms step_avg:78.06ms +[2025-09-02 08:24:04] [Rank 0] step:8161/10000 train_time:637166ms step_avg:78.07ms +[2025-09-02 08:24:04] [Rank 0] step:8161/10000 train_time:637166ms step_avg:78.07ms +[2025-09-02 08:24:05] [Rank 0] step:8181/10000 train_time:638839ms step_avg:78.09ms +[2025-09-02 08:24:05] [Rank 0] step:8181/10000 train_time:638839ms step_avg:78.09ms +[2025-09-02 08:24:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:24:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:24:19] [Rank 0] PRINT: step:8200/10000 val_loss:3.6769 svd_entropy: attn_qk:H=0.7760,top10E=0.26,eRank=177.8,q75/q25=52.19 attn_vo:H=0.8594,top10E=0.13,eRank=327.0,q75/q25=29.35 mlp_w1:H=0.9123,top10E=0.13,eRank=432.2,q75/q25=4.33 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.85 vo_prod:H=0.7623,top10E=0.22,eRank=163.0,q75/q25=527.94 train_time:640625ms step_avg:78.12ms +[2025-09-02 08:24:19] [Rank 0] PRINT: step:8200/10000 val_loss:3.6769 svd_entropy: attn_qk:H=0.7760,top10E=0.26,eRank=177.8,q75/q25=52.19 attn_vo:H=0.8594,top10E=0.13,eRank=327.0,q75/q25=29.35 mlp_w1:H=0.9123,top10E=0.13,eRank=432.2,q75/q25=4.33 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.85 vo_prod:H=0.7623,top10E=0.22,eRank=163.0,q75/q25=527.94 train_time:640625ms step_avg:78.12ms +[2025-09-02 08:24:19] [Rank 0] step:8201/10000 train_time:640640ms step_avg:78.12ms +[2025-09-02 08:24:19] [Rank 0] step:8201/10000 train_time:640640ms step_avg:78.12ms +[2025-09-02 08:24:21] [Rank 0] step:8221/10000 train_time:642250ms step_avg:78.12ms +[2025-09-02 08:24:21] [Rank 0] step:8221/10000 train_time:642250ms step_avg:78.12ms +[2025-09-02 08:24:22] [Rank 0] step:8241/10000 train_time:643933ms step_avg:78.14ms 
+[2025-09-02 08:24:22] [Rank 0] step:8241/10000 train_time:643933ms step_avg:78.14ms +[2025-09-02 08:24:24] [Rank 0] step:8261/10000 train_time:645609ms step_avg:78.15ms +[2025-09-02 08:24:24] [Rank 0] step:8261/10000 train_time:645609ms step_avg:78.15ms +[2025-09-02 08:24:26] [Rank 0] step:8281/10000 train_time:647285ms step_avg:78.17ms +[2025-09-02 08:24:26] [Rank 0] step:8281/10000 train_time:647285ms step_avg:78.17ms +[2025-09-02 08:24:27] [Rank 0] step:8301/10000 train_time:648958ms step_avg:78.18ms +[2025-09-02 08:24:27] [Rank 0] step:8301/10000 train_time:648958ms step_avg:78.18ms +[2025-09-02 08:24:29] [Rank 0] step:8321/10000 train_time:650623ms step_avg:78.19ms +[2025-09-02 08:24:29] [Rank 0] step:8321/10000 train_time:650623ms step_avg:78.19ms +[2025-09-02 08:24:31] [Rank 0] step:8341/10000 train_time:652301ms step_avg:78.20ms +[2025-09-02 08:24:31] [Rank 0] step:8341/10000 train_time:652301ms step_avg:78.20ms +[2025-09-02 08:24:32] [Rank 0] step:8361/10000 train_time:653981ms step_avg:78.22ms +[2025-09-02 08:24:32] [Rank 0] step:8361/10000 train_time:653981ms step_avg:78.22ms +[2025-09-02 08:24:34] [Rank 0] step:8381/10000 train_time:655652ms step_avg:78.23ms +[2025-09-02 08:24:34] [Rank 0] step:8381/10000 train_time:655652ms step_avg:78.23ms +[2025-09-02 08:24:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:24:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:24:48] [Rank 0] PRINT: step:8400/10000 val_loss:3.6669 svd_entropy: attn_qk:H=0.7762,top10E=0.26,eRank=178.1,q75/q25=51.94 attn_vo:H=0.8597,top10E=0.13,eRank=327.6,q75/q25=29.10 mlp_w1:H=0.9127,top10E=0.13,eRank=433.2,q75/q25=4.31 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.85 vo_prod:H=0.7629,top10E=0.22,eRank=163.6,q75/q25=515.06 train_time:657410ms step_avg:78.26ms +[2025-09-02 08:24:48] [Rank 0] PRINT: step:8400/10000 val_loss:3.6669 svd_entropy: attn_qk:H=0.7762,top10E=0.26,eRank=178.1,q75/q25=51.94 attn_vo:H=0.8597,top10E=0.13,eRank=327.6,q75/q25=29.10 mlp_w1:H=0.9127,top10E=0.13,eRank=433.2,q75/q25=4.31 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.85 vo_prod:H=0.7629,top10E=0.22,eRank=163.6,q75/q25=515.06 train_time:657410ms step_avg:78.26ms +[2025-09-02 08:24:48] [Rank 0] step:8401/10000 train_time:657425ms step_avg:78.26ms +[2025-09-02 08:24:48] [Rank 0] step:8401/10000 train_time:657425ms step_avg:78.26ms +[2025-09-02 08:24:50] [Rank 0] step:8421/10000 train_time:659028ms step_avg:78.26ms +[2025-09-02 08:24:50] [Rank 0] step:8421/10000 train_time:659028ms step_avg:78.26ms +[2025-09-02 08:24:51] [Rank 0] step:8441/10000 train_time:660696ms step_avg:78.27ms +[2025-09-02 08:24:51] [Rank 0] step:8441/10000 train_time:660696ms step_avg:78.27ms +[2025-09-02 08:24:53] [Rank 0] step:8461/10000 train_time:662368ms step_avg:78.28ms +[2025-09-02 08:24:53] [Rank 0] step:8461/10000 train_time:662368ms step_avg:78.28ms +[2025-09-02 08:24:55] [Rank 0] step:8481/10000 train_time:664044ms step_avg:78.30ms +[2025-09-02 08:24:55] [Rank 0] step:8481/10000 train_time:664044ms step_avg:78.30ms +[2025-09-02 08:24:56] [Rank 0] step:8501/10000 train_time:665743ms step_avg:78.31ms +[2025-09-02 08:24:56] [Rank 0] step:8501/10000 train_time:665743ms step_avg:78.31ms +[2025-09-02 08:24:58] [Rank 0] step:8521/10000 train_time:667476ms step_avg:78.33ms +[2025-09-02 08:24:58] [Rank 0] step:8521/10000 train_time:667476ms step_avg:78.33ms +[2025-09-02 08:25:00] 
[Rank 0] step:8541/10000 train_time:669164ms step_avg:78.35ms +[2025-09-02 08:25:00] [Rank 0] step:8541/10000 train_time:669164ms step_avg:78.35ms +[2025-09-02 08:25:01] [Rank 0] step:8561/10000 train_time:670842ms step_avg:78.36ms +[2025-09-02 08:25:01] [Rank 0] step:8561/10000 train_time:670842ms step_avg:78.36ms +[2025-09-02 08:25:03] [Rank 0] step:8581/10000 train_time:672521ms step_avg:78.37ms +[2025-09-02 08:25:03] [Rank 0] step:8581/10000 train_time:672521ms step_avg:78.37ms +[2025-09-02 08:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:25:17] [Rank 0] PRINT: step:8600/10000 val_loss:3.6569 svd_entropy: attn_qk:H=0.7765,top10E=0.26,eRank=178.3,q75/q25=51.65 attn_vo:H=0.8600,top10E=0.13,eRank=328.1,q75/q25=28.85 mlp_w1:H=0.9130,top10E=0.13,eRank=434.0,q75/q25=4.30 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7634,top10E=0.22,eRank=164.2,q75/q25=504.68 train_time:674274ms step_avg:78.40ms +[2025-09-02 08:25:17] [Rank 0] PRINT: step:8600/10000 val_loss:3.6569 svd_entropy: attn_qk:H=0.7765,top10E=0.26,eRank=178.3,q75/q25=51.65 attn_vo:H=0.8600,top10E=0.13,eRank=328.1,q75/q25=28.85 mlp_w1:H=0.9130,top10E=0.13,eRank=434.0,q75/q25=4.30 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7634,top10E=0.22,eRank=164.2,q75/q25=504.68 train_time:674274ms step_avg:78.40ms +[2025-09-02 08:25:17] [Rank 0] step:8601/10000 train_time:674290ms step_avg:78.40ms +[2025-09-02 08:25:17] [Rank 0] step:8601/10000 train_time:674290ms step_avg:78.40ms +[2025-09-02 08:25:18] [Rank 0] step:8621/10000 train_time:675902ms step_avg:78.40ms +[2025-09-02 08:25:18] [Rank 0] step:8621/10000 train_time:675902ms step_avg:78.40ms +[2025-09-02 08:25:20] [Rank 0] step:8641/10000 train_time:677576ms step_avg:78.41ms 
+[2025-09-02 08:25:20] [Rank 0] step:8641/10000 train_time:677576ms step_avg:78.41ms +[2025-09-02 08:25:22] [Rank 0] step:8661/10000 train_time:679254ms step_avg:78.43ms +[2025-09-02 08:25:22] [Rank 0] step:8661/10000 train_time:679254ms step_avg:78.43ms +[2025-09-02 08:25:23] [Rank 0] step:8681/10000 train_time:680929ms step_avg:78.44ms +[2025-09-02 08:25:23] [Rank 0] step:8681/10000 train_time:680929ms step_avg:78.44ms +[2025-09-02 08:25:25] [Rank 0] step:8701/10000 train_time:682599ms step_avg:78.45ms +[2025-09-02 08:25:25] [Rank 0] step:8701/10000 train_time:682599ms step_avg:78.45ms +[2025-09-02 08:25:27] [Rank 0] step:8721/10000 train_time:684280ms step_avg:78.46ms +[2025-09-02 08:25:27] [Rank 0] step:8721/10000 train_time:684280ms step_avg:78.46ms +[2025-09-02 08:25:28] [Rank 0] step:8741/10000 train_time:685945ms step_avg:78.47ms +[2025-09-02 08:25:28] [Rank 0] step:8741/10000 train_time:685945ms step_avg:78.47ms +[2025-09-02 08:25:30] [Rank 0] step:8761/10000 train_time:687619ms step_avg:78.49ms +[2025-09-02 08:25:30] [Rank 0] step:8761/10000 train_time:687619ms step_avg:78.49ms +[2025-09-02 08:25:32] [Rank 0] step:8781/10000 train_time:689299ms step_avg:78.50ms +[2025-09-02 08:25:32] [Rank 0] step:8781/10000 train_time:689299ms step_avg:78.50ms +[2025-09-02 08:25:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:25:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:25:45] [Rank 0] PRINT: step:8800/10000 val_loss:3.6470 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.6,q75/q25=51.40 attn_vo:H=0.8603,top10E=0.13,eRank=328.6,q75/q25=28.62 mlp_w1:H=0.9132,top10E=0.13,eRank=434.7,q75/q25=4.29 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7639,top10E=0.22,eRank=164.7,q75/q25=492.32 train_time:691065ms step_avg:78.53ms +[2025-09-02 08:25:45] [Rank 0] PRINT: step:8800/10000 val_loss:3.6470 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.6,q75/q25=51.40 attn_vo:H=0.8603,top10E=0.13,eRank=328.6,q75/q25=28.62 mlp_w1:H=0.9132,top10E=0.13,eRank=434.7,q75/q25=4.29 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7639,top10E=0.22,eRank=164.7,q75/q25=492.32 train_time:691065ms step_avg:78.53ms +[2025-09-02 08:25:46] [Rank 0] step:8801/10000 train_time:691080ms step_avg:78.52ms +[2025-09-02 08:25:46] [Rank 0] step:8801/10000 train_time:691080ms step_avg:78.52ms +[2025-09-02 08:25:47] [Rank 0] step:8821/10000 train_time:692666ms step_avg:78.52ms +[2025-09-02 08:25:47] [Rank 0] step:8821/10000 train_time:692666ms step_avg:78.52ms +[2025-09-02 08:25:49] [Rank 0] step:8841/10000 train_time:694363ms step_avg:78.54ms +[2025-09-02 08:25:49] [Rank 0] step:8841/10000 train_time:694363ms step_avg:78.54ms +[2025-09-02 08:25:51] [Rank 0] step:8861/10000 train_time:696038ms step_avg:78.55ms +[2025-09-02 08:25:51] [Rank 0] step:8861/10000 train_time:696038ms step_avg:78.55ms +[2025-09-02 08:25:52] [Rank 0] step:8881/10000 train_time:697715ms step_avg:78.56ms +[2025-09-02 08:25:52] [Rank 0] step:8881/10000 train_time:697715ms step_avg:78.56ms +[2025-09-02 08:25:54] [Rank 0] step:8901/10000 train_time:699392ms step_avg:78.57ms +[2025-09-02 08:25:54] [Rank 0] step:8901/10000 train_time:699392ms step_avg:78.57ms +[2025-09-02 08:25:56] [Rank 0] step:8921/10000 train_time:701083ms step_avg:78.59ms +[2025-09-02 08:25:56] [Rank 0] step:8921/10000 train_time:701083ms step_avg:78.59ms +[2025-09-02 08:25:57] 
[Rank 0] step:8941/10000 train_time:702768ms step_avg:78.60ms +[2025-09-02 08:25:57] [Rank 0] step:8941/10000 train_time:702768ms step_avg:78.60ms +[2025-09-02 08:25:59] [Rank 0] step:8961/10000 train_time:704443ms step_avg:78.61ms +[2025-09-02 08:25:59] [Rank 0] step:8961/10000 train_time:704443ms step_avg:78.61ms +[2025-09-02 08:26:01] [Rank 0] step:8981/10000 train_time:706117ms step_avg:78.62ms +[2025-09-02 08:26:01] [Rank 0] step:8981/10000 train_time:706117ms step_avg:78.62ms +[2025-09-02 08:26:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:26:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:26:14] [Rank 0] PRINT: step:9000/10000 val_loss:3.6381 svd_entropy: attn_qk:H=0.7769,top10E=0.26,eRank=178.8,q75/q25=51.33 attn_vo:H=0.8606,top10E=0.13,eRank=329.0,q75/q25=28.40 mlp_w1:H=0.9134,top10E=0.13,eRank=435.3,q75/q25=4.27 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7644,top10E=0.22,eRank=165.3,q75/q25=485.22 train_time:707877ms step_avg:78.65ms +[2025-09-02 08:26:14] [Rank 0] PRINT: step:9000/10000 val_loss:3.6381 svd_entropy: attn_qk:H=0.7769,top10E=0.26,eRank=178.8,q75/q25=51.33 attn_vo:H=0.8606,top10E=0.13,eRank=329.0,q75/q25=28.40 mlp_w1:H=0.9134,top10E=0.13,eRank=435.3,q75/q25=4.27 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7644,top10E=0.22,eRank=165.3,q75/q25=485.22 train_time:707877ms step_avg:78.65ms +[2025-09-02 08:26:14] [Rank 0] step:9001/10000 train_time:707892ms step_avg:78.65ms +[2025-09-02 08:26:14] [Rank 0] step:9001/10000 train_time:707892ms step_avg:78.65ms +[2025-09-02 08:26:16] [Rank 0] step:9021/10000 train_time:709482ms step_avg:78.65ms +[2025-09-02 08:26:16] [Rank 0] step:9021/10000 train_time:709482ms step_avg:78.65ms +[2025-09-02 08:26:18] [Rank 0] step:9041/10000 train_time:711157ms step_avg:78.66ms 
+[2025-09-02 08:26:18] [Rank 0] step:9041/10000 train_time:711157ms step_avg:78.66ms +[2025-09-02 08:26:19] [Rank 0] step:9061/10000 train_time:712843ms step_avg:78.67ms +[2025-09-02 08:26:19] [Rank 0] step:9061/10000 train_time:712843ms step_avg:78.67ms +[2025-09-02 08:26:21] [Rank 0] step:9081/10000 train_time:714529ms step_avg:78.68ms +[2025-09-02 08:26:21] [Rank 0] step:9081/10000 train_time:714529ms step_avg:78.68ms +[2025-09-02 08:26:23] [Rank 0] step:9101/10000 train_time:716223ms step_avg:78.70ms +[2025-09-02 08:26:23] [Rank 0] step:9101/10000 train_time:716223ms step_avg:78.70ms +[2025-09-02 08:26:24] [Rank 0] step:9121/10000 train_time:717904ms step_avg:78.71ms +[2025-09-02 08:26:24] [Rank 0] step:9121/10000 train_time:717904ms step_avg:78.71ms +[2025-09-02 08:26:26] [Rank 0] step:9141/10000 train_time:719571ms step_avg:78.72ms +[2025-09-02 08:26:26] [Rank 0] step:9141/10000 train_time:719571ms step_avg:78.72ms +[2025-09-02 08:26:28] [Rank 0] step:9161/10000 train_time:721245ms step_avg:78.73ms +[2025-09-02 08:26:28] [Rank 0] step:9161/10000 train_time:721245ms step_avg:78.73ms +[2025-09-02 08:26:29] [Rank 0] step:9181/10000 train_time:722956ms step_avg:78.74ms +[2025-09-02 08:26:29] [Rank 0] step:9181/10000 train_time:722956ms step_avg:78.74ms +[2025-09-02 08:26:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:26:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:26:43] [Rank 0] PRINT: step:9200/10000 val_loss:3.6296 svd_entropy: attn_qk:H=0.7770,top10E=0.26,eRank=178.9,q75/q25=51.19 attn_vo:H=0.8608,top10E=0.13,eRank=329.4,q75/q25=28.15 mlp_w1:H=0.9136,top10E=0.13,eRank=435.9,q75/q25=4.26 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7648,top10E=0.22,eRank=165.7,q75/q25=479.21 train_time:724713ms step_avg:78.77ms +[2025-09-02 08:26:43] [Rank 0] PRINT: step:9200/10000 val_loss:3.6296 svd_entropy: attn_qk:H=0.7770,top10E=0.26,eRank=178.9,q75/q25=51.19 attn_vo:H=0.8608,top10E=0.13,eRank=329.4,q75/q25=28.15 mlp_w1:H=0.9136,top10E=0.13,eRank=435.9,q75/q25=4.26 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7648,top10E=0.22,eRank=165.7,q75/q25=479.21 train_time:724713ms step_avg:78.77ms +[2025-09-02 08:26:43] [Rank 0] step:9201/10000 train_time:724728ms step_avg:78.77ms +[2025-09-02 08:26:43] [Rank 0] step:9201/10000 train_time:724728ms step_avg:78.77ms +[2025-09-02 08:26:45] [Rank 0] step:9221/10000 train_time:726338ms step_avg:78.77ms +[2025-09-02 08:26:45] [Rank 0] step:9221/10000 train_time:726338ms step_avg:78.77ms +[2025-09-02 08:26:47] [Rank 0] step:9241/10000 train_time:728024ms step_avg:78.78ms +[2025-09-02 08:26:47] [Rank 0] step:9241/10000 train_time:728024ms step_avg:78.78ms +[2025-09-02 08:26:48] [Rank 0] step:9261/10000 train_time:729710ms step_avg:78.79ms +[2025-09-02 08:26:48] [Rank 0] step:9261/10000 train_time:729710ms step_avg:78.79ms +[2025-09-02 08:26:50] [Rank 0] step:9281/10000 train_time:731379ms step_avg:78.80ms +[2025-09-02 08:26:50] [Rank 0] step:9281/10000 train_time:731379ms step_avg:78.80ms +[2025-09-02 08:26:52] [Rank 0] step:9301/10000 train_time:733055ms step_avg:78.81ms +[2025-09-02 08:26:52] [Rank 0] step:9301/10000 train_time:733055ms step_avg:78.81ms +[2025-09-02 08:26:53] [Rank 0] step:9321/10000 train_time:734737ms step_avg:78.83ms +[2025-09-02 08:26:53] [Rank 0] step:9321/10000 train_time:734737ms step_avg:78.83ms +[2025-09-02 08:26:55] 
[Rank 0] step:9341/10000 train_time:736415ms step_avg:78.84ms +[2025-09-02 08:26:55] [Rank 0] step:9341/10000 train_time:736415ms step_avg:78.84ms +[2025-09-02 08:26:57] [Rank 0] step:9361/10000 train_time:738098ms step_avg:78.85ms +[2025-09-02 08:26:57] [Rank 0] step:9361/10000 train_time:738098ms step_avg:78.85ms +[2025-09-02 08:26:58] [Rank 0] step:9381/10000 train_time:739785ms step_avg:78.86ms +[2025-09-02 08:26:58] [Rank 0] step:9381/10000 train_time:739785ms step_avg:78.86ms +[2025-09-02 08:27:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:27:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:27:12] [Rank 0] PRINT: step:9400/10000 val_loss:3.6222 svd_entropy: attn_qk:H=0.7772,top10E=0.26,eRank=179.1,q75/q25=51.03 attn_vo:H=0.8610,top10E=0.13,eRank=329.8,q75/q25=28.11 mlp_w1:H=0.9138,top10E=0.13,eRank=436.4,q75/q25=4.26 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7652,top10E=0.22,eRank=166.1,q75/q25=471.61 train_time:741555ms step_avg:78.89ms +[2025-09-02 08:27:12] [Rank 0] PRINT: step:9400/10000 val_loss:3.6222 svd_entropy: attn_qk:H=0.7772,top10E=0.26,eRank=179.1,q75/q25=51.03 attn_vo:H=0.8610,top10E=0.13,eRank=329.8,q75/q25=28.11 mlp_w1:H=0.9138,top10E=0.13,eRank=436.4,q75/q25=4.26 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7652,top10E=0.22,eRank=166.1,q75/q25=471.61 train_time:741555ms step_avg:78.89ms +[2025-09-02 08:27:12] [Rank 0] step:9401/10000 train_time:741569ms step_avg:78.88ms +[2025-09-02 08:27:12] [Rank 0] step:9401/10000 train_time:741569ms step_avg:78.88ms +[2025-09-02 08:27:14] [Rank 0] step:9421/10000 train_time:743165ms step_avg:78.88ms +[2025-09-02 08:27:14] [Rank 0] step:9421/10000 train_time:743165ms step_avg:78.88ms +[2025-09-02 08:27:15] [Rank 0] step:9441/10000 train_time:744843ms step_avg:78.89ms 
+[2025-09-02 08:27:15] [Rank 0] step:9441/10000 train_time:744843ms step_avg:78.89ms +[2025-09-02 08:27:17] [Rank 0] step:9461/10000 train_time:746529ms step_avg:78.91ms +[2025-09-02 08:27:17] [Rank 0] step:9461/10000 train_time:746529ms step_avg:78.91ms +[2025-09-02 08:27:19] [Rank 0] step:9481/10000 train_time:748210ms step_avg:78.92ms +[2025-09-02 08:27:19] [Rank 0] step:9481/10000 train_time:748210ms step_avg:78.92ms +[2025-09-02 08:27:21] [Rank 0] step:9501/10000 train_time:749904ms step_avg:78.93ms +[2025-09-02 08:27:21] [Rank 0] step:9501/10000 train_time:749904ms step_avg:78.93ms +[2025-09-02 08:27:22] [Rank 0] step:9521/10000 train_time:751573ms step_avg:78.94ms +[2025-09-02 08:27:22] [Rank 0] step:9521/10000 train_time:751573ms step_avg:78.94ms +[2025-09-02 08:27:24] [Rank 0] step:9541/10000 train_time:753251ms step_avg:78.95ms +[2025-09-02 08:27:24] [Rank 0] step:9541/10000 train_time:753251ms step_avg:78.95ms +[2025-09-02 08:27:26] [Rank 0] step:9561/10000 train_time:754922ms step_avg:78.96ms +[2025-09-02 08:27:26] [Rank 0] step:9561/10000 train_time:754922ms step_avg:78.96ms +[2025-09-02 08:27:27] [Rank 0] step:9581/10000 train_time:756602ms step_avg:78.97ms +[2025-09-02 08:27:27] [Rank 0] step:9581/10000 train_time:756602ms step_avg:78.97ms +[2025-09-02 08:27:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:27:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:27:41] [Rank 0] PRINT: step:9600/10000 val_loss:3.6161 svd_entropy: attn_qk:H=0.7773,top10E=0.26,eRank=179.2,q75/q25=51.10 attn_vo:H=0.8611,top10E=0.13,eRank=330.1,q75/q25=28.03 mlp_w1:H=0.9139,top10E=0.13,eRank=436.8,q75/q25=4.25 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7655,top10E=0.22,eRank=166.5,q75/q25=467.11 train_time:758374ms step_avg:79.00ms +[2025-09-02 08:27:41] [Rank 0] PRINT: step:9600/10000 val_loss:3.6161 svd_entropy: attn_qk:H=0.7773,top10E=0.26,eRank=179.2,q75/q25=51.10 attn_vo:H=0.8611,top10E=0.13,eRank=330.1,q75/q25=28.03 mlp_w1:H=0.9139,top10E=0.13,eRank=436.8,q75/q25=4.25 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7655,top10E=0.22,eRank=166.5,q75/q25=467.11 train_time:758374ms step_avg:79.00ms +[2025-09-02 08:27:41] [Rank 0] step:9601/10000 train_time:758389ms step_avg:78.99ms +[2025-09-02 08:27:41] [Rank 0] step:9601/10000 train_time:758389ms step_avg:78.99ms +[2025-09-02 08:27:43] [Rank 0] step:9621/10000 train_time:759992ms step_avg:78.99ms +[2025-09-02 08:27:43] [Rank 0] step:9621/10000 train_time:759992ms step_avg:78.99ms +[2025-09-02 08:27:44] [Rank 0] step:9641/10000 train_time:761677ms step_avg:79.00ms +[2025-09-02 08:27:44] [Rank 0] step:9641/10000 train_time:761677ms step_avg:79.00ms +[2025-09-02 08:27:46] [Rank 0] step:9661/10000 train_time:763384ms step_avg:79.02ms +[2025-09-02 08:27:46] [Rank 0] step:9661/10000 train_time:763384ms step_avg:79.02ms +[2025-09-02 08:27:48] [Rank 0] step:9681/10000 train_time:765085ms step_avg:79.03ms +[2025-09-02 08:27:48] [Rank 0] step:9681/10000 train_time:765085ms step_avg:79.03ms +[2025-09-02 08:27:49] [Rank 0] step:9701/10000 train_time:766803ms step_avg:79.04ms +[2025-09-02 08:27:49] [Rank 0] step:9701/10000 train_time:766803ms step_avg:79.04ms +[2025-09-02 08:27:51] [Rank 0] step:9721/10000 train_time:768500ms step_avg:79.06ms +[2025-09-02 08:27:51] [Rank 0] step:9721/10000 train_time:768500ms step_avg:79.06ms +[2025-09-02 08:27:53] 
[Rank 0] step:9741/10000 train_time:770220ms step_avg:79.07ms +[2025-09-02 08:27:53] [Rank 0] step:9741/10000 train_time:770220ms step_avg:79.07ms +[2025-09-02 08:27:55] [Rank 0] step:9761/10000 train_time:771930ms step_avg:79.08ms +[2025-09-02 08:27:55] [Rank 0] step:9761/10000 train_time:771930ms step_avg:79.08ms +[2025-09-02 08:27:56] [Rank 0] step:9781/10000 train_time:773649ms step_avg:79.10ms +[2025-09-02 08:27:56] [Rank 0] step:9781/10000 train_time:773649ms step_avg:79.10ms +[2025-09-02 08:27:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:27:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:28:10] [Rank 0] PRINT: step:9800/10000 val_loss:3.6090 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.3,q75/q25=51.07 attn_vo:H=0.8613,top10E=0.13,eRank=330.3,q75/q25=27.88 mlp_w1:H=0.9141,top10E=0.13,eRank=437.1,q75/q25=4.25 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7658,top10E=0.22,eRank=166.8,q75/q25=464.45 train_time:775459ms step_avg:79.13ms +[2025-09-02 08:28:10] [Rank 0] PRINT: step:9800/10000 val_loss:3.6090 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.3,q75/q25=51.07 attn_vo:H=0.8613,top10E=0.13,eRank=330.3,q75/q25=27.88 mlp_w1:H=0.9141,top10E=0.13,eRank=437.1,q75/q25=4.25 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7658,top10E=0.22,eRank=166.8,q75/q25=464.45 train_time:775459ms step_avg:79.13ms +[2025-09-02 08:28:10] [Rank 0] step:9801/10000 train_time:775473ms step_avg:79.12ms +[2025-09-02 08:28:10] [Rank 0] step:9801/10000 train_time:775473ms step_avg:79.12ms +[2025-09-02 08:28:12] [Rank 0] step:9821/10000 train_time:777088ms step_avg:79.13ms +[2025-09-02 08:28:12] [Rank 0] step:9821/10000 train_time:777088ms step_avg:79.13ms +[2025-09-02 08:28:13] [Rank 0] step:9841/10000 train_time:778803ms step_avg:79.14ms 
+[2025-09-02 08:28:13] [Rank 0] step:9841/10000 train_time:778803ms step_avg:79.14ms +[2025-09-02 08:28:15] [Rank 0] step:9861/10000 train_time:780498ms step_avg:79.15ms +[2025-09-02 08:28:15] [Rank 0] step:9861/10000 train_time:780498ms step_avg:79.15ms +[2025-09-02 08:28:17] [Rank 0] step:9881/10000 train_time:782193ms step_avg:79.16ms +[2025-09-02 08:28:17] [Rank 0] step:9881/10000 train_time:782193ms step_avg:79.16ms +[2025-09-02 08:28:19] [Rank 0] step:9901/10000 train_time:783901ms step_avg:79.17ms +[2025-09-02 08:28:19] [Rank 0] step:9901/10000 train_time:783901ms step_avg:79.17ms +[2025-09-02 08:28:20] [Rank 0] step:9921/10000 train_time:785610ms step_avg:79.19ms +[2025-09-02 08:28:20] [Rank 0] step:9921/10000 train_time:785610ms step_avg:79.19ms +[2025-09-02 08:28:22] [Rank 0] step:9941/10000 train_time:787319ms step_avg:79.20ms +[2025-09-02 08:28:22] [Rank 0] step:9941/10000 train_time:787319ms step_avg:79.20ms +[2025-09-02 08:28:24] [Rank 0] step:9961/10000 train_time:789024ms step_avg:79.21ms +[2025-09-02 08:28:24] [Rank 0] step:9961/10000 train_time:789024ms step_avg:79.21ms +[2025-09-02 08:28:25] [Rank 0] step:9981/10000 train_time:790731ms step_avg:79.22ms +[2025-09-02 08:28:25] [Rank 0] step:9981/10000 train_time:790731ms step_avg:79.22ms +[2025-09-02 08:28:27] [Rank 0] step:10000/10000 train_time:792357ms step_avg:79.24ms +[2025-09-02 08:28:27] [Rank 0] step:10000/10000 train_time:792357ms step_avg:79.24ms +[2025-09-02 08:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:28:39] [Rank 0] PRINT: step:10000/10000 val_loss:3.6033 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.3,q75/q25=51.03 attn_vo:H=0.8614,top10E=0.13,eRank=330.4,q75/q25=27.87 mlp_w1:H=0.9141,top10E=0.13,eRank=437.3,q75/q25=4.24 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7660,top10E=0.22,eRank=167.0,q75/q25=463.65 train_time:792540ms step_avg:79.25ms +[2025-09-02 08:28:39] [Rank 0] PRINT: step:10000/10000 val_loss:3.6033 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.3,q75/q25=51.03 attn_vo:H=0.8614,top10E=0.13,eRank=330.4,q75/q25=27.87 mlp_w1:H=0.9141,top10E=0.13,eRank=437.3,q75/q25=4.24 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.86 vo_prod:H=0.7660,top10E=0.22,eRank=167.0,q75/q25=463.65 train_time:792540ms step_avg:79.25ms +[2025-09-02 08:28:39] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 08:28:39 2025 --- +[2025-09-02 08:28:39] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 08:28:39 2025 --- +[2025-09-02 08:28:39] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15076 MiB +[2025-09-02 08:28:39] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15076 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_45/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..024cafdc5349767f93e7256b1a316f84b2d7bfbf --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "9d16dd7f-a4bb-4150-87a4-f3d16326861c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_45/training_log_9d16dd7f-a4bb-4150-87a4-f3d16326861c.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_45/training_log_9d16dd7f-a4bb-4150-87a4-f3d16326861c.txt new file mode 100644 index 0000000000000000000000000000000000000000..fabc207d8cee4821eeb9de52dfdd6d54103b6aac --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_45/training_log_9d16dd7f-a4bb-4150-87a4-f3d16326861c.txt @@ -0,0 +1,2984 @@ +[2025-09-02 09:17:07] [Rank 0] PRINT: --- Script Start: Tue Sep 2 09:17:07 2025 --- +[2025-09-02 09:17:07] [Rank 0] PRINT: --- Script Start: Tue Sep 2 09:17:07 2025 --- +[2025-09-02 09:17:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 09:17:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 09:17:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 09:17:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 09:17:07] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-02 09:17:07] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-02 09:17:07] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_45 +[2025-09-02 09:17:07] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_45 +[2025-09-02 09:17:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 09:17:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 09:17:07] [Rank 0] PRINT: Constructing model... +[2025-09-02 09:17:07] [Rank 0] PRINT: Constructing model... +[2025-09-02 09:17:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 09:17:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 09:17:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 09:17:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 09:17:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 09:17:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 09:17:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 09:17:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 09:17:09] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 09:17:09] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 09:17:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 09:17:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 09:17:09] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 09:17:09] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 09:17:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 09:17:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 09:17:09] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 09:17:09] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 09:17:09] [Rank 0] PRINT: Starting warmup... +[2025-09-02 09:17:09] [Rank 0] PRINT: Starting warmup... +[2025-09-02 09:17:50] [Rank 0] PRINT: Warmup complete. +[2025-09-02 09:17:50] [Rank 0] PRINT: Warmup complete. +[2025-09-02 09:17:50] [Rank 0] PRINT: Starting training... +[2025-09-02 09:17:50] [Rank 0] PRINT: Starting training... 
+[2025-09-02 09:17:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:17:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:18:06] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 09:18:06] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 09:18:08] [Rank 0] step:21/10000 train_time:1629ms step_avg:77.57ms +[2025-09-02 09:18:08] [Rank 0] step:21/10000 train_time:1629ms step_avg:77.57ms +[2025-09-02 09:18:09] [Rank 0] step:41/10000 train_time:3061ms step_avg:74.66ms +[2025-09-02 09:18:09] [Rank 0] step:41/10000 train_time:3061ms step_avg:74.66ms +[2025-09-02 09:18:11] [Rank 0] step:61/10000 train_time:4512ms step_avg:73.97ms +[2025-09-02 09:18:11] [Rank 0] step:61/10000 train_time:4512ms step_avg:73.97ms +[2025-09-02 09:18:12] [Rank 0] step:81/10000 train_time:5966ms step_avg:73.65ms +[2025-09-02 09:18:12] [Rank 0] step:81/10000 train_time:5966ms step_avg:73.65ms +[2025-09-02 09:18:14] [Rank 0] step:101/10000 train_time:7419ms step_avg:73.46ms +[2025-09-02 09:18:14] [Rank 0] step:101/10000 train_time:7419ms step_avg:73.46ms +[2025-09-02 09:18:15] [Rank 0] step:121/10000 train_time:8872ms step_avg:73.32ms +[2025-09-02 09:18:15] [Rank 0] step:121/10000 
train_time:8872ms step_avg:73.32ms +[2025-09-02 09:18:16] [Rank 0] step:141/10000 train_time:10324ms step_avg:73.22ms +[2025-09-02 09:18:16] [Rank 0] step:141/10000 train_time:10324ms step_avg:73.22ms +[2025-09-02 09:18:18] [Rank 0] step:161/10000 train_time:11777ms step_avg:73.15ms +[2025-09-02 09:18:18] [Rank 0] step:161/10000 train_time:11777ms step_avg:73.15ms +[2025-09-02 09:18:19] [Rank 0] step:181/10000 train_time:13229ms step_avg:73.09ms +[2025-09-02 09:18:19] [Rank 0] step:181/10000 train_time:13229ms step_avg:73.09ms +[2025-09-02 09:18:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:18:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:18:32] [Rank 0] PRINT: step:200/10000 val_loss:6.2223 svd_entropy: attn_qk:H=0.6101,top10E=0.54,eRank=97.6,q75/q25=13.06 attn_vo:H=0.5198,top10E=0.57,eRank=77.4,q75/q25=inf mlp_w1:H=0.6661,top10E=0.51,eRank=97.2,q75/q25=2.95 mlp_w2:H=0.8047,top10E=0.17,eRank=213.6,q75/q25=17.07 vo_prod:H=0.3302,top10E=0.80,eRank=15.2,q75/q25=inf train_time:14757ms step_avg:73.78ms +[2025-09-02 09:18:32] [Rank 0] PRINT: step:200/10000 val_loss:6.2223 svd_entropy: attn_qk:H=0.6101,top10E=0.54,eRank=97.6,q75/q25=13.06 attn_vo:H=0.5198,top10E=0.57,eRank=77.4,q75/q25=inf mlp_w1:H=0.6661,top10E=0.51,eRank=97.2,q75/q25=2.95 mlp_w2:H=0.8047,top10E=0.17,eRank=213.6,q75/q25=17.07 vo_prod:H=0.3302,top10E=0.80,eRank=15.2,q75/q25=inf train_time:14757ms step_avg:73.78ms +[2025-09-02 09:18:33] [Rank 0] step:201/10000 train_time:14772ms step_avg:73.49ms +[2025-09-02 09:18:33] [Rank 0] step:201/10000 train_time:14772ms step_avg:73.49ms +[2025-09-02 09:18:34] [Rank 0] step:221/10000 train_time:16166ms step_avg:73.15ms +[2025-09-02 09:18:34] [Rank 0] step:221/10000 train_time:16166ms step_avg:73.15ms +[2025-09-02 09:18:35] [Rank 0] step:241/10000 train_time:17614ms 
step_avg:73.09ms +[2025-09-02 09:18:35] [Rank 0] step:241/10000 train_time:17614ms step_avg:73.09ms +[2025-09-02 09:18:37] [Rank 0] step:261/10000 train_time:19064ms step_avg:73.04ms +[2025-09-02 09:18:37] [Rank 0] step:261/10000 train_time:19064ms step_avg:73.04ms +[2025-09-02 09:18:38] [Rank 0] step:281/10000 train_time:20514ms step_avg:73.00ms +[2025-09-02 09:18:38] [Rank 0] step:281/10000 train_time:20514ms step_avg:73.00ms +[2025-09-02 09:18:40] [Rank 0] step:301/10000 train_time:21962ms step_avg:72.96ms +[2025-09-02 09:18:40] [Rank 0] step:301/10000 train_time:21962ms step_avg:72.96ms +[2025-09-02 09:18:41] [Rank 0] step:321/10000 train_time:23411ms step_avg:72.93ms +[2025-09-02 09:18:41] [Rank 0] step:321/10000 train_time:23411ms step_avg:72.93ms +[2025-09-02 09:18:43] [Rank 0] step:341/10000 train_time:24920ms step_avg:73.08ms +[2025-09-02 09:18:43] [Rank 0] step:341/10000 train_time:24920ms step_avg:73.08ms +[2025-09-02 09:18:44] [Rank 0] step:361/10000 train_time:26371ms step_avg:73.05ms +[2025-09-02 09:18:44] [Rank 0] step:361/10000 train_time:26371ms step_avg:73.05ms +[2025-09-02 09:18:46] [Rank 0] step:381/10000 train_time:27821ms step_avg:73.02ms +[2025-09-02 09:18:46] [Rank 0] step:381/10000 train_time:27821ms step_avg:73.02ms +[2025-09-02 09:18:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:18:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:18:59] [Rank 0] PRINT: step:400/10000 val_loss:5.7156 svd_entropy: attn_qk:H=0.6159,top10E=0.50,eRank=72.7,q75/q25=15.98 attn_vo:H=0.6295,top10E=0.43,eRank=99.5,q75/q25=26.17 mlp_w1:H=0.6869,top10E=0.41,eRank=115.4,q75/q25=4.55 mlp_w2:H=0.9298,top10E=0.07,eRank=483.1,q75/q25=6.39 vo_prod:H=0.4824,top10E=0.70,eRank=29.6,q75/q25=226.14 train_time:29346ms step_avg:73.36ms +[2025-09-02 09:18:59] [Rank 0] PRINT: step:400/10000 val_loss:5.7156 svd_entropy: attn_qk:H=0.6159,top10E=0.50,eRank=72.7,q75/q25=15.98 attn_vo:H=0.6295,top10E=0.43,eRank=99.5,q75/q25=26.17 mlp_w1:H=0.6869,top10E=0.41,eRank=115.4,q75/q25=4.55 mlp_w2:H=0.9298,top10E=0.07,eRank=483.1,q75/q25=6.39 vo_prod:H=0.4824,top10E=0.70,eRank=29.6,q75/q25=226.14 train_time:29346ms step_avg:73.36ms +[2025-09-02 09:18:59] [Rank 0] step:401/10000 train_time:29361ms step_avg:73.22ms +[2025-09-02 09:18:59] [Rank 0] step:401/10000 train_time:29361ms step_avg:73.22ms +[2025-09-02 09:19:00] [Rank 0] step:421/10000 train_time:30746ms step_avg:73.03ms +[2025-09-02 09:19:00] [Rank 0] step:421/10000 train_time:30746ms step_avg:73.03ms +[2025-09-02 09:19:02] [Rank 0] step:441/10000 train_time:32201ms step_avg:73.02ms +[2025-09-02 09:19:02] [Rank 0] step:441/10000 train_time:32201ms step_avg:73.02ms +[2025-09-02 09:19:03] [Rank 0] step:461/10000 train_time:33650ms step_avg:72.99ms +[2025-09-02 09:19:03] [Rank 0] step:461/10000 train_time:33650ms step_avg:72.99ms +[2025-09-02 09:19:05] [Rank 0] step:481/10000 train_time:35099ms step_avg:72.97ms +[2025-09-02 09:19:05] [Rank 0] step:481/10000 train_time:35099ms step_avg:72.97ms +[2025-09-02 09:19:06] [Rank 0] step:501/10000 train_time:36549ms step_avg:72.95ms +[2025-09-02 09:19:06] [Rank 0] step:501/10000 train_time:36549ms step_avg:72.95ms +[2025-09-02 09:19:08] [Rank 0] step:521/10000 train_time:37999ms step_avg:72.93ms +[2025-09-02 09:19:08] [Rank 0] step:521/10000 train_time:37999ms step_avg:72.93ms +[2025-09-02 09:19:09] [Rank 0] step:541/10000 
train_time:39449ms step_avg:72.92ms +[2025-09-02 09:19:09] [Rank 0] step:541/10000 train_time:39449ms step_avg:72.92ms +[2025-09-02 09:19:10] [Rank 0] step:561/10000 train_time:40899ms step_avg:72.90ms +[2025-09-02 09:19:10] [Rank 0] step:561/10000 train_time:40899ms step_avg:72.90ms +[2025-09-02 09:19:12] [Rank 0] step:581/10000 train_time:42349ms step_avg:72.89ms +[2025-09-02 09:19:12] [Rank 0] step:581/10000 train_time:42349ms step_avg:72.89ms +[2025-09-02 09:19:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:19:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:19:25] [Rank 0] PRINT: step:600/10000 val_loss:5.4252 svd_entropy: attn_qk:H=0.6451,top10E=0.44,eRank=84.8,q75/q25=21.17 attn_vo:H=0.6742,top10E=0.36,eRank=122.7,q75/q25=36.75 mlp_w1:H=0.7292,top10E=0.35,eRank=145.5,q75/q25=6.36 mlp_w2:H=0.9490,top10E=0.05,eRank=548.1,q75/q25=4.48 vo_prod:H=0.5355,top10E=0.58,eRank=39.4,q75/q25=521.08 train_time:43873ms step_avg:73.12ms +[2025-09-02 09:19:25] [Rank 0] PRINT: step:600/10000 val_loss:5.4252 svd_entropy: attn_qk:H=0.6451,top10E=0.44,eRank=84.8,q75/q25=21.17 attn_vo:H=0.6742,top10E=0.36,eRank=122.7,q75/q25=36.75 mlp_w1:H=0.7292,top10E=0.35,eRank=145.5,q75/q25=6.36 mlp_w2:H=0.9490,top10E=0.05,eRank=548.1,q75/q25=4.48 vo_prod:H=0.5355,top10E=0.58,eRank=39.4,q75/q25=521.08 train_time:43873ms step_avg:73.12ms +[2025-09-02 09:19:25] [Rank 0] step:601/10000 train_time:43887ms step_avg:73.02ms +[2025-09-02 09:19:25] [Rank 0] step:601/10000 train_time:43887ms step_avg:73.02ms +[2025-09-02 09:19:27] [Rank 0] step:621/10000 train_time:45277ms step_avg:72.91ms +[2025-09-02 09:19:27] [Rank 0] step:621/10000 train_time:45277ms step_avg:72.91ms +[2025-09-02 09:19:28] [Rank 0] step:641/10000 train_time:46725ms step_avg:72.89ms +[2025-09-02 09:19:28] [Rank 0] step:641/10000 
train_time:46725ms step_avg:72.89ms +[2025-09-02 09:19:29] [Rank 0] step:661/10000 train_time:48175ms step_avg:72.88ms +[2025-09-02 09:19:29] [Rank 0] step:661/10000 train_time:48175ms step_avg:72.88ms +[2025-09-02 09:19:31] [Rank 0] step:681/10000 train_time:49623ms step_avg:72.87ms +[2025-09-02 09:19:31] [Rank 0] step:681/10000 train_time:49623ms step_avg:72.87ms +[2025-09-02 09:19:32] [Rank 0] step:701/10000 train_time:51073ms step_avg:72.86ms +[2025-09-02 09:19:32] [Rank 0] step:701/10000 train_time:51073ms step_avg:72.86ms +[2025-09-02 09:19:34] [Rank 0] step:721/10000 train_time:52522ms step_avg:72.85ms +[2025-09-02 09:19:34] [Rank 0] step:721/10000 train_time:52522ms step_avg:72.85ms +[2025-09-02 09:19:35] [Rank 0] step:741/10000 train_time:53971ms step_avg:72.84ms +[2025-09-02 09:19:35] [Rank 0] step:741/10000 train_time:53971ms step_avg:72.84ms +[2025-09-02 09:19:37] [Rank 0] step:761/10000 train_time:55433ms step_avg:72.84ms +[2025-09-02 09:19:37] [Rank 0] step:761/10000 train_time:55433ms step_avg:72.84ms +[2025-09-02 09:19:38] [Rank 0] step:781/10000 train_time:56896ms step_avg:72.85ms +[2025-09-02 09:19:38] [Rank 0] step:781/10000 train_time:56896ms step_avg:72.85ms +[2025-09-02 09:19:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:19:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:19:51] [Rank 0] PRINT: step:800/10000 val_loss:5.1928 svd_entropy: attn_qk:H=0.6659,top10E=0.40,eRank=94.5,q75/q25=28.44 attn_vo:H=0.7059,top10E=0.31,eRank=143.3,q75/q25=46.25 mlp_w1:H=0.7607,top10E=0.31,eRank=172.9,q75/q25=7.24 mlp_w2:H=0.9558,top10E=0.05,eRank=573.1,q75/q25=3.93 vo_prod:H=0.5727,top10E=0.51,eRank=48.6,q75/q25=1102.78 train_time:58434ms step_avg:73.04ms +[2025-09-02 09:19:51] [Rank 0] PRINT: step:800/10000 val_loss:5.1928 svd_entropy: attn_qk:H=0.6659,top10E=0.40,eRank=94.5,q75/q25=28.44 attn_vo:H=0.7059,top10E=0.31,eRank=143.3,q75/q25=46.25 mlp_w1:H=0.7607,top10E=0.31,eRank=172.9,q75/q25=7.24 mlp_w2:H=0.9558,top10E=0.05,eRank=573.1,q75/q25=3.93 vo_prod:H=0.5727,top10E=0.51,eRank=48.6,q75/q25=1102.78 train_time:58434ms step_avg:73.04ms +[2025-09-02 09:19:51] [Rank 0] step:801/10000 train_time:58449ms step_avg:72.97ms +[2025-09-02 09:19:51] [Rank 0] step:801/10000 train_time:58449ms step_avg:72.97ms +[2025-09-02 09:19:53] [Rank 0] step:821/10000 train_time:59848ms step_avg:72.90ms +[2025-09-02 09:19:53] [Rank 0] step:821/10000 train_time:59848ms step_avg:72.90ms +[2025-09-02 09:19:54] [Rank 0] step:841/10000 train_time:61311ms step_avg:72.90ms +[2025-09-02 09:19:54] [Rank 0] step:841/10000 train_time:61311ms step_avg:72.90ms +[2025-09-02 09:19:56] [Rank 0] step:861/10000 train_time:62774ms step_avg:72.91ms +[2025-09-02 09:19:56] [Rank 0] step:861/10000 train_time:62774ms step_avg:72.91ms +[2025-09-02 09:19:57] [Rank 0] step:881/10000 train_time:64239ms step_avg:72.92ms +[2025-09-02 09:19:57] [Rank 0] step:881/10000 train_time:64239ms step_avg:72.92ms +[2025-09-02 09:19:59] [Rank 0] step:901/10000 train_time:65703ms step_avg:72.92ms +[2025-09-02 09:19:59] [Rank 0] step:901/10000 train_time:65703ms step_avg:72.92ms +[2025-09-02 09:20:00] [Rank 0] step:921/10000 train_time:67170ms step_avg:72.93ms +[2025-09-02 09:20:00] [Rank 0] step:921/10000 train_time:67170ms step_avg:72.93ms +[2025-09-02 09:20:02] [Rank 0] step:941/10000 
train_time:68635ms step_avg:72.94ms +[2025-09-02 09:20:02] [Rank 0] step:941/10000 train_time:68635ms step_avg:72.94ms +[2025-09-02 09:20:03] [Rank 0] step:961/10000 train_time:70100ms step_avg:72.95ms +[2025-09-02 09:20:03] [Rank 0] step:961/10000 train_time:70100ms step_avg:72.95ms +[2025-09-02 09:20:05] [Rank 0] step:981/10000 train_time:71565ms step_avg:72.95ms +[2025-09-02 09:20:05] [Rank 0] step:981/10000 train_time:71565ms step_avg:72.95ms +[2025-09-02 09:20:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:20:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:20:18] [Rank 0] PRINT: step:1000/10000 val_loss:5.0018 svd_entropy: attn_qk:H=0.6816,top10E=0.38,eRank=102.8,q75/q25=35.72 attn_vo:H=0.7285,top10E=0.28,eRank=160.8,q75/q25=52.54 mlp_w1:H=0.7865,top10E=0.28,eRank=199.7,q75/q25=7.47 mlp_w2:H=0.9601,top10E=0.05,eRank=589.7,q75/q25=3.61 vo_prod:H=0.5996,top10E=0.46,eRank=56.9,q75/q25=1985.18 train_time:73105ms step_avg:73.10ms +[2025-09-02 09:20:18] [Rank 0] PRINT: step:1000/10000 val_loss:5.0018 svd_entropy: attn_qk:H=0.6816,top10E=0.38,eRank=102.8,q75/q25=35.72 attn_vo:H=0.7285,top10E=0.28,eRank=160.8,q75/q25=52.54 mlp_w1:H=0.7865,top10E=0.28,eRank=199.7,q75/q25=7.47 mlp_w2:H=0.9601,top10E=0.05,eRank=589.7,q75/q25=3.61 vo_prod:H=0.5996,top10E=0.46,eRank=56.9,q75/q25=1985.18 train_time:73105ms step_avg:73.10ms +[2025-09-02 09:20:18] [Rank 0] step:1001/10000 train_time:73119ms step_avg:73.05ms +[2025-09-02 09:20:18] [Rank 0] step:1001/10000 train_time:73119ms step_avg:73.05ms +[2025-09-02 09:20:19] [Rank 0] step:1021/10000 train_time:74506ms step_avg:72.97ms +[2025-09-02 09:20:19] [Rank 0] step:1021/10000 train_time:74506ms step_avg:72.97ms +[2025-09-02 09:20:21] [Rank 0] step:1041/10000 train_time:75969ms step_avg:72.98ms +[2025-09-02 09:20:21] [Rank 0] 
step:1041/10000 train_time:75969ms step_avg:72.98ms +[2025-09-02 09:20:22] [Rank 0] step:1061/10000 train_time:77434ms step_avg:72.98ms +[2025-09-02 09:20:22] [Rank 0] step:1061/10000 train_time:77434ms step_avg:72.98ms +[2025-09-02 09:20:24] [Rank 0] step:1081/10000 train_time:78897ms step_avg:72.99ms +[2025-09-02 09:20:24] [Rank 0] step:1081/10000 train_time:78897ms step_avg:72.99ms +[2025-09-02 09:20:25] [Rank 0] step:1101/10000 train_time:80361ms step_avg:72.99ms +[2025-09-02 09:20:25] [Rank 0] step:1101/10000 train_time:80361ms step_avg:72.99ms +[2025-09-02 09:20:26] [Rank 0] step:1121/10000 train_time:81827ms step_avg:72.99ms +[2025-09-02 09:20:26] [Rank 0] step:1121/10000 train_time:81827ms step_avg:72.99ms +[2025-09-02 09:20:28] [Rank 0] step:1141/10000 train_time:83290ms step_avg:73.00ms +[2025-09-02 09:20:28] [Rank 0] step:1141/10000 train_time:83290ms step_avg:73.00ms +[2025-09-02 09:20:29] [Rank 0] step:1161/10000 train_time:84755ms step_avg:73.00ms +[2025-09-02 09:20:29] [Rank 0] step:1161/10000 train_time:84755ms step_avg:73.00ms +[2025-09-02 09:20:31] [Rank 0] step:1181/10000 train_time:86218ms step_avg:73.00ms +[2025-09-02 09:20:31] [Rank 0] step:1181/10000 train_time:86218ms step_avg:73.00ms +[2025-09-02 09:20:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:20:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:20:44] [Rank 0] PRINT: step:1200/10000 val_loss:4.8246 svd_entropy: attn_qk:H=0.6938,top10E=0.36,eRank=110.0,q75/q25=42.90 attn_vo:H=0.7465,top10E=0.25,eRank=177.0,q75/q25=56.63 mlp_w1:H=0.8065,top10E=0.25,eRank=224.3,q75/q25=7.43 mlp_w2:H=0.9631,top10E=0.04,eRank=601.6,q75/q25=3.39 vo_prod:H=0.6191,top10E=0.42,eRank=64.0,q75/q25=3005.95 train_time:87759ms step_avg:73.13ms +[2025-09-02 09:20:44] [Rank 0] PRINT: step:1200/10000 val_loss:4.8246 svd_entropy: attn_qk:H=0.6938,top10E=0.36,eRank=110.0,q75/q25=42.90 attn_vo:H=0.7465,top10E=0.25,eRank=177.0,q75/q25=56.63 mlp_w1:H=0.8065,top10E=0.25,eRank=224.3,q75/q25=7.43 mlp_w2:H=0.9631,top10E=0.04,eRank=601.6,q75/q25=3.39 vo_prod:H=0.6191,top10E=0.42,eRank=64.0,q75/q25=3005.95 train_time:87759ms step_avg:73.13ms +[2025-09-02 09:20:44] [Rank 0] step:1201/10000 train_time:87774ms step_avg:73.08ms +[2025-09-02 09:20:44] [Rank 0] step:1201/10000 train_time:87774ms step_avg:73.08ms +[2025-09-02 09:20:46] [Rank 0] step:1221/10000 train_time:89168ms step_avg:73.03ms +[2025-09-02 09:20:46] [Rank 0] step:1221/10000 train_time:89168ms step_avg:73.03ms +[2025-09-02 09:20:47] [Rank 0] step:1241/10000 train_time:90630ms step_avg:73.03ms +[2025-09-02 09:20:47] [Rank 0] step:1241/10000 train_time:90630ms step_avg:73.03ms +[2025-09-02 09:20:48] [Rank 0] step:1261/10000 train_time:92093ms step_avg:73.03ms +[2025-09-02 09:20:48] [Rank 0] step:1261/10000 train_time:92093ms step_avg:73.03ms +[2025-09-02 09:20:50] [Rank 0] step:1281/10000 train_time:93600ms step_avg:73.07ms +[2025-09-02 09:20:50] [Rank 0] step:1281/10000 train_time:93600ms step_avg:73.07ms +[2025-09-02 09:20:51] [Rank 0] step:1301/10000 train_time:95065ms step_avg:73.07ms +[2025-09-02 09:20:51] [Rank 0] step:1301/10000 train_time:95065ms step_avg:73.07ms +[2025-09-02 09:20:53] [Rank 0] step:1321/10000 train_time:96530ms step_avg:73.07ms +[2025-09-02 09:20:53] [Rank 0] step:1321/10000 train_time:96530ms step_avg:73.07ms +[2025-09-02 09:20:54] [Rank 0] 
step:1341/10000 train_time:97995ms step_avg:73.08ms +[2025-09-02 09:20:54] [Rank 0] step:1341/10000 train_time:97995ms step_avg:73.08ms +[2025-09-02 09:20:56] [Rank 0] step:1361/10000 train_time:99459ms step_avg:73.08ms +[2025-09-02 09:20:56] [Rank 0] step:1361/10000 train_time:99459ms step_avg:73.08ms +[2025-09-02 09:20:57] [Rank 0] step:1381/10000 train_time:100924ms step_avg:73.08ms +[2025-09-02 09:20:57] [Rank 0] step:1381/10000 train_time:100924ms step_avg:73.08ms +[2025-09-02 09:20:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:20:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:21:10] [Rank 0] PRINT: step:1400/10000 val_loss:4.7090 svd_entropy: attn_qk:H=0.7037,top10E=0.34,eRank=116.4,q75/q25=49.07 attn_vo:H=0.7607,top10E=0.24,eRank=191.0,q75/q25=59.57 mlp_w1:H=0.8220,top10E=0.23,eRank=245.7,q75/q25=7.23 mlp_w2:H=0.9653,top10E=0.04,eRank=610.1,q75/q25=3.25 vo_prod:H=0.6346,top10E=0.39,eRank=70.5,q75/q25=3713.96 train_time:102462ms step_avg:73.19ms +[2025-09-02 09:21:10] [Rank 0] PRINT: step:1400/10000 val_loss:4.7090 svd_entropy: attn_qk:H=0.7037,top10E=0.34,eRank=116.4,q75/q25=49.07 attn_vo:H=0.7607,top10E=0.24,eRank=191.0,q75/q25=59.57 mlp_w1:H=0.8220,top10E=0.23,eRank=245.7,q75/q25=7.23 mlp_w2:H=0.9653,top10E=0.04,eRank=610.1,q75/q25=3.25 vo_prod:H=0.6346,top10E=0.39,eRank=70.5,q75/q25=3713.96 train_time:102462ms step_avg:73.19ms +[2025-09-02 09:21:10] [Rank 0] step:1401/10000 train_time:102476ms step_avg:73.14ms +[2025-09-02 09:21:10] [Rank 0] step:1401/10000 train_time:102476ms step_avg:73.14ms +[2025-09-02 09:21:12] [Rank 0] step:1421/10000 train_time:103874ms step_avg:73.10ms +[2025-09-02 09:21:12] [Rank 0] step:1421/10000 train_time:103874ms step_avg:73.10ms +[2025-09-02 09:21:13] [Rank 0] step:1441/10000 train_time:105336ms step_avg:73.10ms +[2025-09-02 
09:21:13] [Rank 0] step:1441/10000 train_time:105336ms step_avg:73.10ms +[2025-09-02 09:21:15] [Rank 0] step:1461/10000 train_time:106798ms step_avg:73.10ms +[2025-09-02 09:21:15] [Rank 0] step:1461/10000 train_time:106798ms step_avg:73.10ms +[2025-09-02 09:21:16] [Rank 0] step:1481/10000 train_time:108262ms step_avg:73.10ms +[2025-09-02 09:21:16] [Rank 0] step:1481/10000 train_time:108262ms step_avg:73.10ms +[2025-09-02 09:21:18] [Rank 0] step:1501/10000 train_time:109734ms step_avg:73.11ms +[2025-09-02 09:21:18] [Rank 0] step:1501/10000 train_time:109734ms step_avg:73.11ms +[2025-09-02 09:21:19] [Rank 0] step:1521/10000 train_time:111208ms step_avg:73.12ms +[2025-09-02 09:21:19] [Rank 0] step:1521/10000 train_time:111208ms step_avg:73.12ms +[2025-09-02 09:21:21] [Rank 0] step:1541/10000 train_time:112683ms step_avg:73.12ms +[2025-09-02 09:21:21] [Rank 0] step:1541/10000 train_time:112683ms step_avg:73.12ms +[2025-09-02 09:21:22] [Rank 0] step:1561/10000 train_time:114157ms step_avg:73.13ms +[2025-09-02 09:21:22] [Rank 0] step:1561/10000 train_time:114157ms step_avg:73.13ms +[2025-09-02 09:21:24] [Rank 0] step:1581/10000 train_time:115631ms step_avg:73.14ms +[2025-09-02 09:21:24] [Rank 0] step:1581/10000 train_time:115631ms step_avg:73.14ms +[2025-09-02 09:21:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:21:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:21:37] [Rank 0] PRINT: step:1600/10000 val_loss:4.5882 svd_entropy: attn_qk:H=0.7115,top10E=0.33,eRank=121.6,q75/q25=53.75 attn_vo:H=0.7724,top10E=0.22,eRank=203.3,q75/q25=61.16 mlp_w1:H=0.8342,top10E=0.22,eRank=264.4,q75/q25=6.99 mlp_w2:H=0.9668,top10E=0.04,eRank=616.4,q75/q25=3.15 vo_prod:H=0.6481,top10E=0.37,eRank=76.8,q75/q25=4115.86 train_time:117182ms step_avg:73.24ms +[2025-09-02 09:21:37] [Rank 0] PRINT: step:1600/10000 val_loss:4.5882 svd_entropy: attn_qk:H=0.7115,top10E=0.33,eRank=121.6,q75/q25=53.75 attn_vo:H=0.7724,top10E=0.22,eRank=203.3,q75/q25=61.16 mlp_w1:H=0.8342,top10E=0.22,eRank=264.4,q75/q25=6.99 mlp_w2:H=0.9668,top10E=0.04,eRank=616.4,q75/q25=3.15 vo_prod:H=0.6481,top10E=0.37,eRank=76.8,q75/q25=4115.86 train_time:117182ms step_avg:73.24ms +[2025-09-02 09:21:37] [Rank 0] step:1601/10000 train_time:117196ms step_avg:73.20ms +[2025-09-02 09:21:37] [Rank 0] step:1601/10000 train_time:117196ms step_avg:73.20ms +[2025-09-02 09:21:38] [Rank 0] step:1621/10000 train_time:118607ms step_avg:73.17ms +[2025-09-02 09:21:38] [Rank 0] step:1621/10000 train_time:118607ms step_avg:73.17ms +[2025-09-02 09:21:40] [Rank 0] step:1641/10000 train_time:120081ms step_avg:73.18ms +[2025-09-02 09:21:40] [Rank 0] step:1641/10000 train_time:120081ms step_avg:73.18ms +[2025-09-02 09:21:41] [Rank 0] step:1661/10000 train_time:121555ms step_avg:73.18ms +[2025-09-02 09:21:41] [Rank 0] step:1661/10000 train_time:121555ms step_avg:73.18ms +[2025-09-02 09:21:43] [Rank 0] step:1681/10000 train_time:123030ms step_avg:73.19ms +[2025-09-02 09:21:43] [Rank 0] step:1681/10000 train_time:123030ms step_avg:73.19ms +[2025-09-02 09:21:44] [Rank 0] step:1701/10000 train_time:124504ms step_avg:73.19ms +[2025-09-02 09:21:44] [Rank 0] step:1701/10000 train_time:124504ms step_avg:73.19ms +[2025-09-02 09:21:46] [Rank 0] step:1721/10000 train_time:125980ms step_avg:73.20ms +[2025-09-02 09:21:46] [Rank 0] step:1721/10000 train_time:125980ms step_avg:73.20ms +[2025-09-02 09:21:47] 
[Rank 0] step:1741/10000 train_time:127454ms step_avg:73.21ms +[2025-09-02 09:21:47] [Rank 0] step:1741/10000 train_time:127454ms step_avg:73.21ms +[2025-09-02 09:21:49] [Rank 0] step:1761/10000 train_time:128931ms step_avg:73.21ms +[2025-09-02 09:21:49] [Rank 0] step:1761/10000 train_time:128931ms step_avg:73.21ms +[2025-09-02 09:21:50] [Rank 0] step:1781/10000 train_time:130405ms step_avg:73.22ms +[2025-09-02 09:21:50] [Rank 0] step:1781/10000 train_time:130405ms step_avg:73.22ms +[2025-09-02 09:21:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:21:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:22:03] [Rank 0] PRINT: step:1800/10000 val_loss:4.4948 svd_entropy: attn_qk:H=0.7179,top10E=0.33,eRank=126.1,q75/q25=57.04 attn_vo:H=0.7821,top10E=0.21,eRank=214.2,q75/q25=62.15 mlp_w1:H=0.8440,top10E=0.21,eRank=280.5,q75/q25=6.71 mlp_w2:H=0.9680,top10E=0.04,eRank=621.0,q75/q25=3.08 vo_prod:H=0.6594,top10E=0.35,eRank=82.7,q75/q25=4157.17 train_time:131958ms step_avg:73.31ms +[2025-09-02 09:22:03] [Rank 0] PRINT: step:1800/10000 val_loss:4.4948 svd_entropy: attn_qk:H=0.7179,top10E=0.33,eRank=126.1,q75/q25=57.04 attn_vo:H=0.7821,top10E=0.21,eRank=214.2,q75/q25=62.15 mlp_w1:H=0.8440,top10E=0.21,eRank=280.5,q75/q25=6.71 mlp_w2:H=0.9680,top10E=0.04,eRank=621.0,q75/q25=3.08 vo_prod:H=0.6594,top10E=0.35,eRank=82.7,q75/q25=4157.17 train_time:131958ms step_avg:73.31ms +[2025-09-02 09:22:03] [Rank 0] step:1801/10000 train_time:131971ms step_avg:73.28ms +[2025-09-02 09:22:03] [Rank 0] step:1801/10000 train_time:131971ms step_avg:73.28ms +[2025-09-02 09:22:05] [Rank 0] step:1821/10000 train_time:133373ms step_avg:73.24ms +[2025-09-02 09:22:05] [Rank 0] step:1821/10000 train_time:133373ms step_avg:73.24ms +[2025-09-02 09:22:06] [Rank 0] step:1841/10000 train_time:134849ms step_avg:73.25ms 
+[2025-09-02 09:22:06] [Rank 0] step:1841/10000 train_time:134849ms step_avg:73.25ms +[2025-09-02 09:22:08] [Rank 0] step:1861/10000 train_time:136324ms step_avg:73.25ms +[2025-09-02 09:22:08] [Rank 0] step:1861/10000 train_time:136324ms step_avg:73.25ms +[2025-09-02 09:22:09] [Rank 0] step:1881/10000 train_time:137800ms step_avg:73.26ms +[2025-09-02 09:22:09] [Rank 0] step:1881/10000 train_time:137800ms step_avg:73.26ms +[2025-09-02 09:22:11] [Rank 0] step:1901/10000 train_time:139276ms step_avg:73.26ms +[2025-09-02 09:22:11] [Rank 0] step:1901/10000 train_time:139276ms step_avg:73.26ms +[2025-09-02 09:22:12] [Rank 0] step:1921/10000 train_time:140752ms step_avg:73.27ms +[2025-09-02 09:22:12] [Rank 0] step:1921/10000 train_time:140752ms step_avg:73.27ms +[2025-09-02 09:22:14] [Rank 0] step:1941/10000 train_time:142229ms step_avg:73.28ms +[2025-09-02 09:22:14] [Rank 0] step:1941/10000 train_time:142229ms step_avg:73.28ms +[2025-09-02 09:22:15] [Rank 0] step:1961/10000 train_time:143705ms step_avg:73.28ms +[2025-09-02 09:22:15] [Rank 0] step:1961/10000 train_time:143705ms step_avg:73.28ms +[2025-09-02 09:22:16] [Rank 0] step:1981/10000 train_time:145180ms step_avg:73.29ms +[2025-09-02 09:22:16] [Rank 0] step:1981/10000 train_time:145180ms step_avg:73.29ms +[2025-09-02 09:22:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:22:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:22:30] [Rank 0] PRINT: step:2000/10000 val_loss:4.4370 svd_entropy: attn_qk:H=0.7234,top10E=0.32,eRank=130.1,q75/q25=59.93 attn_vo:H=0.7900,top10E=0.20,eRank=223.5,q75/q25=62.12 mlp_w1:H=0.8519,top10E=0.20,eRank=294.5,q75/q25=6.47 mlp_w2:H=0.9688,top10E=0.04,eRank=624.3,q75/q25=3.03 vo_prod:H=0.6691,top10E=0.34,eRank=88.2,q75/q25=4071.82 train_time:146735ms step_avg:73.37ms +[2025-09-02 09:22:30] [Rank 0] PRINT: step:2000/10000 val_loss:4.4370 svd_entropy: attn_qk:H=0.7234,top10E=0.32,eRank=130.1,q75/q25=59.93 attn_vo:H=0.7900,top10E=0.20,eRank=223.5,q75/q25=62.12 mlp_w1:H=0.8519,top10E=0.20,eRank=294.5,q75/q25=6.47 mlp_w2:H=0.9688,top10E=0.04,eRank=624.3,q75/q25=3.03 vo_prod:H=0.6691,top10E=0.34,eRank=88.2,q75/q25=4071.82 train_time:146735ms step_avg:73.37ms +[2025-09-02 09:22:30] [Rank 0] step:2001/10000 train_time:146749ms step_avg:73.34ms +[2025-09-02 09:22:30] [Rank 0] step:2001/10000 train_time:146749ms step_avg:73.34ms +[2025-09-02 09:22:31] [Rank 0] step:2021/10000 train_time:148179ms step_avg:73.32ms +[2025-09-02 09:22:31] [Rank 0] step:2021/10000 train_time:148179ms step_avg:73.32ms +[2025-09-02 09:22:33] [Rank 0] step:2041/10000 train_time:149836ms step_avg:73.41ms +[2025-09-02 09:22:33] [Rank 0] step:2041/10000 train_time:149836ms step_avg:73.41ms +[2025-09-02 09:22:34] [Rank 0] step:2061/10000 train_time:151310ms step_avg:73.42ms +[2025-09-02 09:22:34] [Rank 0] step:2061/10000 train_time:151310ms step_avg:73.42ms +[2025-09-02 09:22:36] [Rank 0] step:2081/10000 train_time:152785ms step_avg:73.42ms +[2025-09-02 09:22:36] [Rank 0] step:2081/10000 train_time:152785ms step_avg:73.42ms +[2025-09-02 09:22:37] [Rank 0] step:2101/10000 train_time:154260ms step_avg:73.42ms +[2025-09-02 09:22:37] [Rank 0] step:2101/10000 train_time:154260ms step_avg:73.42ms +[2025-09-02 09:22:39] [Rank 0] step:2121/10000 train_time:155736ms step_avg:73.43ms +[2025-09-02 09:22:39] [Rank 0] step:2121/10000 train_time:155736ms step_avg:73.43ms +[2025-09-02 09:22:40] 
[Rank 0] step:2141/10000 train_time:157213ms step_avg:73.43ms +[2025-09-02 09:22:40] [Rank 0] step:2141/10000 train_time:157213ms step_avg:73.43ms +[2025-09-02 09:22:42] [Rank 0] step:2161/10000 train_time:158689ms step_avg:73.43ms +[2025-09-02 09:22:42] [Rank 0] step:2161/10000 train_time:158689ms step_avg:73.43ms +[2025-09-02 09:22:43] [Rank 0] step:2181/10000 train_time:160165ms step_avg:73.44ms +[2025-09-02 09:22:43] [Rank 0] step:2181/10000 train_time:160165ms step_avg:73.44ms +[2025-09-02 09:22:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:22:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:22:56] [Rank 0] PRINT: step:2200/10000 val_loss:4.3707 svd_entropy: attn_qk:H=0.7279,top10E=0.31,eRank=133.6,q75/q25=61.79 attn_vo:H=0.7965,top10E=0.19,eRank=231.3,q75/q25=61.69 mlp_w1:H=0.8583,top10E=0.19,eRank=306.3,q75/q25=6.25 mlp_w2:H=0.9694,top10E=0.04,eRank=626.9,q75/q25=2.99 vo_prod:H=0.6772,top10E=0.32,eRank=93.1,q75/q25=3922.76 train_time:161716ms step_avg:73.51ms +[2025-09-02 09:22:56] [Rank 0] PRINT: step:2200/10000 val_loss:4.3707 svd_entropy: attn_qk:H=0.7279,top10E=0.31,eRank=133.6,q75/q25=61.79 attn_vo:H=0.7965,top10E=0.19,eRank=231.3,q75/q25=61.69 mlp_w1:H=0.8583,top10E=0.19,eRank=306.3,q75/q25=6.25 mlp_w2:H=0.9694,top10E=0.04,eRank=626.9,q75/q25=2.99 vo_prod:H=0.6772,top10E=0.32,eRank=93.1,q75/q25=3922.76 train_time:161716ms step_avg:73.51ms +[2025-09-02 09:22:56] [Rank 0] step:2201/10000 train_time:161730ms step_avg:73.48ms +[2025-09-02 09:22:56] [Rank 0] step:2201/10000 train_time:161730ms step_avg:73.48ms +[2025-09-02 09:22:58] [Rank 0] step:2221/10000 train_time:163156ms step_avg:73.46ms +[2025-09-02 09:22:58] [Rank 0] step:2221/10000 train_time:163156ms step_avg:73.46ms +[2025-09-02 09:22:59] [Rank 0] step:2241/10000 train_time:164668ms step_avg:73.48ms 
+[2025-09-02 09:22:59] [Rank 0] step:2241/10000 train_time:164668ms step_avg:73.48ms +[2025-09-02 09:23:01] [Rank 0] step:2261/10000 train_time:166185ms step_avg:73.50ms +[2025-09-02 09:23:01] [Rank 0] step:2261/10000 train_time:166185ms step_avg:73.50ms +[2025-09-02 09:23:02] [Rank 0] step:2281/10000 train_time:167703ms step_avg:73.52ms +[2025-09-02 09:23:02] [Rank 0] step:2281/10000 train_time:167703ms step_avg:73.52ms +[2025-09-02 09:23:04] [Rank 0] step:2301/10000 train_time:169222ms step_avg:73.54ms +[2025-09-02 09:23:04] [Rank 0] step:2301/10000 train_time:169222ms step_avg:73.54ms +[2025-09-02 09:23:05] [Rank 0] step:2321/10000 train_time:170743ms step_avg:73.56ms +[2025-09-02 09:23:05] [Rank 0] step:2321/10000 train_time:170743ms step_avg:73.56ms +[2025-09-02 09:23:07] [Rank 0] step:2341/10000 train_time:172262ms step_avg:73.58ms +[2025-09-02 09:23:07] [Rank 0] step:2341/10000 train_time:172262ms step_avg:73.58ms +[2025-09-02 09:23:09] [Rank 0] step:2361/10000 train_time:173782ms step_avg:73.61ms +[2025-09-02 09:23:09] [Rank 0] step:2361/10000 train_time:173782ms step_avg:73.61ms +[2025-09-02 09:23:10] [Rank 0] step:2381/10000 train_time:175302ms step_avg:73.63ms +[2025-09-02 09:23:10] [Rank 0] step:2381/10000 train_time:175302ms step_avg:73.63ms +[2025-09-02 09:23:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:23:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:23:23] [Rank 0] PRINT: step:2400/10000 val_loss:4.2941 svd_entropy: attn_qk:H=0.7313,top10E=0.31,eRank=136.1,q75/q25=63.20 attn_vo:H=0.8021,top10E=0.18,eRank=238.3,q75/q25=60.99 mlp_w1:H=0.8640,top10E=0.19,eRank=317.4,q75/q25=6.04 mlp_w2:H=0.9699,top10E=0.04,eRank=628.8,q75/q25=2.97 vo_prod:H=0.6843,top10E=0.31,eRank=97.5,q75/q25=3682.60 train_time:176899ms step_avg:73.71ms +[2025-09-02 09:23:23] [Rank 0] PRINT: step:2400/10000 val_loss:4.2941 svd_entropy: attn_qk:H=0.7313,top10E=0.31,eRank=136.1,q75/q25=63.20 attn_vo:H=0.8021,top10E=0.18,eRank=238.3,q75/q25=60.99 mlp_w1:H=0.8640,top10E=0.19,eRank=317.4,q75/q25=6.04 mlp_w2:H=0.9699,top10E=0.04,eRank=628.8,q75/q25=2.97 vo_prod:H=0.6843,top10E=0.31,eRank=97.5,q75/q25=3682.60 train_time:176899ms step_avg:73.71ms +[2025-09-02 09:23:23] [Rank 0] step:2401/10000 train_time:176914ms step_avg:73.68ms +[2025-09-02 09:23:23] [Rank 0] step:2401/10000 train_time:176914ms step_avg:73.68ms +[2025-09-02 09:23:25] [Rank 0] step:2421/10000 train_time:178357ms step_avg:73.67ms +[2025-09-02 09:23:25] [Rank 0] step:2421/10000 train_time:178357ms step_avg:73.67ms +[2025-09-02 09:23:26] [Rank 0] step:2441/10000 train_time:179875ms step_avg:73.69ms +[2025-09-02 09:23:26] [Rank 0] step:2441/10000 train_time:179875ms step_avg:73.69ms +[2025-09-02 09:23:28] [Rank 0] step:2461/10000 train_time:181393ms step_avg:73.71ms +[2025-09-02 09:23:28] [Rank 0] step:2461/10000 train_time:181393ms step_avg:73.71ms +[2025-09-02 09:23:29] [Rank 0] step:2481/10000 train_time:182912ms step_avg:73.73ms +[2025-09-02 09:23:29] [Rank 0] step:2481/10000 train_time:182912ms step_avg:73.73ms +[2025-09-02 09:23:31] [Rank 0] step:2501/10000 train_time:184432ms step_avg:73.74ms +[2025-09-02 09:23:31] [Rank 0] step:2501/10000 train_time:184432ms step_avg:73.74ms +[2025-09-02 09:23:32] [Rank 0] step:2521/10000 train_time:185952ms step_avg:73.76ms +[2025-09-02 09:23:32] [Rank 0] step:2521/10000 train_time:185952ms step_avg:73.76ms +[2025-09-02 09:23:34] 
[Rank 0] step:2541/10000 train_time:187472ms step_avg:73.78ms +[2025-09-02 09:23:34] [Rank 0] step:2541/10000 train_time:187472ms step_avg:73.78ms +[2025-09-02 09:23:35] [Rank 0] step:2561/10000 train_time:188991ms step_avg:73.80ms +[2025-09-02 09:23:35] [Rank 0] step:2561/10000 train_time:188991ms step_avg:73.80ms +[2025-09-02 09:23:37] [Rank 0] step:2581/10000 train_time:190512ms step_avg:73.81ms +[2025-09-02 09:23:37] [Rank 0] step:2581/10000 train_time:190512ms step_avg:73.81ms +[2025-09-02 09:23:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:23:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:23:50] [Rank 0] PRINT: step:2600/10000 val_loss:4.2487 svd_entropy: attn_qk:H=0.7350,top10E=0.30,eRank=139.1,q75/q25=63.84 attn_vo:H=0.8070,top10E=0.18,eRank=244.6,q75/q25=59.64 mlp_w1:H=0.8689,top10E=0.18,eRank=327.1,q75/q25=5.86 mlp_w2:H=0.9702,top10E=0.04,eRank=630.3,q75/q25=2.94 vo_prod:H=0.6908,top10E=0.30,eRank=102.0,q75/q25=3539.78 train_time:192110ms step_avg:73.89ms +[2025-09-02 09:23:50] [Rank 0] PRINT: step:2600/10000 val_loss:4.2487 svd_entropy: attn_qk:H=0.7350,top10E=0.30,eRank=139.1,q75/q25=63.84 attn_vo:H=0.8070,top10E=0.18,eRank=244.6,q75/q25=59.64 mlp_w1:H=0.8689,top10E=0.18,eRank=327.1,q75/q25=5.86 mlp_w2:H=0.9702,top10E=0.04,eRank=630.3,q75/q25=2.94 vo_prod:H=0.6908,top10E=0.30,eRank=102.0,q75/q25=3539.78 train_time:192110ms step_avg:73.89ms +[2025-09-02 09:23:50] [Rank 0] step:2601/10000 train_time:192125ms step_avg:73.87ms +[2025-09-02 09:23:50] [Rank 0] step:2601/10000 train_time:192125ms step_avg:73.87ms +[2025-09-02 09:23:52] [Rank 0] step:2621/10000 train_time:193585ms step_avg:73.86ms +[2025-09-02 09:23:52] [Rank 0] step:2621/10000 train_time:193585ms step_avg:73.86ms +[2025-09-02 09:23:53] [Rank 0] step:2641/10000 train_time:195105ms step_avg:73.88ms 
+[2025-09-02 09:23:53] [Rank 0] step:2641/10000 train_time:195105ms step_avg:73.88ms +[2025-09-02 09:23:55] [Rank 0] step:2661/10000 train_time:196624ms step_avg:73.89ms +[2025-09-02 09:23:55] [Rank 0] step:2661/10000 train_time:196624ms step_avg:73.89ms +[2025-09-02 09:23:56] [Rank 0] step:2681/10000 train_time:198145ms step_avg:73.91ms +[2025-09-02 09:23:56] [Rank 0] step:2681/10000 train_time:198145ms step_avg:73.91ms +[2025-09-02 09:23:58] [Rank 0] step:2701/10000 train_time:199666ms step_avg:73.92ms +[2025-09-02 09:23:58] [Rank 0] step:2701/10000 train_time:199666ms step_avg:73.92ms +[2025-09-02 09:23:59] [Rank 0] step:2721/10000 train_time:201272ms step_avg:73.97ms +[2025-09-02 09:23:59] [Rank 0] step:2721/10000 train_time:201272ms step_avg:73.97ms +[2025-09-02 09:24:01] [Rank 0] step:2741/10000 train_time:202793ms step_avg:73.99ms +[2025-09-02 09:24:01] [Rank 0] step:2741/10000 train_time:202793ms step_avg:73.99ms +[2025-09-02 09:24:03] [Rank 0] step:2761/10000 train_time:204316ms step_avg:74.00ms +[2025-09-02 09:24:03] [Rank 0] step:2761/10000 train_time:204316ms step_avg:74.00ms +[2025-09-02 09:24:04] [Rank 0] step:2781/10000 train_time:205838ms step_avg:74.02ms +[2025-09-02 09:24:04] [Rank 0] step:2781/10000 train_time:205838ms step_avg:74.02ms +[2025-09-02 09:24:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:24:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:24:17] [Rank 0] PRINT: step:2800/10000 val_loss:4.2113 svd_entropy: attn_qk:H=0.7383,top10E=0.30,eRank=141.9,q75/q25=64.22 attn_vo:H=0.8124,top10E=0.17,eRank=251.8,q75/q25=56.34 mlp_w1:H=0.8731,top10E=0.18,eRank=336.0,q75/q25=5.69 mlp_w2:H=0.9705,top10E=0.04,eRank=631.4,q75/q25=2.92 vo_prod:H=0.6971,top10E=0.30,eRank=106.4,q75/q25=3110.78 train_time:207439ms step_avg:74.09ms +[2025-09-02 09:24:17] [Rank 0] PRINT: step:2800/10000 val_loss:4.2113 svd_entropy: attn_qk:H=0.7383,top10E=0.30,eRank=141.9,q75/q25=64.22 attn_vo:H=0.8124,top10E=0.17,eRank=251.8,q75/q25=56.34 mlp_w1:H=0.8731,top10E=0.18,eRank=336.0,q75/q25=5.69 mlp_w2:H=0.9705,top10E=0.04,eRank=631.4,q75/q25=2.92 vo_prod:H=0.6971,top10E=0.30,eRank=106.4,q75/q25=3110.78 train_time:207439ms step_avg:74.09ms +[2025-09-02 09:24:17] [Rank 0] step:2801/10000 train_time:207452ms step_avg:74.06ms +[2025-09-02 09:24:17] [Rank 0] step:2801/10000 train_time:207452ms step_avg:74.06ms +[2025-09-02 09:24:19] [Rank 0] step:2821/10000 train_time:208911ms step_avg:74.06ms +[2025-09-02 09:24:19] [Rank 0] step:2821/10000 train_time:208911ms step_avg:74.06ms +[2025-09-02 09:24:20] [Rank 0] step:2841/10000 train_time:210430ms step_avg:74.07ms +[2025-09-02 09:24:20] [Rank 0] step:2841/10000 train_time:210430ms step_avg:74.07ms +[2025-09-02 09:24:22] [Rank 0] step:2861/10000 train_time:211949ms step_avg:74.08ms +[2025-09-02 09:24:22] [Rank 0] step:2861/10000 train_time:211949ms step_avg:74.08ms +[2025-09-02 09:24:23] [Rank 0] step:2881/10000 train_time:213468ms step_avg:74.10ms +[2025-09-02 09:24:23] [Rank 0] step:2881/10000 train_time:213468ms step_avg:74.10ms +[2025-09-02 09:24:25] [Rank 0] step:2901/10000 train_time:214988ms step_avg:74.11ms +[2025-09-02 09:24:25] [Rank 0] step:2901/10000 train_time:214988ms step_avg:74.11ms +[2025-09-02 09:24:26] [Rank 0] step:2921/10000 train_time:216509ms step_avg:74.12ms +[2025-09-02 09:24:26] [Rank 0] step:2921/10000 train_time:216509ms step_avg:74.12ms +[2025-09-02 
09:24:28] [Rank 0] step:2941/10000 train_time:218028ms step_avg:74.13ms +[2025-09-02 09:24:28] [Rank 0] step:2941/10000 train_time:218028ms step_avg:74.13ms +[2025-09-02 09:24:29] [Rank 0] step:2961/10000 train_time:219549ms step_avg:74.15ms +[2025-09-02 09:24:29] [Rank 0] step:2961/10000 train_time:219549ms step_avg:74.15ms +[2025-09-02 09:24:31] [Rank 0] step:2981/10000 train_time:221076ms step_avg:74.16ms +[2025-09-02 09:24:31] [Rank 0] step:2981/10000 train_time:221076ms step_avg:74.16ms +[2025-09-02 09:24:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:24:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:24:44] [Rank 0] PRINT: step:3000/10000 val_loss:4.1699 svd_entropy: attn_qk:H=0.7414,top10E=0.30,eRank=144.5,q75/q25=64.78 attn_vo:H=0.8178,top10E=0.17,eRank=259.7,q75/q25=55.69 mlp_w1:H=0.8767,top10E=0.17,eRank=343.7,q75/q25=5.54 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.91 vo_prod:H=0.7045,top10E=0.29,eRank=111.2,q75/q25=3137.47 train_time:222684ms step_avg:74.23ms +[2025-09-02 09:24:44] [Rank 0] PRINT: step:3000/10000 val_loss:4.1699 svd_entropy: attn_qk:H=0.7414,top10E=0.30,eRank=144.5,q75/q25=64.78 attn_vo:H=0.8178,top10E=0.17,eRank=259.7,q75/q25=55.69 mlp_w1:H=0.8767,top10E=0.17,eRank=343.7,q75/q25=5.54 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.91 vo_prod:H=0.7045,top10E=0.29,eRank=111.2,q75/q25=3137.47 train_time:222684ms step_avg:74.23ms +[2025-09-02 09:24:44] [Rank 0] step:3001/10000 train_time:222699ms step_avg:74.21ms +[2025-09-02 09:24:44] [Rank 0] step:3001/10000 train_time:222699ms step_avg:74.21ms +[2025-09-02 09:24:46] [Rank 0] step:3021/10000 train_time:224169ms step_avg:74.20ms +[2025-09-02 09:24:46] [Rank 0] step:3021/10000 train_time:224169ms step_avg:74.20ms +[2025-09-02 09:24:47] [Rank 0] step:3041/10000 train_time:225695ms 
step_avg:74.22ms +[2025-09-02 09:24:47] [Rank 0] step:3041/10000 train_time:225695ms step_avg:74.22ms +[2025-09-02 09:24:49] [Rank 0] step:3061/10000 train_time:227222ms step_avg:74.23ms +[2025-09-02 09:24:49] [Rank 0] step:3061/10000 train_time:227222ms step_avg:74.23ms +[2025-09-02 09:24:50] [Rank 0] step:3081/10000 train_time:228752ms step_avg:74.25ms +[2025-09-02 09:24:50] [Rank 0] step:3081/10000 train_time:228752ms step_avg:74.25ms +[2025-09-02 09:24:52] [Rank 0] step:3101/10000 train_time:230279ms step_avg:74.26ms +[2025-09-02 09:24:52] [Rank 0] step:3101/10000 train_time:230279ms step_avg:74.26ms +[2025-09-02 09:24:54] [Rank 0] step:3121/10000 train_time:231806ms step_avg:74.27ms +[2025-09-02 09:24:54] [Rank 0] step:3121/10000 train_time:231806ms step_avg:74.27ms +[2025-09-02 09:24:55] [Rank 0] step:3141/10000 train_time:233332ms step_avg:74.29ms +[2025-09-02 09:24:55] [Rank 0] step:3141/10000 train_time:233332ms step_avg:74.29ms +[2025-09-02 09:24:57] [Rank 0] step:3161/10000 train_time:234861ms step_avg:74.30ms +[2025-09-02 09:24:57] [Rank 0] step:3161/10000 train_time:234861ms step_avg:74.30ms +[2025-09-02 09:24:58] [Rank 0] step:3181/10000 train_time:236391ms step_avg:74.31ms +[2025-09-02 09:24:58] [Rank 0] step:3181/10000 train_time:236391ms step_avg:74.31ms +[2025-09-02 09:25:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:25:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:25:11] [Rank 0] PRINT: step:3200/10000 val_loss:4.1366 svd_entropy: attn_qk:H=0.7439,top10E=0.29,eRank=146.6,q75/q25=64.70 attn_vo:H=0.8212,top10E=0.16,eRank=264.3,q75/q25=55.38 mlp_w1:H=0.8801,top10E=0.17,eRank=351.1,q75/q25=5.41 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.90 vo_prod:H=0.7083,top10E=0.28,eRank=114.3,q75/q25=3063.44 train_time:237998ms step_avg:74.37ms +[2025-09-02 09:25:11] [Rank 0] PRINT: step:3200/10000 val_loss:4.1366 svd_entropy: attn_qk:H=0.7439,top10E=0.29,eRank=146.6,q75/q25=64.70 attn_vo:H=0.8212,top10E=0.16,eRank=264.3,q75/q25=55.38 mlp_w1:H=0.8801,top10E=0.17,eRank=351.1,q75/q25=5.41 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.90 vo_prod:H=0.7083,top10E=0.28,eRank=114.3,q75/q25=3063.44 train_time:237998ms step_avg:74.37ms +[2025-09-02 09:25:11] [Rank 0] step:3201/10000 train_time:238012ms step_avg:74.36ms +[2025-09-02 09:25:11] [Rank 0] step:3201/10000 train_time:238012ms step_avg:74.36ms +[2025-09-02 09:25:13] [Rank 0] step:3221/10000 train_time:239477ms step_avg:74.35ms +[2025-09-02 09:25:13] [Rank 0] step:3221/10000 train_time:239477ms step_avg:74.35ms +[2025-09-02 09:25:14] [Rank 0] step:3241/10000 train_time:241002ms step_avg:74.36ms +[2025-09-02 09:25:14] [Rank 0] step:3241/10000 train_time:241002ms step_avg:74.36ms +[2025-09-02 09:25:16] [Rank 0] step:3261/10000 train_time:242527ms step_avg:74.37ms +[2025-09-02 09:25:16] [Rank 0] step:3261/10000 train_time:242527ms step_avg:74.37ms +[2025-09-02 09:25:18] [Rank 0] step:3281/10000 train_time:244053ms step_avg:74.38ms +[2025-09-02 09:25:18] [Rank 0] step:3281/10000 train_time:244053ms step_avg:74.38ms +[2025-09-02 09:25:19] [Rank 0] step:3301/10000 train_time:245580ms step_avg:74.40ms +[2025-09-02 09:25:19] [Rank 0] step:3301/10000 train_time:245580ms step_avg:74.40ms +[2025-09-02 09:25:21] [Rank 0] step:3321/10000 train_time:247108ms step_avg:74.41ms +[2025-09-02 09:25:21] [Rank 0] step:3321/10000 train_time:247108ms step_avg:74.41ms +[2025-09-02 
09:25:22] [Rank 0] step:3341/10000 train_time:248636ms step_avg:74.42ms +[2025-09-02 09:25:22] [Rank 0] step:3341/10000 train_time:248636ms step_avg:74.42ms +[2025-09-02 09:25:24] [Rank 0] step:3361/10000 train_time:250164ms step_avg:74.43ms +[2025-09-02 09:25:24] [Rank 0] step:3361/10000 train_time:250164ms step_avg:74.43ms +[2025-09-02 09:25:25] [Rank 0] step:3381/10000 train_time:251692ms step_avg:74.44ms +[2025-09-02 09:25:25] [Rank 0] step:3381/10000 train_time:251692ms step_avg:74.44ms +[2025-09-02 09:25:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:25:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:25:38] [Rank 0] PRINT: step:3400/10000 val_loss:4.0983 svd_entropy: attn_qk:H=0.7465,top10E=0.29,eRank=148.9,q75/q25=64.57 attn_vo:H=0.8243,top10E=0.16,eRank=268.6,q75/q25=54.17 mlp_w1:H=0.8832,top10E=0.17,eRank=357.9,q75/q25=5.29 mlp_w2:H=0.9711,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7126,top10E=0.27,eRank=117.8,q75/q25=2885.78 train_time:253298ms step_avg:74.50ms +[2025-09-02 09:25:38] [Rank 0] PRINT: step:3400/10000 val_loss:4.0983 svd_entropy: attn_qk:H=0.7465,top10E=0.29,eRank=148.9,q75/q25=64.57 attn_vo:H=0.8243,top10E=0.16,eRank=268.6,q75/q25=54.17 mlp_w1:H=0.8832,top10E=0.17,eRank=357.9,q75/q25=5.29 mlp_w2:H=0.9711,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7126,top10E=0.27,eRank=117.8,q75/q25=2885.78 train_time:253298ms step_avg:74.50ms +[2025-09-02 09:25:38] [Rank 0] step:3401/10000 train_time:253313ms step_avg:74.48ms +[2025-09-02 09:25:38] [Rank 0] step:3401/10000 train_time:253313ms step_avg:74.48ms +[2025-09-02 09:25:40] [Rank 0] step:3421/10000 train_time:254778ms step_avg:74.47ms +[2025-09-02 09:25:40] [Rank 0] step:3421/10000 train_time:254778ms step_avg:74.47ms +[2025-09-02 09:25:42] [Rank 0] step:3441/10000 train_time:256303ms 
step_avg:74.49ms +[2025-09-02 09:25:42] [Rank 0] step:3441/10000 train_time:256303ms step_avg:74.49ms +[2025-09-02 09:25:43] [Rank 0] step:3461/10000 train_time:257830ms step_avg:74.50ms +[2025-09-02 09:25:43] [Rank 0] step:3461/10000 train_time:257830ms step_avg:74.50ms +[2025-09-02 09:25:45] [Rank 0] step:3481/10000 train_time:259357ms step_avg:74.51ms +[2025-09-02 09:25:45] [Rank 0] step:3481/10000 train_time:259357ms step_avg:74.51ms +[2025-09-02 09:25:46] [Rank 0] step:3501/10000 train_time:260886ms step_avg:74.52ms +[2025-09-02 09:25:46] [Rank 0] step:3501/10000 train_time:260886ms step_avg:74.52ms +[2025-09-02 09:25:48] [Rank 0] step:3521/10000 train_time:262413ms step_avg:74.53ms +[2025-09-02 09:25:48] [Rank 0] step:3521/10000 train_time:262413ms step_avg:74.53ms +[2025-09-02 09:25:49] [Rank 0] step:3541/10000 train_time:263941ms step_avg:74.54ms +[2025-09-02 09:25:49] [Rank 0] step:3541/10000 train_time:263941ms step_avg:74.54ms +[2025-09-02 09:25:51] [Rank 0] step:3561/10000 train_time:265469ms step_avg:74.55ms +[2025-09-02 09:25:51] [Rank 0] step:3561/10000 train_time:265469ms step_avg:74.55ms +[2025-09-02 09:25:52] [Rank 0] step:3581/10000 train_time:266997ms step_avg:74.56ms +[2025-09-02 09:25:52] [Rank 0] step:3581/10000 train_time:266997ms step_avg:74.56ms +[2025-09-02 09:25:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:25:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:26:05] [Rank 0] PRINT: step:3600/10000 val_loss:4.0867 svd_entropy: attn_qk:H=0.7488,top10E=0.29,eRank=151.0,q75/q25=64.64 attn_vo:H=0.8270,top10E=0.16,eRank=272.4,q75/q25=52.92 mlp_w1:H=0.8858,top10E=0.16,eRank=363.9,q75/q25=5.19 mlp_w2:H=0.9712,top10E=0.04,eRank=634.1,q75/q25=2.89 vo_prod:H=0.7162,top10E=0.27,eRank=120.9,q75/q25=2741.88 train_time:268605ms step_avg:74.61ms +[2025-09-02 09:26:05] [Rank 0] PRINT: step:3600/10000 val_loss:4.0867 svd_entropy: attn_qk:H=0.7488,top10E=0.29,eRank=151.0,q75/q25=64.64 attn_vo:H=0.8270,top10E=0.16,eRank=272.4,q75/q25=52.92 mlp_w1:H=0.8858,top10E=0.16,eRank=363.9,q75/q25=5.19 mlp_w2:H=0.9712,top10E=0.04,eRank=634.1,q75/q25=2.89 vo_prod:H=0.7162,top10E=0.27,eRank=120.9,q75/q25=2741.88 train_time:268605ms step_avg:74.61ms +[2025-09-02 09:26:05] [Rank 0] step:3601/10000 train_time:268620ms step_avg:74.60ms +[2025-09-02 09:26:05] [Rank 0] step:3601/10000 train_time:268620ms step_avg:74.60ms +[2025-09-02 09:26:07] [Rank 0] step:3621/10000 train_time:270090ms step_avg:74.59ms +[2025-09-02 09:26:07] [Rank 0] step:3621/10000 train_time:270090ms step_avg:74.59ms +[2025-09-02 09:26:09] [Rank 0] step:3641/10000 train_time:271617ms step_avg:74.60ms +[2025-09-02 09:26:09] [Rank 0] step:3641/10000 train_time:271617ms step_avg:74.60ms +[2025-09-02 09:26:10] [Rank 0] step:3661/10000 train_time:273146ms step_avg:74.61ms +[2025-09-02 09:26:10] [Rank 0] step:3661/10000 train_time:273146ms step_avg:74.61ms +[2025-09-02 09:26:12] [Rank 0] step:3681/10000 train_time:274677ms step_avg:74.62ms +[2025-09-02 09:26:12] [Rank 0] step:3681/10000 train_time:274677ms step_avg:74.62ms +[2025-09-02 09:26:13] [Rank 0] step:3701/10000 train_time:276207ms step_avg:74.63ms +[2025-09-02 09:26:13] [Rank 0] step:3701/10000 train_time:276207ms step_avg:74.63ms +[2025-09-02 09:26:15] [Rank 0] step:3721/10000 train_time:277765ms step_avg:74.65ms +[2025-09-02 09:26:15] [Rank 0] step:3721/10000 train_time:277765ms step_avg:74.65ms +[2025-09-02 
09:26:16] [Rank 0] step:3741/10000 train_time:279331ms step_avg:74.67ms +[2025-09-02 09:26:16] [Rank 0] step:3741/10000 train_time:279331ms step_avg:74.67ms +[2025-09-02 09:26:18] [Rank 0] step:3761/10000 train_time:280903ms step_avg:74.69ms +[2025-09-02 09:26:18] [Rank 0] step:3761/10000 train_time:280903ms step_avg:74.69ms +[2025-09-02 09:26:19] [Rank 0] step:3781/10000 train_time:282469ms step_avg:74.71ms +[2025-09-02 09:26:19] [Rank 0] step:3781/10000 train_time:282469ms step_avg:74.71ms +[2025-09-02 09:26:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:26:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:26:33] [Rank 0] PRINT: step:3800/10000 val_loss:4.0404 svd_entropy: attn_qk:H=0.7507,top10E=0.29,eRank=152.7,q75/q25=64.19 attn_vo:H=0.8294,top10E=0.15,eRank=275.9,q75/q25=51.73 mlp_w1:H=0.8882,top10E=0.16,eRank=369.5,q75/q25=5.10 mlp_w2:H=0.9713,top10E=0.04,eRank=634.5,q75/q25=2.88 vo_prod:H=0.7196,top10E=0.27,eRank=123.8,q75/q25=2611.68 train_time:284117ms step_avg:74.77ms +[2025-09-02 09:26:33] [Rank 0] PRINT: step:3800/10000 val_loss:4.0404 svd_entropy: attn_qk:H=0.7507,top10E=0.29,eRank=152.7,q75/q25=64.19 attn_vo:H=0.8294,top10E=0.15,eRank=275.9,q75/q25=51.73 mlp_w1:H=0.8882,top10E=0.16,eRank=369.5,q75/q25=5.10 mlp_w2:H=0.9713,top10E=0.04,eRank=634.5,q75/q25=2.88 vo_prod:H=0.7196,top10E=0.27,eRank=123.8,q75/q25=2611.68 train_time:284117ms step_avg:74.77ms +[2025-09-02 09:26:33] [Rank 0] step:3801/10000 train_time:284131ms step_avg:74.75ms +[2025-09-02 09:26:33] [Rank 0] step:3801/10000 train_time:284131ms step_avg:74.75ms +[2025-09-02 09:26:34] [Rank 0] step:3821/10000 train_time:285634ms step_avg:74.75ms +[2025-09-02 09:26:34] [Rank 0] step:3821/10000 train_time:285634ms step_avg:74.75ms +[2025-09-02 09:26:36] [Rank 0] step:3841/10000 train_time:287199ms 
step_avg:74.77ms +[2025-09-02 09:26:36] [Rank 0] step:3841/10000 train_time:287199ms step_avg:74.77ms +[2025-09-02 09:26:37] [Rank 0] step:3861/10000 train_time:288763ms step_avg:74.79ms +[2025-09-02 09:26:37] [Rank 0] step:3861/10000 train_time:288763ms step_avg:74.79ms +[2025-09-02 09:26:39] [Rank 0] step:3881/10000 train_time:290326ms step_avg:74.81ms +[2025-09-02 09:26:39] [Rank 0] step:3881/10000 train_time:290326ms step_avg:74.81ms +[2025-09-02 09:26:41] [Rank 0] step:3901/10000 train_time:291892ms step_avg:74.82ms +[2025-09-02 09:26:41] [Rank 0] step:3901/10000 train_time:291892ms step_avg:74.82ms +[2025-09-02 09:26:42] [Rank 0] step:3921/10000 train_time:293458ms step_avg:74.84ms +[2025-09-02 09:26:42] [Rank 0] step:3921/10000 train_time:293458ms step_avg:74.84ms +[2025-09-02 09:26:44] [Rank 0] step:3941/10000 train_time:295019ms step_avg:74.86ms +[2025-09-02 09:26:44] [Rank 0] step:3941/10000 train_time:295019ms step_avg:74.86ms +[2025-09-02 09:26:45] [Rank 0] step:3961/10000 train_time:296580ms step_avg:74.88ms +[2025-09-02 09:26:45] [Rank 0] step:3961/10000 train_time:296580ms step_avg:74.88ms +[2025-09-02 09:26:47] [Rank 0] step:3981/10000 train_time:298143ms step_avg:74.89ms +[2025-09-02 09:26:47] [Rank 0] step:3981/10000 train_time:298143ms step_avg:74.89ms +[2025-09-02 09:26:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:26:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:27:00] [Rank 0] PRINT: step:4000/10000 val_loss:4.0141 svd_entropy: attn_qk:H=0.7527,top10E=0.28,eRank=154.5,q75/q25=63.33 attn_vo:H=0.8317,top10E=0.15,eRank=279.3,q75/q25=50.00 mlp_w1:H=0.8905,top10E=0.16,eRank=374.9,q75/q25=5.01 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.87 vo_prod:H=0.7228,top10E=0.26,eRank=126.7,q75/q25=2477.38 train_time:299787ms step_avg:74.95ms +[2025-09-02 09:27:00] [Rank 0] PRINT: step:4000/10000 val_loss:4.0141 svd_entropy: attn_qk:H=0.7527,top10E=0.28,eRank=154.5,q75/q25=63.33 attn_vo:H=0.8317,top10E=0.15,eRank=279.3,q75/q25=50.00 mlp_w1:H=0.8905,top10E=0.16,eRank=374.9,q75/q25=5.01 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.87 vo_prod:H=0.7228,top10E=0.26,eRank=126.7,q75/q25=2477.38 train_time:299787ms step_avg:74.95ms +[2025-09-02 09:27:00] [Rank 0] step:4001/10000 train_time:299802ms step_avg:74.93ms +[2025-09-02 09:27:00] [Rank 0] step:4001/10000 train_time:299802ms step_avg:74.93ms +[2025-09-02 09:27:02] [Rank 0] step:4021/10000 train_time:301286ms step_avg:74.93ms +[2025-09-02 09:27:02] [Rank 0] step:4021/10000 train_time:301286ms step_avg:74.93ms +[2025-09-02 09:27:03] [Rank 0] step:4041/10000 train_time:302849ms step_avg:74.94ms +[2025-09-02 09:27:03] [Rank 0] step:4041/10000 train_time:302849ms step_avg:74.94ms +[2025-09-02 09:27:05] [Rank 0] step:4061/10000 train_time:304411ms step_avg:74.96ms +[2025-09-02 09:27:05] [Rank 0] step:4061/10000 train_time:304411ms step_avg:74.96ms +[2025-09-02 09:27:07] [Rank 0] step:4081/10000 train_time:306154ms step_avg:75.02ms +[2025-09-02 09:27:07] [Rank 0] step:4081/10000 train_time:306154ms step_avg:75.02ms +[2025-09-02 09:27:08] [Rank 0] step:4101/10000 train_time:307717ms step_avg:75.03ms +[2025-09-02 09:27:08] [Rank 0] step:4101/10000 train_time:307717ms step_avg:75.03ms +[2025-09-02 09:27:10] [Rank 0] step:4121/10000 train_time:309319ms step_avg:75.06ms +[2025-09-02 09:27:10] [Rank 0] step:4121/10000 train_time:309319ms step_avg:75.06ms +[2025-09-02 
09:27:11] [Rank 0] step:4141/10000 train_time:310884ms step_avg:75.07ms +[2025-09-02 09:27:11] [Rank 0] step:4141/10000 train_time:310884ms step_avg:75.07ms +[2025-09-02 09:27:13] [Rank 0] step:4161/10000 train_time:312445ms step_avg:75.09ms +[2025-09-02 09:27:13] [Rank 0] step:4161/10000 train_time:312445ms step_avg:75.09ms +[2025-09-02 09:27:14] [Rank 0] step:4181/10000 train_time:314009ms step_avg:75.10ms +[2025-09-02 09:27:14] [Rank 0] step:4181/10000 train_time:314009ms step_avg:75.10ms +[2025-09-02 09:27:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:27:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:27:28] [Rank 0] PRINT: step:4200/10000 val_loss:3.9966 svd_entropy: attn_qk:H=0.7548,top10E=0.28,eRank=156.4,q75/q25=63.09 attn_vo:H=0.8337,top10E=0.15,eRank=282.3,q75/q25=49.36 mlp_w1:H=0.8925,top10E=0.16,eRank=379.9,q75/q25=4.92 mlp_w2:H=0.9714,top10E=0.04,eRank=635.1,q75/q25=2.86 vo_prod:H=0.7258,top10E=0.26,eRank=129.4,q75/q25=2389.58 train_time:315654ms step_avg:75.16ms +[2025-09-02 09:27:28] [Rank 0] PRINT: step:4200/10000 val_loss:3.9966 svd_entropy: attn_qk:H=0.7548,top10E=0.28,eRank=156.4,q75/q25=63.09 attn_vo:H=0.8337,top10E=0.15,eRank=282.3,q75/q25=49.36 mlp_w1:H=0.8925,top10E=0.16,eRank=379.9,q75/q25=4.92 mlp_w2:H=0.9714,top10E=0.04,eRank=635.1,q75/q25=2.86 vo_prod:H=0.7258,top10E=0.26,eRank=129.4,q75/q25=2389.58 train_time:315654ms step_avg:75.16ms +[2025-09-02 09:27:28] [Rank 0] step:4201/10000 train_time:315669ms step_avg:75.14ms +[2025-09-02 09:27:28] [Rank 0] step:4201/10000 train_time:315669ms step_avg:75.14ms +[2025-09-02 09:27:30] [Rank 0] step:4221/10000 train_time:317170ms step_avg:75.14ms +[2025-09-02 09:27:30] [Rank 0] step:4221/10000 train_time:317170ms step_avg:75.14ms +[2025-09-02 09:27:31] [Rank 0] step:4241/10000 train_time:318733ms 
step_avg:75.16ms +[2025-09-02 09:27:31] [Rank 0] step:4241/10000 train_time:318733ms step_avg:75.16ms +[2025-09-02 09:27:33] [Rank 0] step:4261/10000 train_time:320296ms step_avg:75.17ms +[2025-09-02 09:27:33] [Rank 0] step:4261/10000 train_time:320296ms step_avg:75.17ms +[2025-09-02 09:27:34] [Rank 0] step:4281/10000 train_time:321860ms step_avg:75.18ms +[2025-09-02 09:27:34] [Rank 0] step:4281/10000 train_time:321860ms step_avg:75.18ms +[2025-09-02 09:27:36] [Rank 0] step:4301/10000 train_time:323423ms step_avg:75.20ms +[2025-09-02 09:27:36] [Rank 0] step:4301/10000 train_time:323423ms step_avg:75.20ms +[2025-09-02 09:27:37] [Rank 0] step:4321/10000 train_time:324987ms step_avg:75.21ms +[2025-09-02 09:27:37] [Rank 0] step:4321/10000 train_time:324987ms step_avg:75.21ms +[2025-09-02 09:27:39] [Rank 0] step:4341/10000 train_time:326551ms step_avg:75.22ms +[2025-09-02 09:27:39] [Rank 0] step:4341/10000 train_time:326551ms step_avg:75.22ms +[2025-09-02 09:27:41] [Rank 0] step:4361/10000 train_time:328115ms step_avg:75.24ms +[2025-09-02 09:27:41] [Rank 0] step:4361/10000 train_time:328115ms step_avg:75.24ms +[2025-09-02 09:27:42] [Rank 0] step:4381/10000 train_time:329676ms step_avg:75.25ms +[2025-09-02 09:27:42] [Rank 0] step:4381/10000 train_time:329676ms step_avg:75.25ms +[2025-09-02 09:27:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:27:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:27:55] [Rank 0] PRINT: step:4400/10000 val_loss:3.9717 svd_entropy: attn_qk:H=0.7566,top10E=0.28,eRank=158.0,q75/q25=62.24 attn_vo:H=0.8356,top10E=0.15,eRank=285.1,q75/q25=48.24 mlp_w1:H=0.8945,top10E=0.15,eRank=384.6,q75/q25=4.86 mlp_w2:H=0.9715,top10E=0.04,eRank=635.4,q75/q25=2.86 vo_prod:H=0.7284,top10E=0.25,eRank=131.8,q75/q25=2296.56 train_time:331320ms step_avg:75.30ms +[2025-09-02 09:27:55] [Rank 0] PRINT: step:4400/10000 val_loss:3.9717 svd_entropy: attn_qk:H=0.7566,top10E=0.28,eRank=158.0,q75/q25=62.24 attn_vo:H=0.8356,top10E=0.15,eRank=285.1,q75/q25=48.24 mlp_w1:H=0.8945,top10E=0.15,eRank=384.6,q75/q25=4.86 mlp_w2:H=0.9715,top10E=0.04,eRank=635.4,q75/q25=2.86 vo_prod:H=0.7284,top10E=0.25,eRank=131.8,q75/q25=2296.56 train_time:331320ms step_avg:75.30ms +[2025-09-02 09:27:55] [Rank 0] step:4401/10000 train_time:331334ms step_avg:75.29ms +[2025-09-02 09:27:55] [Rank 0] step:4401/10000 train_time:331334ms step_avg:75.29ms +[2025-09-02 09:27:57] [Rank 0] step:4421/10000 train_time:332820ms step_avg:75.28ms +[2025-09-02 09:27:57] [Rank 0] step:4421/10000 train_time:332820ms step_avg:75.28ms +[2025-09-02 09:27:59] [Rank 0] step:4441/10000 train_time:334382ms step_avg:75.29ms +[2025-09-02 09:27:59] [Rank 0] step:4441/10000 train_time:334382ms step_avg:75.29ms +[2025-09-02 09:28:00] [Rank 0] step:4461/10000 train_time:335950ms step_avg:75.31ms +[2025-09-02 09:28:00] [Rank 0] step:4461/10000 train_time:335950ms step_avg:75.31ms +[2025-09-02 09:28:02] [Rank 0] step:4481/10000 train_time:337521ms step_avg:75.32ms +[2025-09-02 09:28:02] [Rank 0] step:4481/10000 train_time:337521ms step_avg:75.32ms +[2025-09-02 09:28:03] [Rank 0] step:4501/10000 train_time:339091ms step_avg:75.34ms +[2025-09-02 09:28:03] [Rank 0] step:4501/10000 train_time:339091ms step_avg:75.34ms +[2025-09-02 09:28:05] [Rank 0] step:4521/10000 train_time:340659ms step_avg:75.35ms +[2025-09-02 09:28:05] [Rank 0] step:4521/10000 train_time:340659ms step_avg:75.35ms +[2025-09-02 
09:28:06] [Rank 0] step:4541/10000 train_time:342230ms step_avg:75.36ms +[2025-09-02 09:28:06] [Rank 0] step:4541/10000 train_time:342230ms step_avg:75.36ms +[2025-09-02 09:28:08] [Rank 0] step:4561/10000 train_time:343799ms step_avg:75.38ms +[2025-09-02 09:28:08] [Rank 0] step:4561/10000 train_time:343799ms step_avg:75.38ms +[2025-09-02 09:28:10] [Rank 0] step:4581/10000 train_time:345372ms step_avg:75.39ms +[2025-09-02 09:28:10] [Rank 0] step:4581/10000 train_time:345372ms step_avg:75.39ms +[2025-09-02 09:28:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:28:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:28:23] [Rank 0] PRINT: step:4600/10000 val_loss:3.9458 svd_entropy: attn_qk:H=0.7585,top10E=0.28,eRank=159.8,q75/q25=62.01 attn_vo:H=0.8374,top10E=0.15,eRank=287.8,q75/q25=47.22 mlp_w1:H=0.8964,top10E=0.15,eRank=389.3,q75/q25=4.79 mlp_w2:H=0.9715,top10E=0.04,eRank=635.6,q75/q25=2.86 vo_prod:H=0.7312,top10E=0.25,eRank=134.3,q75/q25=2195.29 train_time:347023ms step_avg:75.44ms +[2025-09-02 09:28:23] [Rank 0] PRINT: step:4600/10000 val_loss:3.9458 svd_entropy: attn_qk:H=0.7585,top10E=0.28,eRank=159.8,q75/q25=62.01 attn_vo:H=0.8374,top10E=0.15,eRank=287.8,q75/q25=47.22 mlp_w1:H=0.8964,top10E=0.15,eRank=389.3,q75/q25=4.79 mlp_w2:H=0.9715,top10E=0.04,eRank=635.6,q75/q25=2.86 vo_prod:H=0.7312,top10E=0.25,eRank=134.3,q75/q25=2195.29 train_time:347023ms step_avg:75.44ms +[2025-09-02 09:28:23] [Rank 0] step:4601/10000 train_time:347037ms step_avg:75.43ms +[2025-09-02 09:28:23] [Rank 0] step:4601/10000 train_time:347037ms step_avg:75.43ms +[2025-09-02 09:28:24] [Rank 0] step:4621/10000 train_time:348530ms step_avg:75.42ms +[2025-09-02 09:28:24] [Rank 0] step:4621/10000 train_time:348530ms step_avg:75.42ms +[2025-09-02 09:28:26] [Rank 0] step:4641/10000 train_time:350099ms 
step_avg:75.44ms +[2025-09-02 09:28:26] [Rank 0] step:4641/10000 train_time:350099ms step_avg:75.44ms +[2025-09-02 09:28:28] [Rank 0] step:4661/10000 train_time:351671ms step_avg:75.45ms +[2025-09-02 09:28:28] [Rank 0] step:4661/10000 train_time:351671ms step_avg:75.45ms +[2025-09-02 09:28:29] [Rank 0] step:4681/10000 train_time:353242ms step_avg:75.46ms +[2025-09-02 09:28:29] [Rank 0] step:4681/10000 train_time:353242ms step_avg:75.46ms +[2025-09-02 09:28:31] [Rank 0] step:4701/10000 train_time:354814ms step_avg:75.48ms +[2025-09-02 09:28:31] [Rank 0] step:4701/10000 train_time:354814ms step_avg:75.48ms +[2025-09-02 09:28:32] [Rank 0] step:4721/10000 train_time:356386ms step_avg:75.49ms +[2025-09-02 09:28:32] [Rank 0] step:4721/10000 train_time:356386ms step_avg:75.49ms +[2025-09-02 09:28:34] [Rank 0] step:4741/10000 train_time:357959ms step_avg:75.50ms +[2025-09-02 09:28:34] [Rank 0] step:4741/10000 train_time:357959ms step_avg:75.50ms +[2025-09-02 09:28:35] [Rank 0] step:4761/10000 train_time:359531ms step_avg:75.52ms +[2025-09-02 09:28:35] [Rank 0] step:4761/10000 train_time:359531ms step_avg:75.52ms +[2025-09-02 09:28:37] [Rank 0] step:4781/10000 train_time:361105ms step_avg:75.53ms +[2025-09-02 09:28:37] [Rank 0] step:4781/10000 train_time:361105ms step_avg:75.53ms +[2025-09-02 09:28:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:28:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:28:50] [Rank 0] PRINT: step:4800/10000 val_loss:3.9350 svd_entropy: attn_qk:H=0.7601,top10E=0.27,eRank=161.5,q75/q25=61.28 attn_vo:H=0.8392,top10E=0.14,eRank=290.5,q75/q25=46.30 mlp_w1:H=0.8980,top10E=0.15,eRank=393.5,q75/q25=4.72 mlp_w2:H=0.9715,top10E=0.04,eRank=635.6,q75/q25=2.85 vo_prod:H=0.7337,top10E=0.25,eRank=136.8,q75/q25=2127.68 train_time:362761ms step_avg:75.58ms +[2025-09-02 09:28:50] [Rank 0] PRINT: step:4800/10000 val_loss:3.9350 svd_entropy: attn_qk:H=0.7601,top10E=0.27,eRank=161.5,q75/q25=61.28 attn_vo:H=0.8392,top10E=0.14,eRank=290.5,q75/q25=46.30 mlp_w1:H=0.8980,top10E=0.15,eRank=393.5,q75/q25=4.72 mlp_w2:H=0.9715,top10E=0.04,eRank=635.6,q75/q25=2.85 vo_prod:H=0.7337,top10E=0.25,eRank=136.8,q75/q25=2127.68 train_time:362761ms step_avg:75.58ms +[2025-09-02 09:28:50] [Rank 0] step:4801/10000 train_time:362775ms step_avg:75.56ms +[2025-09-02 09:28:50] [Rank 0] step:4801/10000 train_time:362775ms step_avg:75.56ms +[2025-09-02 09:28:52] [Rank 0] step:4821/10000 train_time:364295ms step_avg:75.56ms +[2025-09-02 09:28:52] [Rank 0] step:4821/10000 train_time:364295ms step_avg:75.56ms +[2025-09-02 09:28:54] [Rank 0] step:4841/10000 train_time:365867ms step_avg:75.58ms +[2025-09-02 09:28:54] [Rank 0] step:4841/10000 train_time:365867ms step_avg:75.58ms +[2025-09-02 09:28:55] [Rank 0] step:4861/10000 train_time:367440ms step_avg:75.59ms +[2025-09-02 09:28:55] [Rank 0] step:4861/10000 train_time:367440ms step_avg:75.59ms +[2025-09-02 09:28:57] [Rank 0] step:4881/10000 train_time:369008ms step_avg:75.60ms +[2025-09-02 09:28:57] [Rank 0] step:4881/10000 train_time:369008ms step_avg:75.60ms +[2025-09-02 09:28:58] [Rank 0] step:4901/10000 train_time:370576ms step_avg:75.61ms +[2025-09-02 09:28:58] [Rank 0] step:4901/10000 train_time:370576ms step_avg:75.61ms +[2025-09-02 09:29:00] [Rank 0] step:4921/10000 train_time:372150ms step_avg:75.62ms +[2025-09-02 09:29:00] [Rank 0] step:4921/10000 train_time:372150ms step_avg:75.62ms +[2025-09-02 
09:29:01] [Rank 0] step:4941/10000 train_time:373723ms step_avg:75.64ms +[2025-09-02 09:29:01] [Rank 0] step:4941/10000 train_time:373723ms step_avg:75.64ms +[2025-09-02 09:29:03] [Rank 0] step:4961/10000 train_time:375296ms step_avg:75.65ms +[2025-09-02 09:29:03] [Rank 0] step:4961/10000 train_time:375296ms step_avg:75.65ms +[2025-09-02 09:29:05] [Rank 0] step:4981/10000 train_time:376867ms step_avg:75.66ms +[2025-09-02 09:29:05] [Rank 0] step:4981/10000 train_time:376867ms step_avg:75.66ms +[2025-09-02 09:29:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:29:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:29:18] [Rank 0] PRINT: step:5000/10000 val_loss:3.9144 svd_entropy: attn_qk:H=0.7617,top10E=0.27,eRank=162.9,q75/q25=61.04 attn_vo:H=0.8407,top10E=0.14,eRank=292.8,q75/q25=45.23 mlp_w1:H=0.8996,top10E=0.15,eRank=397.5,q75/q25=4.67 mlp_w2:H=0.9715,top10E=0.04,eRank=635.7,q75/q25=2.85 vo_prod:H=0.7360,top10E=0.24,eRank=138.9,q75/q25=2075.11 train_time:378518ms step_avg:75.70ms +[2025-09-02 09:29:18] [Rank 0] PRINT: step:5000/10000 val_loss:3.9144 svd_entropy: attn_qk:H=0.7617,top10E=0.27,eRank=162.9,q75/q25=61.04 attn_vo:H=0.8407,top10E=0.14,eRank=292.8,q75/q25=45.23 mlp_w1:H=0.8996,top10E=0.15,eRank=397.5,q75/q25=4.67 mlp_w2:H=0.9715,top10E=0.04,eRank=635.7,q75/q25=2.85 vo_prod:H=0.7360,top10E=0.24,eRank=138.9,q75/q25=2075.11 train_time:378518ms step_avg:75.70ms +[2025-09-02 09:29:18] [Rank 0] step:5001/10000 train_time:378532ms step_avg:75.69ms +[2025-09-02 09:29:18] [Rank 0] step:5001/10000 train_time:378532ms step_avg:75.69ms +[2025-09-02 09:29:20] [Rank 0] step:5021/10000 train_time:380029ms step_avg:75.69ms +[2025-09-02 09:29:20] [Rank 0] step:5021/10000 train_time:380029ms step_avg:75.69ms +[2025-09-02 09:29:21] [Rank 0] step:5041/10000 train_time:381599ms 
step_avg:75.70ms +[2025-09-02 09:29:21] [Rank 0] step:5041/10000 train_time:381599ms step_avg:75.70ms +[2025-09-02 09:29:23] [Rank 0] step:5061/10000 train_time:383165ms step_avg:75.71ms +[2025-09-02 09:29:23] [Rank 0] step:5061/10000 train_time:383165ms step_avg:75.71ms +[2025-09-02 09:29:24] [Rank 0] step:5081/10000 train_time:384733ms step_avg:75.72ms +[2025-09-02 09:29:24] [Rank 0] step:5081/10000 train_time:384733ms step_avg:75.72ms +[2025-09-02 09:29:26] [Rank 0] step:5101/10000 train_time:386304ms step_avg:75.73ms +[2025-09-02 09:29:26] [Rank 0] step:5101/10000 train_time:386304ms step_avg:75.73ms +[2025-09-02 09:29:27] [Rank 0] step:5121/10000 train_time:387873ms step_avg:75.74ms +[2025-09-02 09:29:27] [Rank 0] step:5121/10000 train_time:387873ms step_avg:75.74ms +[2025-09-02 09:29:29] [Rank 0] step:5141/10000 train_time:389446ms step_avg:75.75ms +[2025-09-02 09:29:29] [Rank 0] step:5141/10000 train_time:389446ms step_avg:75.75ms +[2025-09-02 09:29:31] [Rank 0] step:5161/10000 train_time:391017ms step_avg:75.76ms +[2025-09-02 09:29:31] [Rank 0] step:5161/10000 train_time:391017ms step_avg:75.76ms +[2025-09-02 09:29:32] [Rank 0] step:5181/10000 train_time:392590ms step_avg:75.77ms +[2025-09-02 09:29:32] [Rank 0] step:5181/10000 train_time:392590ms step_avg:75.77ms +[2025-09-02 09:29:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:29:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:29:45] [Rank 0] PRINT: step:5200/10000 val_loss:3.8969 svd_entropy: attn_qk:H=0.7631,top10E=0.27,eRank=164.4,q75/q25=60.66 attn_vo:H=0.8421,top10E=0.14,eRank=295.1,q75/q25=44.39 mlp_w1:H=0.9011,top10E=0.15,eRank=401.4,q75/q25=4.62 mlp_w2:H=0.9716,top10E=0.04,eRank=635.8,q75/q25=2.85 vo_prod:H=0.7382,top10E=0.24,eRank=141.0,q75/q25=2014.01 train_time:394267ms step_avg:75.82ms +[2025-09-02 09:29:45] [Rank 0] PRINT: step:5200/10000 val_loss:3.8969 svd_entropy: attn_qk:H=0.7631,top10E=0.27,eRank=164.4,q75/q25=60.66 attn_vo:H=0.8421,top10E=0.14,eRank=295.1,q75/q25=44.39 mlp_w1:H=0.9011,top10E=0.15,eRank=401.4,q75/q25=4.62 mlp_w2:H=0.9716,top10E=0.04,eRank=635.8,q75/q25=2.85 vo_prod:H=0.7382,top10E=0.24,eRank=141.0,q75/q25=2014.01 train_time:394267ms step_avg:75.82ms +[2025-09-02 09:29:45] [Rank 0] step:5201/10000 train_time:394281ms step_avg:75.81ms +[2025-09-02 09:29:45] [Rank 0] step:5201/10000 train_time:394281ms step_avg:75.81ms +[2025-09-02 09:29:47] [Rank 0] step:5221/10000 train_time:395799ms step_avg:75.81ms +[2025-09-02 09:29:47] [Rank 0] step:5221/10000 train_time:395799ms step_avg:75.81ms +[2025-09-02 09:29:49] [Rank 0] step:5241/10000 train_time:397400ms step_avg:75.83ms +[2025-09-02 09:29:49] [Rank 0] step:5241/10000 train_time:397400ms step_avg:75.83ms +[2025-09-02 09:29:50] [Rank 0] step:5261/10000 train_time:399000ms step_avg:75.84ms +[2025-09-02 09:29:50] [Rank 0] step:5261/10000 train_time:399000ms step_avg:75.84ms +[2025-09-02 09:29:52] [Rank 0] step:5281/10000 train_time:400601ms step_avg:75.86ms +[2025-09-02 09:29:52] [Rank 0] step:5281/10000 train_time:400601ms step_avg:75.86ms +[2025-09-02 09:29:53] [Rank 0] step:5301/10000 train_time:402212ms step_avg:75.87ms +[2025-09-02 09:29:53] [Rank 0] step:5301/10000 train_time:402212ms step_avg:75.87ms +[2025-09-02 09:29:55] [Rank 0] step:5321/10000 train_time:403811ms step_avg:75.89ms +[2025-09-02 09:29:55] [Rank 0] step:5321/10000 train_time:403811ms step_avg:75.89ms +[2025-09-02 
09:29:57] [Rank 0] step:5341/10000 train_time:405411ms step_avg:75.91ms +[2025-09-02 09:29:57] [Rank 0] step:5341/10000 train_time:405411ms step_avg:75.91ms +[2025-09-02 09:29:58] [Rank 0] step:5361/10000 train_time:407015ms step_avg:75.92ms +[2025-09-02 09:29:58] [Rank 0] step:5361/10000 train_time:407015ms step_avg:75.92ms +[2025-09-02 09:30:00] [Rank 0] step:5381/10000 train_time:408618ms step_avg:75.94ms +[2025-09-02 09:30:00] [Rank 0] step:5381/10000 train_time:408618ms step_avg:75.94ms +[2025-09-02 09:30:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:30:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:30:13] [Rank 0] PRINT: step:5400/10000 val_loss:3.8795 svd_entropy: attn_qk:H=0.7643,top10E=0.27,eRank=165.6,q75/q25=60.00 attn_vo:H=0.8434,top10E=0.14,eRank=297.0,q75/q25=43.85 mlp_w1:H=0.9025,top10E=0.15,eRank=405.1,q75/q25=4.57 mlp_w2:H=0.9716,top10E=0.04,eRank=635.9,q75/q25=2.85 vo_prod:H=0.7401,top10E=0.24,eRank=142.9,q75/q25=1952.17 train_time:410300ms step_avg:75.98ms +[2025-09-02 09:30:13] [Rank 0] PRINT: step:5400/10000 val_loss:3.8795 svd_entropy: attn_qk:H=0.7643,top10E=0.27,eRank=165.6,q75/q25=60.00 attn_vo:H=0.8434,top10E=0.14,eRank=297.0,q75/q25=43.85 mlp_w1:H=0.9025,top10E=0.15,eRank=405.1,q75/q25=4.57 mlp_w2:H=0.9716,top10E=0.04,eRank=635.9,q75/q25=2.85 vo_prod:H=0.7401,top10E=0.24,eRank=142.9,q75/q25=1952.17 train_time:410300ms step_avg:75.98ms +[2025-09-02 09:30:13] [Rank 0] step:5401/10000 train_time:410314ms step_avg:75.97ms +[2025-09-02 09:30:13] [Rank 0] step:5401/10000 train_time:410314ms step_avg:75.97ms +[2025-09-02 09:30:15] [Rank 0] step:5421/10000 train_time:411844ms step_avg:75.97ms +[2025-09-02 09:30:15] [Rank 0] step:5421/10000 train_time:411844ms step_avg:75.97ms +[2025-09-02 09:30:16] [Rank 0] step:5441/10000 train_time:413442ms 
step_avg:75.99ms +[2025-09-02 09:30:16] [Rank 0] step:5441/10000 train_time:413442ms step_avg:75.99ms +[2025-09-02 09:30:18] [Rank 0] step:5461/10000 train_time:415046ms step_avg:76.00ms +[2025-09-02 09:30:18] [Rank 0] step:5461/10000 train_time:415046ms step_avg:76.00ms +[2025-09-02 09:30:20] [Rank 0] step:5481/10000 train_time:416679ms step_avg:76.02ms +[2025-09-02 09:30:20] [Rank 0] step:5481/10000 train_time:416679ms step_avg:76.02ms +[2025-09-02 09:30:21] [Rank 0] step:5501/10000 train_time:418287ms step_avg:76.04ms +[2025-09-02 09:30:21] [Rank 0] step:5501/10000 train_time:418287ms step_avg:76.04ms +[2025-09-02 09:30:23] [Rank 0] step:5521/10000 train_time:419895ms step_avg:76.05ms +[2025-09-02 09:30:23] [Rank 0] step:5521/10000 train_time:419895ms step_avg:76.05ms +[2025-09-02 09:30:25] [Rank 0] step:5541/10000 train_time:421498ms step_avg:76.07ms +[2025-09-02 09:30:25] [Rank 0] step:5541/10000 train_time:421498ms step_avg:76.07ms +[2025-09-02 09:30:26] [Rank 0] step:5561/10000 train_time:423101ms step_avg:76.08ms +[2025-09-02 09:30:26] [Rank 0] step:5561/10000 train_time:423101ms step_avg:76.08ms +[2025-09-02 09:30:28] [Rank 0] step:5581/10000 train_time:424705ms step_avg:76.10ms +[2025-09-02 09:30:28] [Rank 0] step:5581/10000 train_time:424705ms step_avg:76.10ms +[2025-09-02 09:30:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:30:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:30:41] [Rank 0] PRINT: step:5600/10000 val_loss:3.8666 svd_entropy: attn_qk:H=0.7657,top10E=0.27,eRank=167.0,q75/q25=59.34 attn_vo:H=0.8446,top10E=0.14,eRank=299.0,q75/q25=43.00 mlp_w1:H=0.9039,top10E=0.14,eRank=408.6,q75/q25=4.53 mlp_w2:H=0.9716,top10E=0.04,eRank=635.9,q75/q25=2.84 vo_prod:H=0.7420,top10E=0.24,eRank=144.7,q75/q25=1898.22 train_time:426394ms step_avg:76.14ms +[2025-09-02 09:30:41] [Rank 0] PRINT: step:5600/10000 val_loss:3.8666 svd_entropy: attn_qk:H=0.7657,top10E=0.27,eRank=167.0,q75/q25=59.34 attn_vo:H=0.8446,top10E=0.14,eRank=299.0,q75/q25=43.00 mlp_w1:H=0.9039,top10E=0.14,eRank=408.6,q75/q25=4.53 mlp_w2:H=0.9716,top10E=0.04,eRank=635.9,q75/q25=2.84 vo_prod:H=0.7420,top10E=0.24,eRank=144.7,q75/q25=1898.22 train_time:426394ms step_avg:76.14ms +[2025-09-02 09:30:41] [Rank 0] step:5601/10000 train_time:426408ms step_avg:76.13ms +[2025-09-02 09:30:41] [Rank 0] step:5601/10000 train_time:426408ms step_avg:76.13ms +[2025-09-02 09:30:43] [Rank 0] step:5621/10000 train_time:427938ms step_avg:76.13ms +[2025-09-02 09:30:43] [Rank 0] step:5621/10000 train_time:427938ms step_avg:76.13ms +[2025-09-02 09:30:44] [Rank 0] step:5641/10000 train_time:429543ms step_avg:76.15ms +[2025-09-02 09:30:44] [Rank 0] step:5641/10000 train_time:429543ms step_avg:76.15ms +[2025-09-02 09:30:46] [Rank 0] step:5661/10000 train_time:431147ms step_avg:76.16ms +[2025-09-02 09:30:46] [Rank 0] step:5661/10000 train_time:431147ms step_avg:76.16ms +[2025-09-02 09:30:48] [Rank 0] step:5681/10000 train_time:432754ms step_avg:76.18ms +[2025-09-02 09:30:48] [Rank 0] step:5681/10000 train_time:432754ms step_avg:76.18ms +[2025-09-02 09:30:49] [Rank 0] step:5701/10000 train_time:434356ms step_avg:76.19ms +[2025-09-02 09:30:49] [Rank 0] step:5701/10000 train_time:434356ms step_avg:76.19ms +[2025-09-02 09:30:51] [Rank 0] step:5721/10000 train_time:435963ms step_avg:76.20ms +[2025-09-02 09:30:51] [Rank 0] step:5721/10000 train_time:435963ms step_avg:76.20ms +[2025-09-02 
09:30:52] [Rank 0] step:5741/10000 train_time:437567ms step_avg:76.22ms +[2025-09-02 09:30:52] [Rank 0] step:5741/10000 train_time:437567ms step_avg:76.22ms +[2025-09-02 09:30:54] [Rank 0] step:5761/10000 train_time:439177ms step_avg:76.23ms +[2025-09-02 09:30:54] [Rank 0] step:5761/10000 train_time:439177ms step_avg:76.23ms +[2025-09-02 09:30:56] [Rank 0] step:5781/10000 train_time:440785ms step_avg:76.25ms +[2025-09-02 09:30:56] [Rank 0] step:5781/10000 train_time:440785ms step_avg:76.25ms +[2025-09-02 09:30:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:30:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:31:09] [Rank 0] PRINT: step:5800/10000 val_loss:3.8585 svd_entropy: attn_qk:H=0.7670,top10E=0.27,eRank=168.3,q75/q25=59.10 attn_vo:H=0.8457,top10E=0.14,eRank=300.8,q75/q25=42.35 mlp_w1:H=0.9051,top10E=0.14,eRank=411.9,q75/q25=4.50 mlp_w2:H=0.9716,top10E=0.04,eRank=636.0,q75/q25=2.84 vo_prod:H=0.7437,top10E=0.23,eRank=146.4,q75/q25=1863.24 train_time:442473ms step_avg:76.29ms +[2025-09-02 09:31:09] [Rank 0] PRINT: step:5800/10000 val_loss:3.8585 svd_entropy: attn_qk:H=0.7670,top10E=0.27,eRank=168.3,q75/q25=59.10 attn_vo:H=0.8457,top10E=0.14,eRank=300.8,q75/q25=42.35 mlp_w1:H=0.9051,top10E=0.14,eRank=411.9,q75/q25=4.50 mlp_w2:H=0.9716,top10E=0.04,eRank=636.0,q75/q25=2.84 vo_prod:H=0.7437,top10E=0.23,eRank=146.4,q75/q25=1863.24 train_time:442473ms step_avg:76.29ms +[2025-09-02 09:31:09] [Rank 0] step:5801/10000 train_time:442487ms step_avg:76.28ms +[2025-09-02 09:31:09] [Rank 0] step:5801/10000 train_time:442487ms step_avg:76.28ms +[2025-09-02 09:31:11] [Rank 0] step:5821/10000 train_time:444013ms step_avg:76.28ms +[2025-09-02 09:31:11] [Rank 0] step:5821/10000 train_time:444013ms step_avg:76.28ms +[2025-09-02 09:31:12] [Rank 0] step:5841/10000 train_time:445613ms 
step_avg:76.29ms +[2025-09-02 09:31:12] [Rank 0] step:5841/10000 train_time:445613ms step_avg:76.29ms +[2025-09-02 09:31:14] [Rank 0] step:5861/10000 train_time:447218ms step_avg:76.30ms +[2025-09-02 09:31:14] [Rank 0] step:5861/10000 train_time:447218ms step_avg:76.30ms +[2025-09-02 09:31:15] [Rank 0] step:5881/10000 train_time:448822ms step_avg:76.32ms +[2025-09-02 09:31:15] [Rank 0] step:5881/10000 train_time:448822ms step_avg:76.32ms +[2025-09-02 09:31:17] [Rank 0] step:5901/10000 train_time:450425ms step_avg:76.33ms +[2025-09-02 09:31:17] [Rank 0] step:5901/10000 train_time:450425ms step_avg:76.33ms +[2025-09-02 09:31:19] [Rank 0] step:5921/10000 train_time:452029ms step_avg:76.34ms +[2025-09-02 09:31:19] [Rank 0] step:5921/10000 train_time:452029ms step_avg:76.34ms +[2025-09-02 09:31:20] [Rank 0] step:5941/10000 train_time:453636ms step_avg:76.36ms +[2025-09-02 09:31:20] [Rank 0] step:5941/10000 train_time:453636ms step_avg:76.36ms +[2025-09-02 09:31:22] [Rank 0] step:5961/10000 train_time:455276ms step_avg:76.38ms +[2025-09-02 09:31:22] [Rank 0] step:5961/10000 train_time:455276ms step_avg:76.38ms +[2025-09-02 09:31:23] [Rank 0] step:5981/10000 train_time:456882ms step_avg:76.39ms +[2025-09-02 09:31:23] [Rank 0] step:5981/10000 train_time:456882ms step_avg:76.39ms +[2025-09-02 09:31:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:31:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:31:37] [Rank 0] PRINT: step:6000/10000 val_loss:3.8343 svd_entropy: attn_qk:H=0.7682,top10E=0.27,eRank=169.6,q75/q25=58.59 attn_vo:H=0.8468,top10E=0.14,eRank=302.6,q75/q25=41.56 mlp_w1:H=0.9064,top10E=0.14,eRank=415.2,q75/q25=4.46 mlp_w2:H=0.9716,top10E=0.04,eRank=636.0,q75/q25=2.84 vo_prod:H=0.7454,top10E=0.23,eRank=148.1,q75/q25=1827.73 train_time:458568ms step_avg:76.43ms +[2025-09-02 09:31:37] [Rank 0] PRINT: step:6000/10000 val_loss:3.8343 svd_entropy: attn_qk:H=0.7682,top10E=0.27,eRank=169.6,q75/q25=58.59 attn_vo:H=0.8468,top10E=0.14,eRank=302.6,q75/q25=41.56 mlp_w1:H=0.9064,top10E=0.14,eRank=415.2,q75/q25=4.46 mlp_w2:H=0.9716,top10E=0.04,eRank=636.0,q75/q25=2.84 vo_prod:H=0.7454,top10E=0.23,eRank=148.1,q75/q25=1827.73 train_time:458568ms step_avg:76.43ms +[2025-09-02 09:31:37] [Rank 0] step:6001/10000 train_time:458583ms step_avg:76.42ms +[2025-09-02 09:31:37] [Rank 0] step:6001/10000 train_time:458583ms step_avg:76.42ms +[2025-09-02 09:31:39] [Rank 0] step:6021/10000 train_time:460113ms step_avg:76.42ms +[2025-09-02 09:31:39] [Rank 0] step:6021/10000 train_time:460113ms step_avg:76.42ms +[2025-09-02 09:31:40] [Rank 0] step:6041/10000 train_time:461720ms step_avg:76.43ms +[2025-09-02 09:31:40] [Rank 0] step:6041/10000 train_time:461720ms step_avg:76.43ms +[2025-09-02 09:31:42] [Rank 0] step:6061/10000 train_time:463330ms step_avg:76.44ms +[2025-09-02 09:31:42] [Rank 0] step:6061/10000 train_time:463330ms step_avg:76.44ms +[2025-09-02 09:31:44] [Rank 0] step:6081/10000 train_time:464938ms step_avg:76.46ms +[2025-09-02 09:31:44] [Rank 0] step:6081/10000 train_time:464938ms step_avg:76.46ms +[2025-09-02 09:31:45] [Rank 0] step:6101/10000 train_time:466545ms step_avg:76.47ms +[2025-09-02 09:31:45] [Rank 0] step:6101/10000 train_time:466545ms step_avg:76.47ms +[2025-09-02 09:31:47] [Rank 0] step:6121/10000 train_time:468413ms step_avg:76.53ms +[2025-09-02 09:31:47] [Rank 0] step:6121/10000 train_time:468413ms step_avg:76.53ms +[2025-09-02 
09:31:49] [Rank 0] step:6141/10000 train_time:470028ms step_avg:76.54ms +[2025-09-02 09:31:49] [Rank 0] step:6141/10000 train_time:470028ms step_avg:76.54ms +[2025-09-02 09:31:50] [Rank 0] step:6161/10000 train_time:471636ms step_avg:76.55ms +[2025-09-02 09:31:50] [Rank 0] step:6161/10000 train_time:471636ms step_avg:76.55ms +[2025-09-02 09:31:52] [Rank 0] step:6181/10000 train_time:473241ms step_avg:76.56ms +[2025-09-02 09:31:52] [Rank 0] step:6181/10000 train_time:473241ms step_avg:76.56ms +[2025-09-02 09:31:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:31:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:32:05] [Rank 0] PRINT: step:6200/10000 val_loss:3.8181 svd_entropy: attn_qk:H=0.7694,top10E=0.26,eRank=170.7,q75/q25=58.12 attn_vo:H=0.8479,top10E=0.13,eRank=304.3,q75/q25=40.98 mlp_w1:H=0.9075,top10E=0.14,eRank=418.4,q75/q25=4.43 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.84 vo_prod:H=0.7471,top10E=0.23,eRank=149.8,q75/q25=1778.95 train_time:474930ms step_avg:76.60ms +[2025-09-02 09:32:05] [Rank 0] PRINT: step:6200/10000 val_loss:3.8181 svd_entropy: attn_qk:H=0.7694,top10E=0.26,eRank=170.7,q75/q25=58.12 attn_vo:H=0.8479,top10E=0.13,eRank=304.3,q75/q25=40.98 mlp_w1:H=0.9075,top10E=0.14,eRank=418.4,q75/q25=4.43 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.84 vo_prod:H=0.7471,top10E=0.23,eRank=149.8,q75/q25=1778.95 train_time:474930ms step_avg:76.60ms +[2025-09-02 09:32:06] [Rank 0] step:6201/10000 train_time:474945ms step_avg:76.59ms +[2025-09-02 09:32:06] [Rank 0] step:6201/10000 train_time:474945ms step_avg:76.59ms +[2025-09-02 09:32:07] [Rank 0] step:6221/10000 train_time:476478ms step_avg:76.59ms +[2025-09-02 09:32:07] [Rank 0] step:6221/10000 train_time:476478ms step_avg:76.59ms +[2025-09-02 09:32:09] [Rank 0] step:6241/10000 train_time:478084ms 
step_avg:76.60ms +[2025-09-02 09:32:09] [Rank 0] step:6241/10000 train_time:478084ms step_avg:76.60ms +[2025-09-02 09:32:10] [Rank 0] step:6261/10000 train_time:479693ms step_avg:76.62ms +[2025-09-02 09:32:10] [Rank 0] step:6261/10000 train_time:479693ms step_avg:76.62ms +[2025-09-02 09:32:12] [Rank 0] step:6281/10000 train_time:481302ms step_avg:76.63ms +[2025-09-02 09:32:12] [Rank 0] step:6281/10000 train_time:481302ms step_avg:76.63ms +[2025-09-02 09:32:14] [Rank 0] step:6301/10000 train_time:482913ms step_avg:76.64ms +[2025-09-02 09:32:14] [Rank 0] step:6301/10000 train_time:482913ms step_avg:76.64ms +[2025-09-02 09:32:15] [Rank 0] step:6321/10000 train_time:484521ms step_avg:76.65ms +[2025-09-02 09:32:15] [Rank 0] step:6321/10000 train_time:484521ms step_avg:76.65ms +[2025-09-02 09:32:17] [Rank 0] step:6341/10000 train_time:486132ms step_avg:76.66ms +[2025-09-02 09:32:17] [Rank 0] step:6341/10000 train_time:486132ms step_avg:76.66ms +[2025-09-02 09:32:18] [Rank 0] step:6361/10000 train_time:487796ms step_avg:76.69ms +[2025-09-02 09:32:18] [Rank 0] step:6361/10000 train_time:487796ms step_avg:76.69ms +[2025-09-02 09:32:20] [Rank 0] step:6381/10000 train_time:489414ms step_avg:76.70ms +[2025-09-02 09:32:20] [Rank 0] step:6381/10000 train_time:489414ms step_avg:76.70ms +[2025-09-02 09:32:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:32:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:32:33] [Rank 0] PRINT: step:6400/10000 val_loss:3.8026 svd_entropy: attn_qk:H=0.7705,top10E=0.26,eRank=171.9,q75/q25=57.64 attn_vo:H=0.8488,top10E=0.13,eRank=305.8,q75/q25=40.41 mlp_w1:H=0.9085,top10E=0.14,eRank=421.1,q75/q25=4.41 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7485,top10E=0.23,eRank=151.2,q75/q25=1760.95 train_time:491108ms step_avg:76.74ms +[2025-09-02 09:32:33] [Rank 0] PRINT: step:6400/10000 val_loss:3.8026 svd_entropy: attn_qk:H=0.7705,top10E=0.26,eRank=171.9,q75/q25=57.64 attn_vo:H=0.8488,top10E=0.13,eRank=305.8,q75/q25=40.41 mlp_w1:H=0.9085,top10E=0.14,eRank=421.1,q75/q25=4.41 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7485,top10E=0.23,eRank=151.2,q75/q25=1760.95 train_time:491108ms step_avg:76.74ms +[2025-09-02 09:32:33] [Rank 0] step:6401/10000 train_time:491122ms step_avg:76.73ms +[2025-09-02 09:32:33] [Rank 0] step:6401/10000 train_time:491122ms step_avg:76.73ms +[2025-09-02 09:32:35] [Rank 0] step:6421/10000 train_time:492662ms step_avg:76.73ms +[2025-09-02 09:32:35] [Rank 0] step:6421/10000 train_time:492662ms step_avg:76.73ms +[2025-09-02 09:32:37] [Rank 0] step:6441/10000 train_time:494269ms step_avg:76.74ms +[2025-09-02 09:32:37] [Rank 0] step:6441/10000 train_time:494269ms step_avg:76.74ms +[2025-09-02 09:32:38] [Rank 0] step:6461/10000 train_time:495881ms step_avg:76.75ms +[2025-09-02 09:32:38] [Rank 0] step:6461/10000 train_time:495881ms step_avg:76.75ms +[2025-09-02 09:32:40] [Rank 0] step:6481/10000 train_time:497500ms step_avg:76.76ms +[2025-09-02 09:32:40] [Rank 0] step:6481/10000 train_time:497500ms step_avg:76.76ms +[2025-09-02 09:32:42] [Rank 0] step:6501/10000 train_time:499106ms step_avg:76.77ms +[2025-09-02 09:32:42] [Rank 0] step:6501/10000 train_time:499106ms step_avg:76.77ms +[2025-09-02 09:32:43] [Rank 0] step:6521/10000 train_time:500712ms step_avg:76.78ms +[2025-09-02 09:32:43] [Rank 0] step:6521/10000 train_time:500712ms step_avg:76.78ms +[2025-09-02 
09:32:45] [Rank 0] step:6541/10000 train_time:502322ms step_avg:76.80ms +[2025-09-02 09:32:45] [Rank 0] step:6541/10000 train_time:502322ms step_avg:76.80ms +[2025-09-02 09:32:46] [Rank 0] step:6561/10000 train_time:503936ms step_avg:76.81ms +[2025-09-02 09:32:46] [Rank 0] step:6561/10000 train_time:503936ms step_avg:76.81ms +[2025-09-02 09:32:48] [Rank 0] step:6581/10000 train_time:505543ms step_avg:76.82ms +[2025-09-02 09:32:48] [Rank 0] step:6581/10000 train_time:505543ms step_avg:76.82ms +[2025-09-02 09:32:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:32:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:33:01] [Rank 0] PRINT: step:6600/10000 val_loss:3.7883 svd_entropy: attn_qk:H=0.7714,top10E=0.26,eRank=172.8,q75/q25=57.23 attn_vo:H=0.8496,top10E=0.13,eRank=307.2,q75/q25=40.00 mlp_w1:H=0.9094,top10E=0.14,eRank=423.6,q75/q25=4.37 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7499,top10E=0.23,eRank=152.5,q75/q25=1738.51 train_time:507238ms step_avg:76.85ms +[2025-09-02 09:33:01] [Rank 0] PRINT: step:6600/10000 val_loss:3.7883 svd_entropy: attn_qk:H=0.7714,top10E=0.26,eRank=172.8,q75/q25=57.23 attn_vo:H=0.8496,top10E=0.13,eRank=307.2,q75/q25=40.00 mlp_w1:H=0.9094,top10E=0.14,eRank=423.6,q75/q25=4.37 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7499,top10E=0.23,eRank=152.5,q75/q25=1738.51 train_time:507238ms step_avg:76.85ms +[2025-09-02 09:33:01] [Rank 0] step:6601/10000 train_time:507252ms step_avg:76.84ms +[2025-09-02 09:33:01] [Rank 0] step:6601/10000 train_time:507252ms step_avg:76.84ms +[2025-09-02 09:33:03] [Rank 0] step:6621/10000 train_time:508793ms step_avg:76.85ms +[2025-09-02 09:33:03] [Rank 0] step:6621/10000 train_time:508793ms step_avg:76.85ms +[2025-09-02 09:33:05] [Rank 0] step:6641/10000 train_time:510409ms 
step_avg:76.86ms +[2025-09-02 09:33:05] [Rank 0] step:6641/10000 train_time:510409ms step_avg:76.86ms +[2025-09-02 09:33:06] [Rank 0] step:6661/10000 train_time:512019ms step_avg:76.87ms +[2025-09-02 09:33:06] [Rank 0] step:6661/10000 train_time:512019ms step_avg:76.87ms +[2025-09-02 09:33:08] [Rank 0] step:6681/10000 train_time:513642ms step_avg:76.88ms +[2025-09-02 09:33:08] [Rank 0] step:6681/10000 train_time:513642ms step_avg:76.88ms +[2025-09-02 09:33:10] [Rank 0] step:6701/10000 train_time:515294ms step_avg:76.90ms +[2025-09-02 09:33:10] [Rank 0] step:6701/10000 train_time:515294ms step_avg:76.90ms +[2025-09-02 09:33:11] [Rank 0] step:6721/10000 train_time:516936ms step_avg:76.91ms +[2025-09-02 09:33:11] [Rank 0] step:6721/10000 train_time:516936ms step_avg:76.91ms +[2025-09-02 09:33:13] [Rank 0] step:6741/10000 train_time:518572ms step_avg:76.93ms +[2025-09-02 09:33:13] [Rank 0] step:6741/10000 train_time:518572ms step_avg:76.93ms +[2025-09-02 09:33:14] [Rank 0] step:6761/10000 train_time:520207ms step_avg:76.94ms +[2025-09-02 09:33:14] [Rank 0] step:6761/10000 train_time:520207ms step_avg:76.94ms +[2025-09-02 09:33:16] [Rank 0] step:6781/10000 train_time:521847ms step_avg:76.96ms +[2025-09-02 09:33:16] [Rank 0] step:6781/10000 train_time:521847ms step_avg:76.96ms +[2025-09-02 09:33:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:33:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:33:29] [Rank 0] PRINT: step:6800/10000 val_loss:3.7714 svd_entropy: attn_qk:H=0.7721,top10E=0.26,eRank=173.6,q75/q25=56.53 attn_vo:H=0.8504,top10E=0.13,eRank=308.4,q75/q25=39.49 mlp_w1:H=0.9103,top10E=0.14,eRank=425.9,q75/q25=4.35 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7511,top10E=0.23,eRank=153.7,q75/q25=1679.95 train_time:523572ms step_avg:77.00ms +[2025-09-02 09:33:29] [Rank 0] PRINT: step:6800/10000 val_loss:3.7714 svd_entropy: attn_qk:H=0.7721,top10E=0.26,eRank=173.6,q75/q25=56.53 attn_vo:H=0.8504,top10E=0.13,eRank=308.4,q75/q25=39.49 mlp_w1:H=0.9103,top10E=0.14,eRank=425.9,q75/q25=4.35 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7511,top10E=0.23,eRank=153.7,q75/q25=1679.95 train_time:523572ms step_avg:77.00ms +[2025-09-02 09:33:30] [Rank 0] step:6801/10000 train_time:523585ms step_avg:76.99ms +[2025-09-02 09:33:30] [Rank 0] step:6801/10000 train_time:523585ms step_avg:76.99ms +[2025-09-02 09:33:31] [Rank 0] step:6821/10000 train_time:525148ms step_avg:76.99ms +[2025-09-02 09:33:31] [Rank 0] step:6821/10000 train_time:525148ms step_avg:76.99ms +[2025-09-02 09:33:33] [Rank 0] step:6841/10000 train_time:526782ms step_avg:77.00ms +[2025-09-02 09:33:33] [Rank 0] step:6841/10000 train_time:526782ms step_avg:77.00ms +[2025-09-02 09:33:34] [Rank 0] step:6861/10000 train_time:528418ms step_avg:77.02ms +[2025-09-02 09:33:34] [Rank 0] step:6861/10000 train_time:528418ms step_avg:77.02ms +[2025-09-02 09:33:36] [Rank 0] step:6881/10000 train_time:530057ms step_avg:77.03ms +[2025-09-02 09:33:36] [Rank 0] step:6881/10000 train_time:530057ms step_avg:77.03ms +[2025-09-02 09:33:38] [Rank 0] step:6901/10000 train_time:531695ms step_avg:77.05ms +[2025-09-02 09:33:38] [Rank 0] step:6901/10000 train_time:531695ms step_avg:77.05ms +[2025-09-02 09:33:39] [Rank 0] step:6921/10000 train_time:533330ms step_avg:77.06ms +[2025-09-02 09:33:39] [Rank 0] step:6921/10000 train_time:533330ms step_avg:77.06ms +[2025-09-02 
09:33:41] [Rank 0] step:6941/10000 train_time:534970ms step_avg:77.07ms +[2025-09-02 09:33:41] [Rank 0] step:6941/10000 train_time:534970ms step_avg:77.07ms +[2025-09-02 09:33:43] [Rank 0] step:6961/10000 train_time:536624ms step_avg:77.09ms +[2025-09-02 09:33:43] [Rank 0] step:6961/10000 train_time:536624ms step_avg:77.09ms +[2025-09-02 09:33:44] [Rank 0] step:6981/10000 train_time:538268ms step_avg:77.10ms +[2025-09-02 09:33:44] [Rank 0] step:6981/10000 train_time:538268ms step_avg:77.10ms +[2025-09-02 09:33:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:33:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:33:58] [Rank 0] PRINT: step:7000/10000 val_loss:3.7544 svd_entropy: attn_qk:H=0.7729,top10E=0.26,eRank=174.4,q75/q25=56.25 attn_vo:H=0.8511,top10E=0.13,eRank=309.6,q75/q25=39.12 mlp_w1:H=0.9110,top10E=0.14,eRank=428.0,q75/q25=4.33 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7523,top10E=0.23,eRank=154.9,q75/q25=1658.51 train_time:539998ms step_avg:77.14ms +[2025-09-02 09:33:58] [Rank 0] PRINT: step:7000/10000 val_loss:3.7544 svd_entropy: attn_qk:H=0.7729,top10E=0.26,eRank=174.4,q75/q25=56.25 attn_vo:H=0.8511,top10E=0.13,eRank=309.6,q75/q25=39.12 mlp_w1:H=0.9110,top10E=0.14,eRank=428.0,q75/q25=4.33 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7523,top10E=0.23,eRank=154.9,q75/q25=1658.51 train_time:539998ms step_avg:77.14ms +[2025-09-02 09:33:58] [Rank 0] step:7001/10000 train_time:540012ms step_avg:77.13ms +[2025-09-02 09:33:58] [Rank 0] step:7001/10000 train_time:540012ms step_avg:77.13ms +[2025-09-02 09:33:59] [Rank 0] step:7021/10000 train_time:541577ms step_avg:77.14ms +[2025-09-02 09:33:59] [Rank 0] step:7021/10000 train_time:541577ms step_avg:77.14ms +[2025-09-02 09:34:01] [Rank 0] step:7041/10000 train_time:543211ms 
step_avg:77.15ms +[2025-09-02 09:34:01] [Rank 0] step:7041/10000 train_time:543211ms step_avg:77.15ms +[2025-09-02 09:34:03] [Rank 0] step:7061/10000 train_time:544853ms step_avg:77.16ms +[2025-09-02 09:34:03] [Rank 0] step:7061/10000 train_time:544853ms step_avg:77.16ms +[2025-09-02 09:34:04] [Rank 0] step:7081/10000 train_time:546488ms step_avg:77.18ms +[2025-09-02 09:34:04] [Rank 0] step:7081/10000 train_time:546488ms step_avg:77.18ms +[2025-09-02 09:34:06] [Rank 0] step:7101/10000 train_time:548128ms step_avg:77.19ms +[2025-09-02 09:34:06] [Rank 0] step:7101/10000 train_time:548128ms step_avg:77.19ms +[2025-09-02 09:34:08] [Rank 0] step:7121/10000 train_time:549765ms step_avg:77.20ms +[2025-09-02 09:34:08] [Rank 0] step:7121/10000 train_time:549765ms step_avg:77.20ms +[2025-09-02 09:34:09] [Rank 0] step:7141/10000 train_time:551402ms step_avg:77.22ms +[2025-09-02 09:34:09] [Rank 0] step:7141/10000 train_time:551402ms step_avg:77.22ms +[2025-09-02 09:34:11] [Rank 0] step:7161/10000 train_time:553038ms step_avg:77.23ms +[2025-09-02 09:34:11] [Rank 0] step:7161/10000 train_time:553038ms step_avg:77.23ms +[2025-09-02 09:34:12] [Rank 0] step:7181/10000 train_time:554679ms step_avg:77.24ms +[2025-09-02 09:34:12] [Rank 0] step:7181/10000 train_time:554679ms step_avg:77.24ms +[2025-09-02 09:34:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:34:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:34:26] [Rank 0] PRINT: step:7200/10000 val_loss:3.7443 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=175.2,q75/q25=55.95 attn_vo:H=0.8518,top10E=0.13,eRank=310.7,q75/q25=38.68 mlp_w1:H=0.9117,top10E=0.13,eRank=429.9,q75/q25=4.31 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7535,top10E=0.22,eRank=156.1,q75/q25=1628.58 train_time:556402ms step_avg:77.28ms +[2025-09-02 09:34:26] [Rank 0] PRINT: step:7200/10000 val_loss:3.7443 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=175.2,q75/q25=55.95 attn_vo:H=0.8518,top10E=0.13,eRank=310.7,q75/q25=38.68 mlp_w1:H=0.9117,top10E=0.13,eRank=429.9,q75/q25=4.31 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7535,top10E=0.22,eRank=156.1,q75/q25=1628.58 train_time:556402ms step_avg:77.28ms +[2025-09-02 09:34:26] [Rank 0] step:7201/10000 train_time:556417ms step_avg:77.27ms +[2025-09-02 09:34:26] [Rank 0] step:7201/10000 train_time:556417ms step_avg:77.27ms +[2025-09-02 09:34:28] [Rank 0] step:7221/10000 train_time:557978ms step_avg:77.27ms +[2025-09-02 09:34:28] [Rank 0] step:7221/10000 train_time:557978ms step_avg:77.27ms +[2025-09-02 09:34:29] [Rank 0] step:7241/10000 train_time:559611ms step_avg:77.28ms +[2025-09-02 09:34:29] [Rank 0] step:7241/10000 train_time:559611ms step_avg:77.28ms +[2025-09-02 09:34:31] [Rank 0] step:7261/10000 train_time:561241ms step_avg:77.30ms +[2025-09-02 09:34:31] [Rank 0] step:7261/10000 train_time:561241ms step_avg:77.30ms +[2025-09-02 09:34:32] [Rank 0] step:7281/10000 train_time:562883ms step_avg:77.31ms +[2025-09-02 09:34:32] [Rank 0] step:7281/10000 train_time:562883ms step_avg:77.31ms +[2025-09-02 09:34:34] [Rank 0] step:7301/10000 train_time:564518ms step_avg:77.32ms +[2025-09-02 09:34:34] [Rank 0] step:7301/10000 train_time:564518ms step_avg:77.32ms +[2025-09-02 09:34:36] [Rank 0] step:7321/10000 train_time:566164ms step_avg:77.33ms +[2025-09-02 09:34:36] [Rank 0] step:7321/10000 train_time:566164ms step_avg:77.33ms +[2025-09-02 
09:34:37] [Rank 0] step:7341/10000 train_time:567802ms step_avg:77.35ms +[2025-09-02 09:34:37] [Rank 0] step:7341/10000 train_time:567802ms step_avg:77.35ms +[2025-09-02 09:34:39] [Rank 0] step:7361/10000 train_time:569445ms step_avg:77.36ms +[2025-09-02 09:34:39] [Rank 0] step:7361/10000 train_time:569445ms step_avg:77.36ms +[2025-09-02 09:34:41] [Rank 0] step:7381/10000 train_time:571088ms step_avg:77.37ms +[2025-09-02 09:34:41] [Rank 0] step:7381/10000 train_time:571088ms step_avg:77.37ms +[2025-09-02 09:34:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:34:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:34:54] [Rank 0] PRINT: step:7400/10000 val_loss:3.7260 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.7,q75/q25=55.47 attn_vo:H=0.8523,top10E=0.13,eRank=311.6,q75/q25=38.37 mlp_w1:H=0.9123,top10E=0.13,eRank=431.6,q75/q25=4.29 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7544,top10E=0.22,eRank=156.9,q75/q25=1625.57 train_time:572795ms step_avg:77.40ms +[2025-09-02 09:34:54] [Rank 0] PRINT: step:7400/10000 val_loss:3.7260 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.7,q75/q25=55.47 attn_vo:H=0.8523,top10E=0.13,eRank=311.6,q75/q25=38.37 mlp_w1:H=0.9123,top10E=0.13,eRank=431.6,q75/q25=4.29 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7544,top10E=0.22,eRank=156.9,q75/q25=1625.57 train_time:572795ms step_avg:77.40ms +[2025-09-02 09:34:54] [Rank 0] step:7401/10000 train_time:572810ms step_avg:77.40ms +[2025-09-02 09:34:54] [Rank 0] step:7401/10000 train_time:572810ms step_avg:77.40ms +[2025-09-02 09:34:56] [Rank 0] step:7421/10000 train_time:574374ms step_avg:77.40ms +[2025-09-02 09:34:56] [Rank 0] step:7421/10000 train_time:574374ms step_avg:77.40ms +[2025-09-02 09:34:57] [Rank 0] step:7441/10000 train_time:576012ms 
step_avg:77.41ms +[2025-09-02 09:34:57] [Rank 0] step:7441/10000 train_time:576012ms step_avg:77.41ms +[2025-09-02 09:34:59] [Rank 0] step:7461/10000 train_time:577650ms step_avg:77.42ms +[2025-09-02 09:34:59] [Rank 0] step:7461/10000 train_time:577650ms step_avg:77.42ms +[2025-09-02 09:35:01] [Rank 0] step:7481/10000 train_time:579296ms step_avg:77.44ms +[2025-09-02 09:35:01] [Rank 0] step:7481/10000 train_time:579296ms step_avg:77.44ms +[2025-09-02 09:35:02] [Rank 0] step:7501/10000 train_time:580940ms step_avg:77.45ms +[2025-09-02 09:35:02] [Rank 0] step:7501/10000 train_time:580940ms step_avg:77.45ms +[2025-09-02 09:35:04] [Rank 0] step:7521/10000 train_time:582586ms step_avg:77.46ms +[2025-09-02 09:35:04] [Rank 0] step:7521/10000 train_time:582586ms step_avg:77.46ms +[2025-09-02 09:35:06] [Rank 0] step:7541/10000 train_time:584241ms step_avg:77.48ms +[2025-09-02 09:35:06] [Rank 0] step:7541/10000 train_time:584241ms step_avg:77.48ms +[2025-09-02 09:35:07] [Rank 0] step:7561/10000 train_time:585869ms step_avg:77.49ms +[2025-09-02 09:35:07] [Rank 0] step:7561/10000 train_time:585869ms step_avg:77.49ms +[2025-09-02 09:35:09] [Rank 0] step:7581/10000 train_time:587522ms step_avg:77.50ms +[2025-09-02 09:35:09] [Rank 0] step:7581/10000 train_time:587522ms step_avg:77.50ms +[2025-09-02 09:35:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:35:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:35:22] [Rank 0] PRINT: step:7600/10000 val_loss:3.7188 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.4,q75/q25=55.01 attn_vo:H=0.8529,top10E=0.13,eRank=312.6,q75/q25=37.97 mlp_w1:H=0.9129,top10E=0.13,eRank=433.2,q75/q25=4.26 mlp_w2:H=0.9716,top10E=0.04,eRank=636.2,q75/q25=2.83 vo_prod:H=0.7553,top10E=0.22,eRank=157.9,q75/q25=1604.57 train_time:589257ms step_avg:77.53ms +[2025-09-02 09:35:22] [Rank 0] PRINT: step:7600/10000 val_loss:3.7188 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.4,q75/q25=55.01 attn_vo:H=0.8529,top10E=0.13,eRank=312.6,q75/q25=37.97 mlp_w1:H=0.9129,top10E=0.13,eRank=433.2,q75/q25=4.26 mlp_w2:H=0.9716,top10E=0.04,eRank=636.2,q75/q25=2.83 vo_prod:H=0.7553,top10E=0.22,eRank=157.9,q75/q25=1604.57 train_time:589257ms step_avg:77.53ms +[2025-09-02 09:35:22] [Rank 0] step:7601/10000 train_time:589271ms step_avg:77.53ms +[2025-09-02 09:35:22] [Rank 0] step:7601/10000 train_time:589271ms step_avg:77.53ms +[2025-09-02 09:35:24] [Rank 0] step:7621/10000 train_time:590848ms step_avg:77.53ms +[2025-09-02 09:35:24] [Rank 0] step:7621/10000 train_time:590848ms step_avg:77.53ms +[2025-09-02 09:35:26] [Rank 0] step:7641/10000 train_time:592485ms step_avg:77.54ms +[2025-09-02 09:35:26] [Rank 0] step:7641/10000 train_time:592485ms step_avg:77.54ms +[2025-09-02 09:35:27] [Rank 0] step:7661/10000 train_time:594125ms step_avg:77.55ms +[2025-09-02 09:35:27] [Rank 0] step:7661/10000 train_time:594125ms step_avg:77.55ms +[2025-09-02 09:35:29] [Rank 0] step:7681/10000 train_time:595760ms step_avg:77.56ms +[2025-09-02 09:35:29] [Rank 0] step:7681/10000 train_time:595760ms step_avg:77.56ms +[2025-09-02 09:35:31] [Rank 0] step:7701/10000 train_time:597397ms step_avg:77.57ms +[2025-09-02 09:35:31] [Rank 0] step:7701/10000 train_time:597397ms step_avg:77.57ms +[2025-09-02 09:35:32] [Rank 0] step:7721/10000 train_time:599051ms step_avg:77.59ms +[2025-09-02 09:35:32] [Rank 0] step:7721/10000 train_time:599051ms step_avg:77.59ms +[2025-09-02 
09:35:34] [Rank 0] step:7741/10000 train_time:600694ms step_avg:77.60ms +[2025-09-02 09:35:34] [Rank 0] step:7741/10000 train_time:600694ms step_avg:77.60ms +[2025-09-02 09:35:36] [Rank 0] step:7761/10000 train_time:602391ms step_avg:77.62ms +[2025-09-02 09:35:36] [Rank 0] step:7761/10000 train_time:602391ms step_avg:77.62ms +[2025-09-02 09:35:37] [Rank 0] step:7781/10000 train_time:604037ms step_avg:77.63ms +[2025-09-02 09:35:37] [Rank 0] step:7781/10000 train_time:604037ms step_avg:77.63ms +[2025-09-02 09:35:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:35:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:35:51] [Rank 0] PRINT: step:7800/10000 val_loss:3.7049 svd_entropy: attn_qk:H=0.7753,top10E=0.26,eRank=176.9,q75/q25=54.78 attn_vo:H=0.8534,top10E=0.13,eRank=313.4,q75/q25=37.75 mlp_w1:H=0.9134,top10E=0.13,eRank=434.6,q75/q25=4.25 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7562,top10E=0.22,eRank=158.8,q75/q25=1627.52 train_time:605774ms step_avg:77.66ms +[2025-09-02 09:35:51] [Rank 0] PRINT: step:7800/10000 val_loss:3.7049 svd_entropy: attn_qk:H=0.7753,top10E=0.26,eRank=176.9,q75/q25=54.78 attn_vo:H=0.8534,top10E=0.13,eRank=313.4,q75/q25=37.75 mlp_w1:H=0.9134,top10E=0.13,eRank=434.6,q75/q25=4.25 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7562,top10E=0.22,eRank=158.8,q75/q25=1627.52 train_time:605774ms step_avg:77.66ms +[2025-09-02 09:35:51] [Rank 0] step:7801/10000 train_time:605788ms step_avg:77.66ms +[2025-09-02 09:35:51] [Rank 0] step:7801/10000 train_time:605788ms step_avg:77.66ms +[2025-09-02 09:35:52] [Rank 0] step:7821/10000 train_time:607351ms step_avg:77.66ms +[2025-09-02 09:35:52] [Rank 0] step:7821/10000 train_time:607351ms step_avg:77.66ms +[2025-09-02 09:35:54] [Rank 0] step:7841/10000 train_time:608989ms 
step_avg:77.67ms +[2025-09-02 09:35:54] [Rank 0] step:7841/10000 train_time:608989ms step_avg:77.67ms +[2025-09-02 09:35:56] [Rank 0] step:7861/10000 train_time:610638ms step_avg:77.68ms +[2025-09-02 09:35:56] [Rank 0] step:7861/10000 train_time:610638ms step_avg:77.68ms +[2025-09-02 09:35:57] [Rank 0] step:7881/10000 train_time:612290ms step_avg:77.69ms +[2025-09-02 09:35:57] [Rank 0] step:7881/10000 train_time:612290ms step_avg:77.69ms +[2025-09-02 09:35:59] [Rank 0] step:7901/10000 train_time:613931ms step_avg:77.70ms +[2025-09-02 09:35:59] [Rank 0] step:7901/10000 train_time:613931ms step_avg:77.70ms +[2025-09-02 09:36:01] [Rank 0] step:7921/10000 train_time:615580ms step_avg:77.71ms +[2025-09-02 09:36:01] [Rank 0] step:7921/10000 train_time:615580ms step_avg:77.71ms +[2025-09-02 09:36:02] [Rank 0] step:7941/10000 train_time:617233ms step_avg:77.73ms +[2025-09-02 09:36:02] [Rank 0] step:7941/10000 train_time:617233ms step_avg:77.73ms +[2025-09-02 09:36:04] [Rank 0] step:7961/10000 train_time:618883ms step_avg:77.74ms +[2025-09-02 09:36:04] [Rank 0] step:7961/10000 train_time:618883ms step_avg:77.74ms +[2025-09-02 09:36:06] [Rank 0] step:7981/10000 train_time:620524ms step_avg:77.75ms +[2025-09-02 09:36:06] [Rank 0] step:7981/10000 train_time:620524ms step_avg:77.75ms +[2025-09-02 09:36:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:36:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:36:19] [Rank 0] PRINT: step:8000/10000 val_loss:3.6895 svd_entropy: attn_qk:H=0.7757,top10E=0.26,eRank=177.4,q75/q25=54.55 attn_vo:H=0.8539,top10E=0.13,eRank=314.2,q75/q25=37.52 mlp_w1:H=0.9138,top10E=0.13,eRank=435.8,q75/q25=4.23 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7571,top10E=0.22,eRank=159.7,q75/q25=1600.85 train_time:622258ms step_avg:77.78ms +[2025-09-02 09:36:19] [Rank 0] PRINT: step:8000/10000 val_loss:3.6895 svd_entropy: attn_qk:H=0.7757,top10E=0.26,eRank=177.4,q75/q25=54.55 attn_vo:H=0.8539,top10E=0.13,eRank=314.2,q75/q25=37.52 mlp_w1:H=0.9138,top10E=0.13,eRank=435.8,q75/q25=4.23 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7571,top10E=0.22,eRank=159.7,q75/q25=1600.85 train_time:622258ms step_avg:77.78ms +[2025-09-02 09:36:19] [Rank 0] step:8001/10000 train_time:622272ms step_avg:77.77ms +[2025-09-02 09:36:19] [Rank 0] step:8001/10000 train_time:622272ms step_avg:77.77ms +[2025-09-02 09:36:21] [Rank 0] step:8021/10000 train_time:623843ms step_avg:77.78ms +[2025-09-02 09:36:21] [Rank 0] step:8021/10000 train_time:623843ms step_avg:77.78ms +[2025-09-02 09:36:22] [Rank 0] step:8041/10000 train_time:625492ms step_avg:77.79ms +[2025-09-02 09:36:22] [Rank 0] step:8041/10000 train_time:625492ms step_avg:77.79ms +[2025-09-02 09:36:24] [Rank 0] step:8061/10000 train_time:627133ms step_avg:77.80ms +[2025-09-02 09:36:24] [Rank 0] step:8061/10000 train_time:627133ms step_avg:77.80ms +[2025-09-02 09:36:26] [Rank 0] step:8081/10000 train_time:628768ms step_avg:77.81ms +[2025-09-02 09:36:26] [Rank 0] step:8081/10000 train_time:628768ms step_avg:77.81ms +[2025-09-02 09:36:27] [Rank 0] step:8101/10000 train_time:630419ms step_avg:77.82ms +[2025-09-02 09:36:27] [Rank 0] step:8101/10000 train_time:630419ms step_avg:77.82ms +[2025-09-02 09:36:29] [Rank 0] step:8121/10000 train_time:632065ms step_avg:77.83ms +[2025-09-02 09:36:29] [Rank 0] step:8121/10000 train_time:632065ms step_avg:77.83ms +[2025-09-02 
09:36:31] [Rank 0] step:8141/10000 train_time:633883ms step_avg:77.86ms +[2025-09-02 09:36:31] [Rank 0] step:8141/10000 train_time:633883ms step_avg:77.86ms +[2025-09-02 09:36:32] [Rank 0] step:8161/10000 train_time:635540ms step_avg:77.88ms +[2025-09-02 09:36:32] [Rank 0] step:8161/10000 train_time:635540ms step_avg:77.88ms +[2025-09-02 09:36:34] [Rank 0] step:8181/10000 train_time:637212ms step_avg:77.89ms +[2025-09-02 09:36:34] [Rank 0] step:8181/10000 train_time:637212ms step_avg:77.89ms +[2025-09-02 09:36:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:36:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:36:47] [Rank 0] PRINT: step:8200/10000 val_loss:3.6792 svd_entropy: attn_qk:H=0.7761,top10E=0.26,eRank=177.8,q75/q25=54.38 attn_vo:H=0.8543,top10E=0.13,eRank=314.9,q75/q25=37.14 mlp_w1:H=0.9142,top10E=0.13,eRank=437.0,q75/q25=4.21 mlp_w2:H=0.9716,top10E=0.04,eRank=636.2,q75/q25=2.82 vo_prod:H=0.7579,top10E=0.22,eRank=160.6,q75/q25=1603.43 train_time:638994ms step_avg:77.93ms +[2025-09-02 09:36:47] [Rank 0] PRINT: step:8200/10000 val_loss:3.6792 svd_entropy: attn_qk:H=0.7761,top10E=0.26,eRank=177.8,q75/q25=54.38 attn_vo:H=0.8543,top10E=0.13,eRank=314.9,q75/q25=37.14 mlp_w1:H=0.9142,top10E=0.13,eRank=437.0,q75/q25=4.21 mlp_w2:H=0.9716,top10E=0.04,eRank=636.2,q75/q25=2.82 vo_prod:H=0.7579,top10E=0.22,eRank=160.6,q75/q25=1603.43 train_time:638994ms step_avg:77.93ms +[2025-09-02 09:36:47] [Rank 0] step:8201/10000 train_time:639008ms step_avg:77.92ms +[2025-09-02 09:36:47] [Rank 0] step:8201/10000 train_time:639008ms step_avg:77.92ms +[2025-09-02 09:36:49] [Rank 0] step:8221/10000 train_time:640604ms step_avg:77.92ms +[2025-09-02 09:36:49] [Rank 0] step:8221/10000 train_time:640604ms step_avg:77.92ms +[2025-09-02 09:36:51] [Rank 0] step:8241/10000 train_time:642280ms 
step_avg:77.94ms +[2025-09-02 09:36:51] [Rank 0] step:8241/10000 train_time:642280ms step_avg:77.94ms +[2025-09-02 09:36:53] [Rank 0] step:8261/10000 train_time:643954ms step_avg:77.95ms +[2025-09-02 09:36:53] [Rank 0] step:8261/10000 train_time:643954ms step_avg:77.95ms +[2025-09-02 09:36:54] [Rank 0] step:8281/10000 train_time:645623ms step_avg:77.96ms +[2025-09-02 09:36:54] [Rank 0] step:8281/10000 train_time:645623ms step_avg:77.96ms +[2025-09-02 09:36:56] [Rank 0] step:8301/10000 train_time:647293ms step_avg:77.98ms +[2025-09-02 09:36:56] [Rank 0] step:8301/10000 train_time:647293ms step_avg:77.98ms +[2025-09-02 09:36:58] [Rank 0] step:8321/10000 train_time:648956ms step_avg:77.99ms +[2025-09-02 09:36:58] [Rank 0] step:8321/10000 train_time:648956ms step_avg:77.99ms +[2025-09-02 09:36:59] [Rank 0] step:8341/10000 train_time:650631ms step_avg:78.00ms +[2025-09-02 09:36:59] [Rank 0] step:8341/10000 train_time:650631ms step_avg:78.00ms +[2025-09-02 09:37:01] [Rank 0] step:8361/10000 train_time:652306ms step_avg:78.02ms +[2025-09-02 09:37:01] [Rank 0] step:8361/10000 train_time:652306ms step_avg:78.02ms +[2025-09-02 09:37:03] [Rank 0] step:8381/10000 train_time:653972ms step_avg:78.03ms +[2025-09-02 09:37:03] [Rank 0] step:8381/10000 train_time:653972ms step_avg:78.03ms +[2025-09-02 09:37:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:37:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:37:16] [Rank 0] PRINT: step:8400/10000 val_loss:3.6674 svd_entropy: attn_qk:H=0.7764,top10E=0.26,eRank=178.2,q75/q25=53.99 attn_vo:H=0.8547,top10E=0.13,eRank=315.6,q75/q25=36.93 mlp_w1:H=0.9146,top10E=0.13,eRank=438.0,q75/q25=4.20 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7587,top10E=0.22,eRank=161.4,q75/q25=1598.14 train_time:655726ms step_avg:78.06ms +[2025-09-02 09:37:16] [Rank 0] PRINT: step:8400/10000 val_loss:3.6674 svd_entropy: attn_qk:H=0.7764,top10E=0.26,eRank=178.2,q75/q25=53.99 attn_vo:H=0.8547,top10E=0.13,eRank=315.6,q75/q25=36.93 mlp_w1:H=0.9146,top10E=0.13,eRank=438.0,q75/q25=4.20 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7587,top10E=0.22,eRank=161.4,q75/q25=1598.14 train_time:655726ms step_avg:78.06ms +[2025-09-02 09:37:16] [Rank 0] step:8401/10000 train_time:655739ms step_avg:78.05ms +[2025-09-02 09:37:16] [Rank 0] step:8401/10000 train_time:655739ms step_avg:78.05ms +[2025-09-02 09:37:18] [Rank 0] step:8421/10000 train_time:657340ms step_avg:78.06ms +[2025-09-02 09:37:18] [Rank 0] step:8421/10000 train_time:657340ms step_avg:78.06ms +[2025-09-02 09:37:19] [Rank 0] step:8441/10000 train_time:659004ms step_avg:78.07ms +[2025-09-02 09:37:19] [Rank 0] step:8441/10000 train_time:659004ms step_avg:78.07ms +[2025-09-02 09:37:21] [Rank 0] step:8461/10000 train_time:660672ms step_avg:78.08ms +[2025-09-02 09:37:21] [Rank 0] step:8461/10000 train_time:660672ms step_avg:78.08ms +[2025-09-02 09:37:23] [Rank 0] step:8481/10000 train_time:662345ms step_avg:78.10ms +[2025-09-02 09:37:23] [Rank 0] step:8481/10000 train_time:662345ms step_avg:78.10ms +[2025-09-02 09:37:24] [Rank 0] step:8501/10000 train_time:664036ms step_avg:78.11ms +[2025-09-02 09:37:24] [Rank 0] step:8501/10000 train_time:664036ms step_avg:78.11ms +[2025-09-02 09:37:26] [Rank 0] step:8521/10000 train_time:665710ms step_avg:78.13ms +[2025-09-02 09:37:26] [Rank 0] step:8521/10000 train_time:665710ms step_avg:78.13ms +[2025-09-02 
09:37:28] [Rank 0] step:8541/10000 train_time:667391ms step_avg:78.14ms +[2025-09-02 09:37:28] [Rank 0] step:8541/10000 train_time:667391ms step_avg:78.14ms +[2025-09-02 09:37:29] [Rank 0] step:8561/10000 train_time:669062ms step_avg:78.15ms +[2025-09-02 09:37:29] [Rank 0] step:8561/10000 train_time:669062ms step_avg:78.15ms +[2025-09-02 09:37:31] [Rank 0] step:8581/10000 train_time:670735ms step_avg:78.17ms +[2025-09-02 09:37:31] [Rank 0] step:8581/10000 train_time:670735ms step_avg:78.17ms +[2025-09-02 09:37:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:37:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:37:44] [Rank 0] PRINT: step:8600/10000 val_loss:3.6574 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.5,q75/q25=53.61 attn_vo:H=0.8550,top10E=0.13,eRank=316.2,q75/q25=36.78 mlp_w1:H=0.9149,top10E=0.13,eRank=438.9,q75/q25=4.19 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7593,top10E=0.22,eRank=162.0,q75/q25=1583.88 train_time:672482ms step_avg:78.20ms +[2025-09-02 09:37:44] [Rank 0] PRINT: step:8600/10000 val_loss:3.6574 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.5,q75/q25=53.61 attn_vo:H=0.8550,top10E=0.13,eRank=316.2,q75/q25=36.78 mlp_w1:H=0.9149,top10E=0.13,eRank=438.9,q75/q25=4.19 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7593,top10E=0.22,eRank=162.0,q75/q25=1583.88 train_time:672482ms step_avg:78.20ms +[2025-09-02 09:37:45] [Rank 0] step:8601/10000 train_time:672496ms step_avg:78.19ms +[2025-09-02 09:37:45] [Rank 0] step:8601/10000 train_time:672496ms step_avg:78.19ms +[2025-09-02 09:37:46] [Rank 0] step:8621/10000 train_time:674094ms step_avg:78.19ms +[2025-09-02 09:37:46] [Rank 0] step:8621/10000 train_time:674094ms step_avg:78.19ms +[2025-09-02 09:37:48] [Rank 0] step:8641/10000 train_time:675764ms 
step_avg:78.20ms +[2025-09-02 09:37:48] [Rank 0] step:8641/10000 train_time:675764ms step_avg:78.20ms +[2025-09-02 09:37:50] [Rank 0] step:8661/10000 train_time:677434ms step_avg:78.22ms +[2025-09-02 09:37:50] [Rank 0] step:8661/10000 train_time:677434ms step_avg:78.22ms +[2025-09-02 09:37:51] [Rank 0] step:8681/10000 train_time:679106ms step_avg:78.23ms +[2025-09-02 09:37:51] [Rank 0] step:8681/10000 train_time:679106ms step_avg:78.23ms +[2025-09-02 09:37:53] [Rank 0] step:8701/10000 train_time:680769ms step_avg:78.24ms +[2025-09-02 09:37:53] [Rank 0] step:8701/10000 train_time:680769ms step_avg:78.24ms +[2025-09-02 09:37:55] [Rank 0] step:8721/10000 train_time:682442ms step_avg:78.25ms +[2025-09-02 09:37:55] [Rank 0] step:8721/10000 train_time:682442ms step_avg:78.25ms +[2025-09-02 09:37:56] [Rank 0] step:8741/10000 train_time:684102ms step_avg:78.26ms +[2025-09-02 09:37:56] [Rank 0] step:8741/10000 train_time:684102ms step_avg:78.26ms +[2025-09-02 09:37:58] [Rank 0] step:8761/10000 train_time:685774ms step_avg:78.28ms +[2025-09-02 09:37:58] [Rank 0] step:8761/10000 train_time:685774ms step_avg:78.28ms +[2025-09-02 09:38:00] [Rank 0] step:8781/10000 train_time:687451ms step_avg:78.29ms +[2025-09-02 09:38:00] [Rank 0] step:8781/10000 train_time:687451ms step_avg:78.29ms +[2025-09-02 09:38:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:38:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:38:13] [Rank 0] PRINT: step:8800/10000 val_loss:3.6480 svd_entropy: attn_qk:H=0.7770,top10E=0.26,eRank=178.8,q75/q25=53.59 attn_vo:H=0.8553,top10E=0.13,eRank=316.7,q75/q25=36.55 mlp_w1:H=0.9152,top10E=0.13,eRank=439.7,q75/q25=4.18 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7599,top10E=0.22,eRank=162.7,q75/q25=1582.27 train_time:689213ms step_avg:78.32ms +[2025-09-02 09:38:13] [Rank 0] PRINT: step:8800/10000 val_loss:3.6480 svd_entropy: attn_qk:H=0.7770,top10E=0.26,eRank=178.8,q75/q25=53.59 attn_vo:H=0.8553,top10E=0.13,eRank=316.7,q75/q25=36.55 mlp_w1:H=0.9152,top10E=0.13,eRank=439.7,q75/q25=4.18 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.83 vo_prod:H=0.7599,top10E=0.22,eRank=162.7,q75/q25=1582.27 train_time:689213ms step_avg:78.32ms +[2025-09-02 09:38:13] [Rank 0] step:8801/10000 train_time:689227ms step_avg:78.31ms +[2025-09-02 09:38:13] [Rank 0] step:8801/10000 train_time:689227ms step_avg:78.31ms +[2025-09-02 09:38:15] [Rank 0] step:8821/10000 train_time:690820ms step_avg:78.32ms +[2025-09-02 09:38:15] [Rank 0] step:8821/10000 train_time:690820ms step_avg:78.32ms +[2025-09-02 09:38:16] [Rank 0] step:8841/10000 train_time:692510ms step_avg:78.33ms +[2025-09-02 09:38:16] [Rank 0] step:8841/10000 train_time:692510ms step_avg:78.33ms +[2025-09-02 09:38:18] [Rank 0] step:8861/10000 train_time:694179ms step_avg:78.34ms +[2025-09-02 09:38:18] [Rank 0] step:8861/10000 train_time:694179ms step_avg:78.34ms +[2025-09-02 09:38:20] [Rank 0] step:8881/10000 train_time:695852ms step_avg:78.35ms +[2025-09-02 09:38:20] [Rank 0] step:8881/10000 train_time:695852ms step_avg:78.35ms +[2025-09-02 09:38:21] [Rank 0] step:8901/10000 train_time:697524ms step_avg:78.36ms +[2025-09-02 09:38:21] [Rank 0] step:8901/10000 train_time:697524ms step_avg:78.36ms +[2025-09-02 09:38:23] [Rank 0] step:8921/10000 train_time:699210ms step_avg:78.38ms +[2025-09-02 09:38:23] [Rank 0] step:8921/10000 train_time:699210ms step_avg:78.38ms +[2025-09-02 
09:38:25] [Rank 0] step:8941/10000 train_time:700889ms step_avg:78.39ms +[2025-09-02 09:38:25] [Rank 0] step:8941/10000 train_time:700889ms step_avg:78.39ms +[2025-09-02 09:38:26] [Rank 0] step:8961/10000 train_time:702556ms step_avg:78.40ms +[2025-09-02 09:38:26] [Rank 0] step:8961/10000 train_time:702556ms step_avg:78.40ms +[2025-09-02 09:38:28] [Rank 0] step:8981/10000 train_time:704227ms step_avg:78.41ms +[2025-09-02 09:38:28] [Rank 0] step:8981/10000 train_time:704227ms step_avg:78.41ms +[2025-09-02 09:38:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:38:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:38:41] [Rank 0] PRINT: step:9000/10000 val_loss:3.6388 svd_entropy: attn_qk:H=0.7772,top10E=0.26,eRank=179.0,q75/q25=53.54 attn_vo:H=0.8556,top10E=0.13,eRank=317.2,q75/q25=36.41 mlp_w1:H=0.9154,top10E=0.13,eRank=440.4,q75/q25=4.17 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7605,top10E=0.22,eRank=163.2,q75/q25=1587.90 train_time:705983ms step_avg:78.44ms +[2025-09-02 09:38:41] [Rank 0] PRINT: step:9000/10000 val_loss:3.6388 svd_entropy: attn_qk:H=0.7772,top10E=0.26,eRank=179.0,q75/q25=53.54 attn_vo:H=0.8556,top10E=0.13,eRank=317.2,q75/q25=36.41 mlp_w1:H=0.9154,top10E=0.13,eRank=440.4,q75/q25=4.17 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7605,top10E=0.22,eRank=163.2,q75/q25=1587.90 train_time:705983ms step_avg:78.44ms +[2025-09-02 09:38:41] [Rank 0] step:9001/10000 train_time:705998ms step_avg:78.44ms +[2025-09-02 09:38:41] [Rank 0] step:9001/10000 train_time:705998ms step_avg:78.44ms +[2025-09-02 09:38:43] [Rank 0] step:9021/10000 train_time:707601ms step_avg:78.44ms +[2025-09-02 09:38:43] [Rank 0] step:9021/10000 train_time:707601ms step_avg:78.44ms +[2025-09-02 09:38:45] [Rank 0] step:9041/10000 train_time:709278ms 
step_avg:78.45ms +[2025-09-02 09:38:45] [Rank 0] step:9041/10000 train_time:709278ms step_avg:78.45ms +[2025-09-02 09:38:46] [Rank 0] step:9061/10000 train_time:710958ms step_avg:78.46ms +[2025-09-02 09:38:46] [Rank 0] step:9061/10000 train_time:710958ms step_avg:78.46ms +[2025-09-02 09:38:48] [Rank 0] step:9081/10000 train_time:712644ms step_avg:78.48ms +[2025-09-02 09:38:48] [Rank 0] step:9081/10000 train_time:712644ms step_avg:78.48ms +[2025-09-02 09:38:50] [Rank 0] step:9101/10000 train_time:714334ms step_avg:78.49ms +[2025-09-02 09:38:50] [Rank 0] step:9101/10000 train_time:714334ms step_avg:78.49ms +[2025-09-02 09:38:52] [Rank 0] step:9121/10000 train_time:716011ms step_avg:78.50ms +[2025-09-02 09:38:52] [Rank 0] step:9121/10000 train_time:716011ms step_avg:78.50ms +[2025-09-02 09:38:53] [Rank 0] step:9141/10000 train_time:717677ms step_avg:78.51ms +[2025-09-02 09:38:53] [Rank 0] step:9141/10000 train_time:717677ms step_avg:78.51ms +[2025-09-02 09:38:55] [Rank 0] step:9161/10000 train_time:719348ms step_avg:78.52ms +[2025-09-02 09:38:55] [Rank 0] step:9161/10000 train_time:719348ms step_avg:78.52ms +[2025-09-02 09:38:57] [Rank 0] step:9181/10000 train_time:721055ms step_avg:78.54ms +[2025-09-02 09:38:57] [Rank 0] step:9181/10000 train_time:721055ms step_avg:78.54ms +[2025-09-02 09:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:39:10] [Rank 0] PRINT: step:9200/10000 val_loss:3.6305 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.2,q75/q25=53.31 attn_vo:H=0.8559,top10E=0.13,eRank=317.6,q75/q25=36.30 mlp_w1:H=0.9156,top10E=0.13,eRank=441.0,q75/q25=4.16 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7609,top10E=0.22,eRank=163.7,q75/q25=1574.34 train_time:722808ms step_avg:78.57ms +[2025-09-02 09:39:10] [Rank 0] PRINT: step:9200/10000 val_loss:3.6305 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.2,q75/q25=53.31 attn_vo:H=0.8559,top10E=0.13,eRank=317.6,q75/q25=36.30 mlp_w1:H=0.9156,top10E=0.13,eRank=441.0,q75/q25=4.16 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7609,top10E=0.22,eRank=163.7,q75/q25=1574.34 train_time:722808ms step_avg:78.57ms +[2025-09-02 09:39:10] [Rank 0] step:9201/10000 train_time:722822ms step_avg:78.56ms +[2025-09-02 09:39:10] [Rank 0] step:9201/10000 train_time:722822ms step_avg:78.56ms +[2025-09-02 09:39:12] [Rank 0] step:9221/10000 train_time:724422ms step_avg:78.56ms +[2025-09-02 09:39:12] [Rank 0] step:9221/10000 train_time:724422ms step_avg:78.56ms +[2025-09-02 09:39:13] [Rank 0] step:9241/10000 train_time:726103ms step_avg:78.57ms +[2025-09-02 09:39:13] [Rank 0] step:9241/10000 train_time:726103ms step_avg:78.57ms +[2025-09-02 09:39:15] [Rank 0] step:9261/10000 train_time:727785ms step_avg:78.59ms +[2025-09-02 09:39:15] [Rank 0] step:9261/10000 train_time:727785ms step_avg:78.59ms +[2025-09-02 09:39:17] [Rank 0] step:9281/10000 train_time:729451ms step_avg:78.60ms +[2025-09-02 09:39:17] [Rank 0] step:9281/10000 train_time:729451ms step_avg:78.60ms +[2025-09-02 09:39:18] [Rank 0] step:9301/10000 train_time:731128ms step_avg:78.61ms +[2025-09-02 09:39:18] [Rank 0] step:9301/10000 train_time:731128ms step_avg:78.61ms +[2025-09-02 09:39:20] [Rank 0] step:9321/10000 train_time:732808ms step_avg:78.62ms +[2025-09-02 09:39:20] [Rank 0] step:9321/10000 train_time:732808ms step_avg:78.62ms +[2025-09-02 
09:39:22] [Rank 0] step:9341/10000 train_time:734483ms step_avg:78.63ms +[2025-09-02 09:39:22] [Rank 0] step:9341/10000 train_time:734483ms step_avg:78.63ms +[2025-09-02 09:39:23] [Rank 0] step:9361/10000 train_time:736167ms step_avg:78.64ms +[2025-09-02 09:39:23] [Rank 0] step:9361/10000 train_time:736167ms step_avg:78.64ms +[2025-09-02 09:39:25] [Rank 0] step:9381/10000 train_time:737854ms step_avg:78.65ms +[2025-09-02 09:39:25] [Rank 0] step:9381/10000 train_time:737854ms step_avg:78.65ms +[2025-09-02 09:39:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:39:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:39:39] [Rank 0] PRINT: step:9400/10000 val_loss:3.6227 svd_entropy: attn_qk:H=0.7776,top10E=0.26,eRank=179.3,q75/q25=53.24 attn_vo:H=0.8561,top10E=0.13,eRank=318.0,q75/q25=36.21 mlp_w1:H=0.9158,top10E=0.13,eRank=441.5,q75/q25=4.15 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7613,top10E=0.21,eRank=164.1,q75/q25=1565.83 train_time:739624ms step_avg:78.68ms +[2025-09-02 09:39:39] [Rank 0] PRINT: step:9400/10000 val_loss:3.6227 svd_entropy: attn_qk:H=0.7776,top10E=0.26,eRank=179.3,q75/q25=53.24 attn_vo:H=0.8561,top10E=0.13,eRank=318.0,q75/q25=36.21 mlp_w1:H=0.9158,top10E=0.13,eRank=441.5,q75/q25=4.15 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7613,top10E=0.21,eRank=164.1,q75/q25=1565.83 train_time:739624ms step_avg:78.68ms +[2025-09-02 09:39:39] [Rank 0] step:9401/10000 train_time:739638ms step_avg:78.68ms +[2025-09-02 09:39:39] [Rank 0] step:9401/10000 train_time:739638ms step_avg:78.68ms +[2025-09-02 09:39:40] [Rank 0] step:9421/10000 train_time:741232ms step_avg:78.68ms +[2025-09-02 09:39:40] [Rank 0] step:9421/10000 train_time:741232ms step_avg:78.68ms +[2025-09-02 09:39:42] [Rank 0] step:9441/10000 train_time:742907ms 
step_avg:78.69ms +[2025-09-02 09:39:42] [Rank 0] step:9441/10000 train_time:742907ms step_avg:78.69ms +[2025-09-02 09:39:44] [Rank 0] step:9461/10000 train_time:744586ms step_avg:78.70ms +[2025-09-02 09:39:44] [Rank 0] step:9461/10000 train_time:744586ms step_avg:78.70ms +[2025-09-02 09:39:45] [Rank 0] step:9481/10000 train_time:746262ms step_avg:78.71ms +[2025-09-02 09:39:45] [Rank 0] step:9481/10000 train_time:746262ms step_avg:78.71ms +[2025-09-02 09:39:47] [Rank 0] step:9501/10000 train_time:747953ms step_avg:78.72ms +[2025-09-02 09:39:47] [Rank 0] step:9501/10000 train_time:747953ms step_avg:78.72ms +[2025-09-02 09:39:49] [Rank 0] step:9521/10000 train_time:749627ms step_avg:78.73ms +[2025-09-02 09:39:49] [Rank 0] step:9521/10000 train_time:749627ms step_avg:78.73ms +[2025-09-02 09:39:50] [Rank 0] step:9541/10000 train_time:751303ms step_avg:78.74ms +[2025-09-02 09:39:50] [Rank 0] step:9541/10000 train_time:751303ms step_avg:78.74ms +[2025-09-02 09:39:52] [Rank 0] step:9561/10000 train_time:752973ms step_avg:78.75ms +[2025-09-02 09:39:52] [Rank 0] step:9561/10000 train_time:752973ms step_avg:78.75ms +[2025-09-02 09:39:54] [Rank 0] step:9581/10000 train_time:754646ms step_avg:78.76ms +[2025-09-02 09:39:54] [Rank 0] step:9581/10000 train_time:754646ms step_avg:78.76ms +[2025-09-02 09:39:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:39:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:40:07] [Rank 0] PRINT: step:9600/10000 val_loss:3.6165 svd_entropy: attn_qk:H=0.7777,top10E=0.26,eRank=179.5,q75/q25=53.14 attn_vo:H=0.8562,top10E=0.12,eRank=318.3,q75/q25=36.02 mlp_w1:H=0.9160,top10E=0.13,eRank=441.9,q75/q25=4.15 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7617,top10E=0.21,eRank=164.5,q75/q25=1569.20 train_time:756418ms step_avg:78.79ms +[2025-09-02 09:40:07] [Rank 0] PRINT: step:9600/10000 val_loss:3.6165 svd_entropy: attn_qk:H=0.7777,top10E=0.26,eRank=179.5,q75/q25=53.14 attn_vo:H=0.8562,top10E=0.12,eRank=318.3,q75/q25=36.02 mlp_w1:H=0.9160,top10E=0.13,eRank=441.9,q75/q25=4.15 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7617,top10E=0.21,eRank=164.5,q75/q25=1569.20 train_time:756418ms step_avg:78.79ms +[2025-09-02 09:40:07] [Rank 0] step:9601/10000 train_time:756433ms step_avg:78.79ms +[2025-09-02 09:40:07] [Rank 0] step:9601/10000 train_time:756433ms step_avg:78.79ms +[2025-09-02 09:40:09] [Rank 0] step:9621/10000 train_time:758031ms step_avg:78.79ms +[2025-09-02 09:40:09] [Rank 0] step:9621/10000 train_time:758031ms step_avg:78.79ms +[2025-09-02 09:40:11] [Rank 0] step:9641/10000 train_time:759711ms step_avg:78.80ms +[2025-09-02 09:40:11] [Rank 0] step:9641/10000 train_time:759711ms step_avg:78.80ms +[2025-09-02 09:40:12] [Rank 0] step:9661/10000 train_time:761414ms step_avg:78.81ms +[2025-09-02 09:40:12] [Rank 0] step:9661/10000 train_time:761414ms step_avg:78.81ms +[2025-09-02 09:40:14] [Rank 0] step:9681/10000 train_time:763108ms step_avg:78.83ms +[2025-09-02 09:40:14] [Rank 0] step:9681/10000 train_time:763108ms step_avg:78.83ms +[2025-09-02 09:40:16] [Rank 0] step:9701/10000 train_time:764820ms step_avg:78.84ms +[2025-09-02 09:40:16] [Rank 0] step:9701/10000 train_time:764820ms step_avg:78.84ms +[2025-09-02 09:40:17] [Rank 0] step:9721/10000 train_time:766512ms step_avg:78.85ms +[2025-09-02 09:40:17] [Rank 0] step:9721/10000 train_time:766512ms step_avg:78.85ms +[2025-09-02 
09:40:19] [Rank 0] step:9741/10000 train_time:768232ms step_avg:78.87ms +[2025-09-02 09:40:19] [Rank 0] step:9741/10000 train_time:768232ms step_avg:78.87ms +[2025-09-02 09:40:21] [Rank 0] step:9761/10000 train_time:769934ms step_avg:78.88ms +[2025-09-02 09:40:21] [Rank 0] step:9761/10000 train_time:769934ms step_avg:78.88ms +[2025-09-02 09:40:23] [Rank 0] step:9781/10000 train_time:771641ms step_avg:78.89ms +[2025-09-02 09:40:23] [Rank 0] step:9781/10000 train_time:771641ms step_avg:78.89ms +[2025-09-02 09:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:40:36] [Rank 0] PRINT: step:9800/10000 val_loss:3.6092 svd_entropy: attn_qk:H=0.7778,top10E=0.26,eRank=179.6,q75/q25=53.03 attn_vo:H=0.8564,top10E=0.12,eRank=318.5,q75/q25=36.01 mlp_w1:H=0.9161,top10E=0.13,eRank=442.2,q75/q25=4.14 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7620,top10E=0.21,eRank=164.8,q75/q25=1582.99 train_time:773442ms step_avg:78.92ms +[2025-09-02 09:40:36] [Rank 0] PRINT: step:9800/10000 val_loss:3.6092 svd_entropy: attn_qk:H=0.7778,top10E=0.26,eRank=179.6,q75/q25=53.03 attn_vo:H=0.8564,top10E=0.12,eRank=318.5,q75/q25=36.01 mlp_w1:H=0.9161,top10E=0.13,eRank=442.2,q75/q25=4.14 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7620,top10E=0.21,eRank=164.8,q75/q25=1582.99 train_time:773442ms step_avg:78.92ms +[2025-09-02 09:40:36] [Rank 0] step:9801/10000 train_time:773457ms step_avg:78.92ms +[2025-09-02 09:40:36] [Rank 0] step:9801/10000 train_time:773457ms step_avg:78.92ms +[2025-09-02 09:40:38] [Rank 0] step:9821/10000 train_time:775087ms step_avg:78.92ms +[2025-09-02 09:40:38] [Rank 0] step:9821/10000 train_time:775087ms step_avg:78.92ms +[2025-09-02 09:40:40] [Rank 0] step:9841/10000 train_time:776799ms 
step_avg:78.93ms +[2025-09-02 09:40:40] [Rank 0] step:9841/10000 train_time:776799ms step_avg:78.93ms +[2025-09-02 09:40:41] [Rank 0] step:9861/10000 train_time:778489ms step_avg:78.95ms +[2025-09-02 09:40:41] [Rank 0] step:9861/10000 train_time:778489ms step_avg:78.95ms +[2025-09-02 09:40:43] [Rank 0] step:9881/10000 train_time:780178ms step_avg:78.96ms +[2025-09-02 09:40:43] [Rank 0] step:9881/10000 train_time:780178ms step_avg:78.96ms +[2025-09-02 09:40:45] [Rank 0] step:9901/10000 train_time:781878ms step_avg:78.97ms +[2025-09-02 09:40:45] [Rank 0] step:9901/10000 train_time:781878ms step_avg:78.97ms +[2025-09-02 09:40:46] [Rank 0] step:9921/10000 train_time:783578ms step_avg:78.98ms +[2025-09-02 09:40:46] [Rank 0] step:9921/10000 train_time:783578ms step_avg:78.98ms +[2025-09-02 09:40:48] [Rank 0] step:9941/10000 train_time:785280ms step_avg:78.99ms +[2025-09-02 09:40:48] [Rank 0] step:9941/10000 train_time:785280ms step_avg:78.99ms +[2025-09-02 09:40:50] [Rank 0] step:9961/10000 train_time:786980ms step_avg:79.01ms +[2025-09-02 09:40:50] [Rank 0] step:9961/10000 train_time:786980ms step_avg:79.01ms +[2025-09-02 09:40:51] [Rank 0] step:9981/10000 train_time:788723ms step_avg:79.02ms +[2025-09-02 09:40:51] [Rank 0] step:9981/10000 train_time:788723ms step_avg:79.02ms +[2025-09-02 09:40:53] [Rank 0] step:10000/10000 train_time:790342ms step_avg:79.03ms +[2025-09-02 09:40:53] [Rank 0] step:10000/10000 train_time:790342ms step_avg:79.03ms +[2025-09-02 09:40:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:40:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:41:05] [Rank 0] PRINT: step:10000/10000 val_loss:3.6035 svd_entropy: attn_qk:H=0.7778,top10E=0.26,eRank=179.6,q75/q25=53.03 attn_vo:H=0.8565,top10E=0.12,eRank=318.7,q75/q25=35.94 mlp_w1:H=0.9161,top10E=0.13,eRank=442.5,q75/q25=4.14 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7622,top10E=0.21,eRank=165.0,q75/q25=1574.23 train_time:790525ms step_avg:79.05ms +[2025-09-02 09:41:05] [Rank 0] PRINT: step:10000/10000 val_loss:3.6035 svd_entropy: attn_qk:H=0.7778,top10E=0.26,eRank=179.6,q75/q25=53.03 attn_vo:H=0.8565,top10E=0.12,eRank=318.7,q75/q25=35.94 mlp_w1:H=0.9161,top10E=0.13,eRank=442.5,q75/q25=4.14 mlp_w2:H=0.9716,top10E=0.04,eRank=636.1,q75/q25=2.82 vo_prod:H=0.7622,top10E=0.21,eRank=165.0,q75/q25=1574.23 train_time:790525ms step_avg:79.05ms +[2025-09-02 09:41:05] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 09:41:05 2025 --- +[2025-09-02 09:41:05] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 09:41:05 2025 --- +[2025-09-02 09:41:05] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15076 MiB +[2025-09-02 09:41:05] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15076 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_46/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0fc79a4e1946b1ba3748a022e5141a7431b7e401 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_46/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "d5592959-1b90-4674-ab88-e1e8c69db221", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_46/training_log_d5592959-1b90-4674-ab88-e1e8c69db221.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_46/training_log_d5592959-1b90-4674-ab88-e1e8c69db221.txt new file mode 100644 index 0000000000000000000000000000000000000000..4a55844d71f53dac394a59ea66c49343ba5622d0 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_46/training_log_d5592959-1b90-4674-ab88-e1e8c69db221.txt @@ -0,0 +1,2984 @@ +[2025-09-02 13:36:21] [Rank 0] PRINT: --- Script Start: Tue Sep 2 13:36:21 2025 --- +[2025-09-02 13:36:21] [Rank 0] PRINT: --- Script Start: Tue Sep 2 13:36:21 2025 --- +[2025-09-02 13:36:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 13:36:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 13:36:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 13:36:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 13:36:22] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-02 13:36:22] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-02 13:36:22] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_46 +[2025-09-02 13:36:22] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_46 +[2025-09-02 13:36:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 13:36:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 13:36:22] [Rank 0] PRINT: Constructing model... +[2025-09-02 13:36:22] [Rank 0] PRINT: Constructing model... +[2025-09-02 13:36:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 13:36:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 13:36:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 13:36:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 13:36:23] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 13:36:23] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 13:36:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 13:36:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 13:36:23] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 13:36:23] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 13:36:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 13:36:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 13:36:23] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 13:36:23] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 13:36:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 13:36:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 13:36:23] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 13:36:23] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 13:36:23] [Rank 0] PRINT: Starting warmup... +[2025-09-02 13:36:23] [Rank 0] PRINT: Starting warmup... +[2025-09-02 13:44:52] [Rank 0] PRINT: Warmup complete. +[2025-09-02 13:44:52] [Rank 0] PRINT: Warmup complete. +[2025-09-02 13:44:53] [Rank 0] PRINT: Starting training... +[2025-09-02 13:44:53] [Rank 0] PRINT: Starting training... 
+[2025-09-02 13:44:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:44:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:52:04] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 13:52:04] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 13:52:05] [Rank 0] step:21/10000 train_time:1469ms step_avg:69.96ms +[2025-09-02 13:52:05] [Rank 0] step:21/10000 train_time:1469ms step_avg:69.96ms +[2025-09-02 13:52:07] [Rank 0] step:41/10000 train_time:2919ms step_avg:71.20ms +[2025-09-02 13:52:07] [Rank 0] step:41/10000 train_time:2919ms step_avg:71.20ms +[2025-09-02 13:52:08] [Rank 0] step:61/10000 train_time:4372ms step_avg:71.68ms +[2025-09-02 13:52:08] [Rank 0] step:61/10000 train_time:4372ms step_avg:71.68ms +[2025-09-02 13:52:10] [Rank 0] step:81/10000 train_time:5824ms step_avg:71.90ms +[2025-09-02 13:52:10] [Rank 0] step:81/10000 train_time:5824ms step_avg:71.90ms +[2025-09-02 13:52:11] [Rank 0] step:101/10000 train_time:7276ms step_avg:72.04ms +[2025-09-02 13:52:11] [Rank 0] step:101/10000 train_time:7276ms step_avg:72.04ms +[2025-09-02 13:52:13] [Rank 0] step:121/10000 train_time:8730ms step_avg:72.15ms +[2025-09-02 13:52:13] [Rank 0] step:121/10000 
train_time:8730ms step_avg:72.15ms +[2025-09-02 13:52:14] [Rank 0] step:141/10000 train_time:10185ms step_avg:72.24ms +[2025-09-02 13:52:14] [Rank 0] step:141/10000 train_time:10185ms step_avg:72.24ms +[2025-09-02 13:52:16] [Rank 0] step:161/10000 train_time:11638ms step_avg:72.29ms +[2025-09-02 13:52:16] [Rank 0] step:161/10000 train_time:11638ms step_avg:72.29ms +[2025-09-02 13:52:17] [Rank 0] step:181/10000 train_time:13091ms step_avg:72.33ms +[2025-09-02 13:52:17] [Rank 0] step:181/10000 train_time:13091ms step_avg:72.33ms +[2025-09-02 13:52:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:52:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:52:30] [Rank 0] PRINT: step:200/10000 val_loss:6.2193 svd_entropy: attn_qk:H=0.6125,top10E=0.54,eRank=98.9,q75/q25=13.13 attn_vo:H=0.5176,top10E=0.57,eRank=77.3,q75/q25=inf mlp_w1:H=0.6638,top10E=0.51,eRank=98.9,q75/q25=2.96 mlp_w2:H=0.8071,top10E=0.18,eRank=218.1,q75/q25=16.72 vo_prod:H=0.3275,top10E=0.80,eRank=14.8,q75/q25=inf train_time:14621ms step_avg:73.10ms +[2025-09-02 13:52:30] [Rank 0] PRINT: step:200/10000 val_loss:6.2193 svd_entropy: attn_qk:H=0.6125,top10E=0.54,eRank=98.9,q75/q25=13.13 attn_vo:H=0.5176,top10E=0.57,eRank=77.3,q75/q25=inf mlp_w1:H=0.6638,top10E=0.51,eRank=98.9,q75/q25=2.96 mlp_w2:H=0.8071,top10E=0.18,eRank=218.1,q75/q25=16.72 vo_prod:H=0.3275,top10E=0.80,eRank=14.8,q75/q25=inf train_time:14621ms step_avg:73.10ms +[2025-09-02 13:52:30] [Rank 0] step:201/10000 train_time:14636ms step_avg:72.81ms +[2025-09-02 13:52:30] [Rank 0] step:201/10000 train_time:14636ms step_avg:72.81ms +[2025-09-02 13:52:32] [Rank 0] step:221/10000 train_time:16013ms step_avg:72.46ms +[2025-09-02 13:52:32] [Rank 0] step:221/10000 train_time:16013ms step_avg:72.46ms +[2025-09-02 13:52:33] [Rank 0] step:241/10000 train_time:17463ms 
step_avg:72.46ms +[2025-09-02 13:52:33] [Rank 0] step:241/10000 train_time:17463ms step_avg:72.46ms +[2025-09-02 13:52:35] [Rank 0] step:261/10000 train_time:18913ms step_avg:72.46ms +[2025-09-02 13:52:35] [Rank 0] step:261/10000 train_time:18913ms step_avg:72.46ms +[2025-09-02 13:52:36] [Rank 0] step:281/10000 train_time:20361ms step_avg:72.46ms +[2025-09-02 13:52:36] [Rank 0] step:281/10000 train_time:20361ms step_avg:72.46ms +[2025-09-02 13:52:38] [Rank 0] step:301/10000 train_time:21810ms step_avg:72.46ms +[2025-09-02 13:52:38] [Rank 0] step:301/10000 train_time:21810ms step_avg:72.46ms +[2025-09-02 13:52:39] [Rank 0] step:321/10000 train_time:23259ms step_avg:72.46ms +[2025-09-02 13:52:39] [Rank 0] step:321/10000 train_time:23259ms step_avg:72.46ms +[2025-09-02 13:52:40] [Rank 0] step:341/10000 train_time:24711ms step_avg:72.47ms +[2025-09-02 13:52:40] [Rank 0] step:341/10000 train_time:24711ms step_avg:72.47ms +[2025-09-02 13:52:42] [Rank 0] step:361/10000 train_time:26159ms step_avg:72.46ms +[2025-09-02 13:52:42] [Rank 0] step:361/10000 train_time:26159ms step_avg:72.46ms +[2025-09-02 13:52:43] [Rank 0] step:381/10000 train_time:27609ms step_avg:72.47ms +[2025-09-02 13:52:43] [Rank 0] step:381/10000 train_time:27609ms step_avg:72.47ms +[2025-09-02 13:52:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:52:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:52:57] [Rank 0] PRINT: step:400/10000 val_loss:5.7165 svd_entropy: attn_qk:H=0.6494,top10E=0.45,eRank=112.0,q75/q25=15.86 attn_vo:H=0.6034,top10E=0.41,eRank=106.8,q75/q25=inf mlp_w1:H=0.6875,top10E=0.41,eRank=118.0,q75/q25=4.62 mlp_w2:H=0.9310,top10E=0.06,eRank=486.8,q75/q25=6.23 vo_prod:H=0.4320,top10E=0.64,eRank=25.6,q75/q25=inf train_time:29134ms step_avg:72.84ms +[2025-09-02 13:52:57] [Rank 0] PRINT: step:400/10000 val_loss:5.7165 svd_entropy: attn_qk:H=0.6494,top10E=0.45,eRank=112.0,q75/q25=15.86 attn_vo:H=0.6034,top10E=0.41,eRank=106.8,q75/q25=inf mlp_w1:H=0.6875,top10E=0.41,eRank=118.0,q75/q25=4.62 mlp_w2:H=0.9310,top10E=0.06,eRank=486.8,q75/q25=6.23 vo_prod:H=0.4320,top10E=0.64,eRank=25.6,q75/q25=inf train_time:29134ms step_avg:72.84ms +[2025-09-02 13:52:57] [Rank 0] step:401/10000 train_time:29150ms step_avg:72.69ms +[2025-09-02 13:52:57] [Rank 0] step:401/10000 train_time:29150ms step_avg:72.69ms +[2025-09-02 13:52:58] [Rank 0] step:421/10000 train_time:30527ms step_avg:72.51ms +[2025-09-02 13:52:58] [Rank 0] step:421/10000 train_time:30527ms step_avg:72.51ms +[2025-09-02 13:53:00] [Rank 0] step:441/10000 train_time:31974ms step_avg:72.50ms +[2025-09-02 13:53:00] [Rank 0] step:441/10000 train_time:31974ms step_avg:72.50ms +[2025-09-02 13:53:01] [Rank 0] step:461/10000 train_time:33423ms step_avg:72.50ms +[2025-09-02 13:53:01] [Rank 0] step:461/10000 train_time:33423ms step_avg:72.50ms +[2025-09-02 13:53:03] [Rank 0] step:481/10000 train_time:34871ms step_avg:72.50ms +[2025-09-02 13:53:03] [Rank 0] step:481/10000 train_time:34871ms step_avg:72.50ms +[2025-09-02 13:53:04] [Rank 0] step:501/10000 train_time:36322ms step_avg:72.50ms +[2025-09-02 13:53:04] [Rank 0] step:501/10000 train_time:36322ms step_avg:72.50ms +[2025-09-02 13:53:05] [Rank 0] step:521/10000 train_time:37773ms step_avg:72.50ms +[2025-09-02 13:53:05] [Rank 0] step:521/10000 train_time:37773ms step_avg:72.50ms +[2025-09-02 13:53:07] [Rank 0] step:541/10000 train_time:39222ms 
step_avg:72.50ms +[2025-09-02 13:53:07] [Rank 0] step:541/10000 train_time:39222ms step_avg:72.50ms +[2025-09-02 13:53:08] [Rank 0] step:561/10000 train_time:40671ms step_avg:72.50ms +[2025-09-02 13:53:08] [Rank 0] step:561/10000 train_time:40671ms step_avg:72.50ms +[2025-09-02 13:53:10] [Rank 0] step:581/10000 train_time:42122ms step_avg:72.50ms +[2025-09-02 13:53:10] [Rank 0] step:581/10000 train_time:42122ms step_avg:72.50ms +[2025-09-02 13:53:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:53:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:53:23] [Rank 0] PRINT: step:600/10000 val_loss:5.4243 svd_entropy: attn_qk:H=0.6767,top10E=0.39,eRank=123.4,q75/q25=20.37 attn_vo:H=0.6501,top10E=0.34,eRank=131.1,q75/q25=inf mlp_w1:H=0.7307,top10E=0.35,eRank=148.3,q75/q25=6.36 mlp_w2:H=0.9492,top10E=0.05,eRank=548.7,q75/q25=4.47 vo_prod:H=0.4852,top10E=0.54,eRank=35.3,q75/q25=inf train_time:43646ms step_avg:72.74ms +[2025-09-02 13:53:23] [Rank 0] PRINT: step:600/10000 val_loss:5.4243 svd_entropy: attn_qk:H=0.6767,top10E=0.39,eRank=123.4,q75/q25=20.37 attn_vo:H=0.6501,top10E=0.34,eRank=131.1,q75/q25=inf mlp_w1:H=0.7307,top10E=0.35,eRank=148.3,q75/q25=6.36 mlp_w2:H=0.9492,top10E=0.05,eRank=548.7,q75/q25=4.47 vo_prod:H=0.4852,top10E=0.54,eRank=35.3,q75/q25=inf train_time:43646ms step_avg:72.74ms +[2025-09-02 13:53:23] [Rank 0] step:601/10000 train_time:43660ms step_avg:72.65ms +[2025-09-02 13:53:23] [Rank 0] step:601/10000 train_time:43660ms step_avg:72.65ms +[2025-09-02 13:53:24] [Rank 0] step:621/10000 train_time:45041ms step_avg:72.53ms +[2025-09-02 13:53:24] [Rank 0] step:621/10000 train_time:45041ms step_avg:72.53ms +[2025-09-02 13:53:26] [Rank 0] step:641/10000 train_time:46490ms step_avg:72.53ms +[2025-09-02 13:53:26] [Rank 0] step:641/10000 train_time:46490ms step_avg:72.53ms 
+[2025-09-02 13:53:27] [Rank 0] step:661/10000 train_time:47939ms step_avg:72.52ms +[2025-09-02 13:53:27] [Rank 0] step:661/10000 train_time:47939ms step_avg:72.52ms +[2025-09-02 13:53:29] [Rank 0] step:681/10000 train_time:49388ms step_avg:72.52ms +[2025-09-02 13:53:29] [Rank 0] step:681/10000 train_time:49388ms step_avg:72.52ms +[2025-09-02 13:53:30] [Rank 0] step:701/10000 train_time:50839ms step_avg:72.52ms +[2025-09-02 13:53:30] [Rank 0] step:701/10000 train_time:50839ms step_avg:72.52ms +[2025-09-02 13:53:32] [Rank 0] step:721/10000 train_time:52289ms step_avg:72.52ms +[2025-09-02 13:53:32] [Rank 0] step:721/10000 train_time:52289ms step_avg:72.52ms +[2025-09-02 13:53:33] [Rank 0] step:741/10000 train_time:53739ms step_avg:72.52ms +[2025-09-02 13:53:33] [Rank 0] step:741/10000 train_time:53739ms step_avg:72.52ms +[2025-09-02 13:53:35] [Rank 0] step:761/10000 train_time:55201ms step_avg:72.54ms +[2025-09-02 13:53:35] [Rank 0] step:761/10000 train_time:55201ms step_avg:72.54ms +[2025-09-02 13:53:36] [Rank 0] step:781/10000 train_time:56665ms step_avg:72.55ms +[2025-09-02 13:53:36] [Rank 0] step:781/10000 train_time:56665ms step_avg:72.55ms +[2025-09-02 13:53:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:53:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:53:49] [Rank 0] PRINT: step:800/10000 val_loss:5.1911 svd_entropy: attn_qk:H=0.6952,top10E=0.36,eRank=132.3,q75/q25=26.54 attn_vo:H=0.6813,top10E=0.29,eRank=151.6,q75/q25=inf mlp_w1:H=0.7625,top10E=0.31,eRank=175.9,q75/q25=7.27 mlp_w2:H=0.9554,top10E=0.05,eRank=571.7,q75/q25=3.97 vo_prod:H=0.5223,top10E=0.46,eRank=44.8,q75/q25=inf train_time:58203ms step_avg:72.75ms +[2025-09-02 13:53:49] [Rank 0] PRINT: step:800/10000 val_loss:5.1911 svd_entropy: attn_qk:H=0.6952,top10E=0.36,eRank=132.3,q75/q25=26.54 attn_vo:H=0.6813,top10E=0.29,eRank=151.6,q75/q25=inf mlp_w1:H=0.7625,top10E=0.31,eRank=175.9,q75/q25=7.27 mlp_w2:H=0.9554,top10E=0.05,eRank=571.7,q75/q25=3.97 vo_prod:H=0.5223,top10E=0.46,eRank=44.8,q75/q25=inf train_time:58203ms step_avg:72.75ms +[2025-09-02 13:53:49] [Rank 0] step:801/10000 train_time:58218ms step_avg:72.68ms +[2025-09-02 13:53:49] [Rank 0] step:801/10000 train_time:58218ms step_avg:72.68ms +[2025-09-02 13:53:51] [Rank 0] step:821/10000 train_time:59720ms step_avg:72.74ms +[2025-09-02 13:53:51] [Rank 0] step:821/10000 train_time:59720ms step_avg:72.74ms +[2025-09-02 13:53:52] [Rank 0] step:841/10000 train_time:61184ms step_avg:72.75ms +[2025-09-02 13:53:52] [Rank 0] step:841/10000 train_time:61184ms step_avg:72.75ms +[2025-09-02 13:53:54] [Rank 0] step:861/10000 train_time:62670ms step_avg:72.79ms +[2025-09-02 13:53:54] [Rank 0] step:861/10000 train_time:62670ms step_avg:72.79ms +[2025-09-02 13:53:55] [Rank 0] step:881/10000 train_time:64134ms step_avg:72.80ms +[2025-09-02 13:53:55] [Rank 0] step:881/10000 train_time:64134ms step_avg:72.80ms +[2025-09-02 13:53:57] [Rank 0] step:901/10000 train_time:65697ms step_avg:72.92ms +[2025-09-02 13:53:57] [Rank 0] step:901/10000 train_time:65697ms step_avg:72.92ms +[2025-09-02 13:53:58] [Rank 0] step:921/10000 train_time:67161ms step_avg:72.92ms +[2025-09-02 13:53:58] [Rank 0] step:921/10000 train_time:67161ms step_avg:72.92ms +[2025-09-02 13:54:00] [Rank 0] step:941/10000 train_time:68627ms 
step_avg:72.93ms +[2025-09-02 13:54:00] [Rank 0] step:941/10000 train_time:68627ms step_avg:72.93ms +[2025-09-02 13:54:01] [Rank 0] step:961/10000 train_time:70091ms step_avg:72.94ms +[2025-09-02 13:54:01] [Rank 0] step:961/10000 train_time:70091ms step_avg:72.94ms +[2025-09-02 13:54:03] [Rank 0] step:981/10000 train_time:71555ms step_avg:72.94ms +[2025-09-02 13:54:03] [Rank 0] step:981/10000 train_time:71555ms step_avg:72.94ms +[2025-09-02 13:54:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:54:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:54:16] [Rank 0] PRINT: step:1000/10000 val_loss:5.0124 svd_entropy: attn_qk:H=0.7097,top10E=0.34,eRank=140.1,q75/q25=32.95 attn_vo:H=0.7047,top10E=0.26,eRank=169.6,q75/q25=inf mlp_w1:H=0.7871,top10E=0.28,eRank=201.6,q75/q25=7.51 mlp_w2:H=0.9599,top10E=0.04,eRank=588.8,q75/q25=3.63 vo_prod:H=0.5507,top10E=0.41,eRank=53.9,q75/q25=inf train_time:73095ms step_avg:73.10ms +[2025-09-02 13:54:16] [Rank 0] PRINT: step:1000/10000 val_loss:5.0124 svd_entropy: attn_qk:H=0.7097,top10E=0.34,eRank=140.1,q75/q25=32.95 attn_vo:H=0.7047,top10E=0.26,eRank=169.6,q75/q25=inf mlp_w1:H=0.7871,top10E=0.28,eRank=201.6,q75/q25=7.51 mlp_w2:H=0.9599,top10E=0.04,eRank=588.8,q75/q25=3.63 vo_prod:H=0.5507,top10E=0.41,eRank=53.9,q75/q25=inf train_time:73095ms step_avg:73.10ms +[2025-09-02 13:54:16] [Rank 0] step:1001/10000 train_time:73111ms step_avg:73.04ms +[2025-09-02 13:54:16] [Rank 0] step:1001/10000 train_time:73111ms step_avg:73.04ms +[2025-09-02 13:54:17] [Rank 0] step:1021/10000 train_time:74523ms step_avg:72.99ms +[2025-09-02 13:54:17] [Rank 0] step:1021/10000 train_time:74523ms step_avg:72.99ms +[2025-09-02 13:54:19] [Rank 0] step:1041/10000 train_time:75983ms step_avg:72.99ms +[2025-09-02 13:54:19] [Rank 0] step:1041/10000 train_time:75983ms 
step_avg:72.99ms +[2025-09-02 13:54:20] [Rank 0] step:1061/10000 train_time:77446ms step_avg:72.99ms +[2025-09-02 13:54:20] [Rank 0] step:1061/10000 train_time:77446ms step_avg:72.99ms +[2025-09-02 13:54:22] [Rank 0] step:1081/10000 train_time:78909ms step_avg:73.00ms +[2025-09-02 13:54:22] [Rank 0] step:1081/10000 train_time:78909ms step_avg:73.00ms +[2025-09-02 13:54:23] [Rank 0] step:1101/10000 train_time:80372ms step_avg:73.00ms +[2025-09-02 13:54:23] [Rank 0] step:1101/10000 train_time:80372ms step_avg:73.00ms +[2025-09-02 13:54:25] [Rank 0] step:1121/10000 train_time:81836ms step_avg:73.00ms +[2025-09-02 13:54:25] [Rank 0] step:1121/10000 train_time:81836ms step_avg:73.00ms +[2025-09-02 13:54:26] [Rank 0] step:1141/10000 train_time:83300ms step_avg:73.01ms +[2025-09-02 13:54:26] [Rank 0] step:1141/10000 train_time:83300ms step_avg:73.01ms +[2025-09-02 13:54:28] [Rank 0] step:1161/10000 train_time:84763ms step_avg:73.01ms +[2025-09-02 13:54:28] [Rank 0] step:1161/10000 train_time:84763ms step_avg:73.01ms +[2025-09-02 13:54:29] [Rank 0] step:1181/10000 train_time:86227ms step_avg:73.01ms +[2025-09-02 13:54:29] [Rank 0] step:1181/10000 train_time:86227ms step_avg:73.01ms +[2025-09-02 13:54:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:54:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:54:42] [Rank 0] PRINT: step:1200/10000 val_loss:4.8289 svd_entropy: attn_qk:H=0.7206,top10E=0.32,eRank=146.8,q75/q25=38.66 attn_vo:H=0.7231,top10E=0.23,eRank=186.1,q75/q25=inf mlp_w1:H=0.8078,top10E=0.25,eRank=226.9,q75/q25=7.44 mlp_w2:H=0.9629,top10E=0.04,eRank=600.6,q75/q25=3.42 vo_prod:H=0.5706,top10E=0.37,eRank=61.6,q75/q25=inf train_time:87766ms step_avg:73.14ms +[2025-09-02 13:54:42] [Rank 0] PRINT: step:1200/10000 val_loss:4.8289 svd_entropy: attn_qk:H=0.7206,top10E=0.32,eRank=146.8,q75/q25=38.66 attn_vo:H=0.7231,top10E=0.23,eRank=186.1,q75/q25=inf mlp_w1:H=0.8078,top10E=0.25,eRank=226.9,q75/q25=7.44 mlp_w2:H=0.9629,top10E=0.04,eRank=600.6,q75/q25=3.42 vo_prod:H=0.5706,top10E=0.37,eRank=61.6,q75/q25=inf train_time:87766ms step_avg:73.14ms +[2025-09-02 13:54:43] [Rank 0] step:1201/10000 train_time:87781ms step_avg:73.09ms +[2025-09-02 13:54:43] [Rank 0] step:1201/10000 train_time:87781ms step_avg:73.09ms +[2025-09-02 13:54:44] [Rank 0] step:1221/10000 train_time:89196ms step_avg:73.05ms +[2025-09-02 13:54:44] [Rank 0] step:1221/10000 train_time:89196ms step_avg:73.05ms +[2025-09-02 13:54:45] [Rank 0] step:1241/10000 train_time:90656ms step_avg:73.05ms +[2025-09-02 13:54:45] [Rank 0] step:1241/10000 train_time:90656ms step_avg:73.05ms +[2025-09-02 13:54:47] [Rank 0] step:1261/10000 train_time:92117ms step_avg:73.05ms +[2025-09-02 13:54:47] [Rank 0] step:1261/10000 train_time:92117ms step_avg:73.05ms +[2025-09-02 13:54:48] [Rank 0] step:1281/10000 train_time:93580ms step_avg:73.05ms +[2025-09-02 13:54:48] [Rank 0] step:1281/10000 train_time:93580ms step_avg:73.05ms +[2025-09-02 13:54:50] [Rank 0] step:1301/10000 train_time:95043ms step_avg:73.05ms +[2025-09-02 13:54:50] [Rank 0] step:1301/10000 train_time:95043ms step_avg:73.05ms +[2025-09-02 13:54:51] [Rank 0] step:1321/10000 train_time:96507ms step_avg:73.06ms +[2025-09-02 13:54:51] [Rank 0] step:1321/10000 train_time:96507ms step_avg:73.06ms +[2025-09-02 13:54:53] [Rank 0] step:1341/10000 
train_time:97969ms step_avg:73.06ms +[2025-09-02 13:54:53] [Rank 0] step:1341/10000 train_time:97969ms step_avg:73.06ms +[2025-09-02 13:54:54] [Rank 0] step:1361/10000 train_time:99503ms step_avg:73.11ms +[2025-09-02 13:54:54] [Rank 0] step:1361/10000 train_time:99503ms step_avg:73.11ms +[2025-09-02 13:54:56] [Rank 0] step:1381/10000 train_time:100968ms step_avg:73.11ms +[2025-09-02 13:54:56] [Rank 0] step:1381/10000 train_time:100968ms step_avg:73.11ms +[2025-09-02 13:54:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:54:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:55:09] [Rank 0] PRINT: step:1400/10000 val_loss:4.7182 svd_entropy: attn_qk:H=0.7291,top10E=0.31,eRank=152.5,q75/q25=43.62 attn_vo:H=0.7373,top10E=0.22,eRank=200.2,q75/q25=inf mlp_w1:H=0.8234,top10E=0.23,eRank=248.5,q75/q25=7.25 mlp_w2:H=0.9650,top10E=0.04,eRank=609.0,q75/q25=3.28 vo_prod:H=0.5858,top10E=0.34,eRank=68.2,q75/q25=inf train_time:102507ms step_avg:73.22ms +[2025-09-02 13:55:09] [Rank 0] PRINT: step:1400/10000 val_loss:4.7182 svd_entropy: attn_qk:H=0.7291,top10E=0.31,eRank=152.5,q75/q25=43.62 attn_vo:H=0.7373,top10E=0.22,eRank=200.2,q75/q25=inf mlp_w1:H=0.8234,top10E=0.23,eRank=248.5,q75/q25=7.25 mlp_w2:H=0.9650,top10E=0.04,eRank=609.0,q75/q25=3.28 vo_prod:H=0.5858,top10E=0.34,eRank=68.2,q75/q25=inf train_time:102507ms step_avg:73.22ms +[2025-09-02 13:55:09] [Rank 0] step:1401/10000 train_time:102521ms step_avg:73.18ms +[2025-09-02 13:55:09] [Rank 0] step:1401/10000 train_time:102521ms step_avg:73.18ms +[2025-09-02 13:55:11] [Rank 0] step:1421/10000 train_time:103928ms step_avg:73.14ms +[2025-09-02 13:55:11] [Rank 0] step:1421/10000 train_time:103928ms step_avg:73.14ms +[2025-09-02 13:55:12] [Rank 0] step:1441/10000 train_time:105391ms step_avg:73.14ms +[2025-09-02 13:55:12] [Rank 0] 
step:1441/10000 train_time:105391ms step_avg:73.14ms +[2025-09-02 13:55:14] [Rank 0] step:1461/10000 train_time:106852ms step_avg:73.14ms +[2025-09-02 13:55:14] [Rank 0] step:1461/10000 train_time:106852ms step_avg:73.14ms +[2025-09-02 13:55:15] [Rank 0] step:1481/10000 train_time:108314ms step_avg:73.14ms +[2025-09-02 13:55:15] [Rank 0] step:1481/10000 train_time:108314ms step_avg:73.14ms +[2025-09-02 13:55:17] [Rank 0] step:1501/10000 train_time:109788ms step_avg:73.14ms +[2025-09-02 13:55:17] [Rank 0] step:1501/10000 train_time:109788ms step_avg:73.14ms +[2025-09-02 13:55:18] [Rank 0] step:1521/10000 train_time:111264ms step_avg:73.15ms +[2025-09-02 13:55:18] [Rank 0] step:1521/10000 train_time:111264ms step_avg:73.15ms +[2025-09-02 13:55:20] [Rank 0] step:1541/10000 train_time:112738ms step_avg:73.16ms +[2025-09-02 13:55:20] [Rank 0] step:1541/10000 train_time:112738ms step_avg:73.16ms +[2025-09-02 13:55:21] [Rank 0] step:1561/10000 train_time:114213ms step_avg:73.17ms +[2025-09-02 13:55:21] [Rank 0] step:1561/10000 train_time:114213ms step_avg:73.17ms +[2025-09-02 13:55:22] [Rank 0] step:1581/10000 train_time:115689ms step_avg:73.17ms +[2025-09-02 13:55:22] [Rank 0] step:1581/10000 train_time:115689ms step_avg:73.17ms +[2025-09-02 13:55:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:55:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:55:36] [Rank 0] PRINT: step:1600/10000 val_loss:4.5863 svd_entropy: attn_qk:H=0.7357,top10E=0.30,eRank=157.0,q75/q25=47.68 attn_vo:H=0.7488,top10E=0.20,eRank=212.5,q75/q25=inf mlp_w1:H=0.8358,top10E=0.22,eRank=267.7,q75/q25=6.99 mlp_w2:H=0.9666,top10E=0.04,eRank=615.3,q75/q25=3.17 vo_prod:H=0.5986,top10E=0.32,eRank=74.5,q75/q25=inf train_time:117241ms step_avg:73.28ms +[2025-09-02 13:55:36] [Rank 0] PRINT: step:1600/10000 val_loss:4.5863 svd_entropy: attn_qk:H=0.7357,top10E=0.30,eRank=157.0,q75/q25=47.68 attn_vo:H=0.7488,top10E=0.20,eRank=212.5,q75/q25=inf mlp_w1:H=0.8358,top10E=0.22,eRank=267.7,q75/q25=6.99 mlp_w2:H=0.9666,top10E=0.04,eRank=615.3,q75/q25=3.17 vo_prod:H=0.5986,top10E=0.32,eRank=74.5,q75/q25=inf train_time:117241ms step_avg:73.28ms +[2025-09-02 13:55:36] [Rank 0] step:1601/10000 train_time:117256ms step_avg:73.24ms +[2025-09-02 13:55:36] [Rank 0] step:1601/10000 train_time:117256ms step_avg:73.24ms +[2025-09-02 13:55:37] [Rank 0] step:1621/10000 train_time:118654ms step_avg:73.20ms +[2025-09-02 13:55:37] [Rank 0] step:1621/10000 train_time:118654ms step_avg:73.20ms +[2025-09-02 13:55:39] [Rank 0] step:1641/10000 train_time:120129ms step_avg:73.21ms +[2025-09-02 13:55:39] [Rank 0] step:1641/10000 train_time:120129ms step_avg:73.21ms +[2025-09-02 13:55:40] [Rank 0] step:1661/10000 train_time:121605ms step_avg:73.21ms +[2025-09-02 13:55:40] [Rank 0] step:1661/10000 train_time:121605ms step_avg:73.21ms +[2025-09-02 13:55:42] [Rank 0] step:1681/10000 train_time:123081ms step_avg:73.22ms +[2025-09-02 13:55:42] [Rank 0] step:1681/10000 train_time:123081ms step_avg:73.22ms +[2025-09-02 13:55:43] [Rank 0] step:1701/10000 train_time:124556ms step_avg:73.23ms +[2025-09-02 13:55:43] [Rank 0] step:1701/10000 train_time:124556ms step_avg:73.23ms +[2025-09-02 13:55:45] [Rank 0] step:1721/10000 train_time:126032ms step_avg:73.23ms +[2025-09-02 13:55:45] [Rank 0] step:1721/10000 train_time:126032ms step_avg:73.23ms +[2025-09-02 13:55:46] [Rank 0] 
step:1741/10000 train_time:127508ms step_avg:73.24ms +[2025-09-02 13:55:46] [Rank 0] step:1741/10000 train_time:127508ms step_avg:73.24ms +[2025-09-02 13:55:48] [Rank 0] step:1761/10000 train_time:128986ms step_avg:73.25ms +[2025-09-02 13:55:48] [Rank 0] step:1761/10000 train_time:128986ms step_avg:73.25ms +[2025-09-02 13:55:49] [Rank 0] step:1781/10000 train_time:130464ms step_avg:73.25ms +[2025-09-02 13:55:49] [Rank 0] step:1781/10000 train_time:130464ms step_avg:73.25ms +[2025-09-02 13:55:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:55:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:56:02] [Rank 0] PRINT: step:1800/10000 val_loss:4.4922 svd_entropy: attn_qk:H=0.7415,top10E=0.29,eRank=161.1,q75/q25=50.55 attn_vo:H=0.7583,top10E=0.19,eRank=223.2,q75/q25=inf mlp_w1:H=0.8459,top10E=0.21,eRank=284.4,q75/q25=6.75 mlp_w2:H=0.9677,top10E=0.04,eRank=619.7,q75/q25=3.11 vo_prod:H=0.6093,top10E=0.30,eRank=80.3,q75/q25=inf train_time:132018ms step_avg:73.34ms +[2025-09-02 13:56:02] [Rank 0] PRINT: step:1800/10000 val_loss:4.4922 svd_entropy: attn_qk:H=0.7415,top10E=0.29,eRank=161.1,q75/q25=50.55 attn_vo:H=0.7583,top10E=0.19,eRank=223.2,q75/q25=inf mlp_w1:H=0.8459,top10E=0.21,eRank=284.4,q75/q25=6.75 mlp_w2:H=0.9677,top10E=0.04,eRank=619.7,q75/q25=3.11 vo_prod:H=0.6093,top10E=0.30,eRank=80.3,q75/q25=inf train_time:132018ms step_avg:73.34ms +[2025-09-02 13:56:02] [Rank 0] step:1801/10000 train_time:132033ms step_avg:73.31ms +[2025-09-02 13:56:02] [Rank 0] step:1801/10000 train_time:132033ms step_avg:73.31ms +[2025-09-02 13:56:04] [Rank 0] step:1821/10000 train_time:133445ms step_avg:73.28ms +[2025-09-02 13:56:04] [Rank 0] step:1821/10000 train_time:133445ms step_avg:73.28ms +[2025-09-02 13:56:05] [Rank 0] step:1841/10000 train_time:134918ms step_avg:73.29ms +[2025-09-02 13:56:05] 
[Rank 0] step:1841/10000 train_time:134918ms step_avg:73.29ms +[2025-09-02 13:56:07] [Rank 0] step:1861/10000 train_time:136392ms step_avg:73.29ms +[2025-09-02 13:56:07] [Rank 0] step:1861/10000 train_time:136392ms step_avg:73.29ms +[2025-09-02 13:56:08] [Rank 0] step:1881/10000 train_time:137865ms step_avg:73.29ms +[2025-09-02 13:56:08] [Rank 0] step:1881/10000 train_time:137865ms step_avg:73.29ms +[2025-09-02 13:56:10] [Rank 0] step:1901/10000 train_time:139342ms step_avg:73.30ms +[2025-09-02 13:56:10] [Rank 0] step:1901/10000 train_time:139342ms step_avg:73.30ms +[2025-09-02 13:56:11] [Rank 0] step:1921/10000 train_time:140814ms step_avg:73.30ms +[2025-09-02 13:56:11] [Rank 0] step:1921/10000 train_time:140814ms step_avg:73.30ms +[2025-09-02 13:56:13] [Rank 0] step:1941/10000 train_time:142289ms step_avg:73.31ms +[2025-09-02 13:56:13] [Rank 0] step:1941/10000 train_time:142289ms step_avg:73.31ms +[2025-09-02 13:56:14] [Rank 0] step:1961/10000 train_time:143764ms step_avg:73.31ms +[2025-09-02 13:56:14] [Rank 0] step:1961/10000 train_time:143764ms step_avg:73.31ms +[2025-09-02 13:56:16] [Rank 0] step:1981/10000 train_time:145239ms step_avg:73.32ms +[2025-09-02 13:56:16] [Rank 0] step:1981/10000 train_time:145239ms step_avg:73.32ms +[2025-09-02 13:56:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:56:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:56:29] [Rank 0] PRINT: step:2000/10000 val_loss:4.4349 svd_entropy: attn_qk:H=0.7465,top10E=0.28,eRank=164.9,q75/q25=52.78 attn_vo:H=0.7661,top10E=0.18,eRank=232.5,q75/q25=inf mlp_w1:H=0.8539,top10E=0.20,eRank=298.6,q75/q25=6.48 mlp_w2:H=0.9685,top10E=0.04,eRank=623.1,q75/q25=3.06 vo_prod:H=0.6188,top10E=0.29,eRank=85.9,q75/q25=inf train_time:146791ms step_avg:73.40ms +[2025-09-02 13:56:29] [Rank 0] PRINT: step:2000/10000 val_loss:4.4349 svd_entropy: attn_qk:H=0.7465,top10E=0.28,eRank=164.9,q75/q25=52.78 attn_vo:H=0.7661,top10E=0.18,eRank=232.5,q75/q25=inf mlp_w1:H=0.8539,top10E=0.20,eRank=298.6,q75/q25=6.48 mlp_w2:H=0.9685,top10E=0.04,eRank=623.1,q75/q25=3.06 vo_prod:H=0.6188,top10E=0.29,eRank=85.9,q75/q25=inf train_time:146791ms step_avg:73.40ms +[2025-09-02 13:56:29] [Rank 0] step:2001/10000 train_time:146805ms step_avg:73.37ms +[2025-09-02 13:56:29] [Rank 0] step:2001/10000 train_time:146805ms step_avg:73.37ms +[2025-09-02 13:56:31] [Rank 0] step:2021/10000 train_time:148205ms step_avg:73.33ms +[2025-09-02 13:56:31] [Rank 0] step:2021/10000 train_time:148205ms step_avg:73.33ms +[2025-09-02 13:56:32] [Rank 0] step:2041/10000 train_time:149870ms step_avg:73.43ms +[2025-09-02 13:56:32] [Rank 0] step:2041/10000 train_time:149870ms step_avg:73.43ms +[2025-09-02 13:56:34] [Rank 0] step:2061/10000 train_time:151343ms step_avg:73.43ms +[2025-09-02 13:56:34] [Rank 0] step:2061/10000 train_time:151343ms step_avg:73.43ms +[2025-09-02 13:56:35] [Rank 0] step:2081/10000 train_time:152816ms step_avg:73.43ms +[2025-09-02 13:56:35] [Rank 0] step:2081/10000 train_time:152816ms step_avg:73.43ms +[2025-09-02 13:56:37] [Rank 0] step:2101/10000 train_time:154290ms step_avg:73.44ms +[2025-09-02 13:56:37] [Rank 0] step:2101/10000 train_time:154290ms step_avg:73.44ms +[2025-09-02 13:56:38] [Rank 0] step:2121/10000 train_time:155766ms step_avg:73.44ms +[2025-09-02 13:56:38] [Rank 0] step:2121/10000 train_time:155766ms step_avg:73.44ms +[2025-09-02 13:56:40] [Rank 0] 
step:2141/10000 train_time:157241ms step_avg:73.44ms +[2025-09-02 13:56:40] [Rank 0] step:2141/10000 train_time:157241ms step_avg:73.44ms +[2025-09-02 13:56:41] [Rank 0] step:2161/10000 train_time:158717ms step_avg:73.45ms +[2025-09-02 13:56:41] [Rank 0] step:2161/10000 train_time:158717ms step_avg:73.45ms +[2025-09-02 13:56:43] [Rank 0] step:2181/10000 train_time:160191ms step_avg:73.45ms +[2025-09-02 13:56:43] [Rank 0] step:2181/10000 train_time:160191ms step_avg:73.45ms +[2025-09-02 13:56:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:56:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:56:56] [Rank 0] PRINT: step:2200/10000 val_loss:4.3691 svd_entropy: attn_qk:H=0.7506,top10E=0.28,eRank=168.1,q75/q25=54.02 attn_vo:H=0.7724,top10E=0.17,eRank=240.1,q75/q25=inf mlp_w1:H=0.8603,top10E=0.19,eRank=310.6,q75/q25=6.26 mlp_w2:H=0.9691,top10E=0.04,eRank=625.6,q75/q25=3.02 vo_prod:H=0.6262,top10E=0.27,eRank=90.5,q75/q25=inf train_time:161742ms step_avg:73.52ms +[2025-09-02 13:56:56] [Rank 0] PRINT: step:2200/10000 val_loss:4.3691 svd_entropy: attn_qk:H=0.7506,top10E=0.28,eRank=168.1,q75/q25=54.02 attn_vo:H=0.7724,top10E=0.17,eRank=240.1,q75/q25=inf mlp_w1:H=0.8603,top10E=0.19,eRank=310.6,q75/q25=6.26 mlp_w2:H=0.9691,top10E=0.04,eRank=625.6,q75/q25=3.02 vo_prod:H=0.6262,top10E=0.27,eRank=90.5,q75/q25=inf train_time:161742ms step_avg:73.52ms +[2025-09-02 13:56:56] [Rank 0] step:2201/10000 train_time:161757ms step_avg:73.49ms +[2025-09-02 13:56:56] [Rank 0] step:2201/10000 train_time:161757ms step_avg:73.49ms +[2025-09-02 13:56:57] [Rank 0] step:2221/10000 train_time:163180ms step_avg:73.47ms +[2025-09-02 13:56:57] [Rank 0] step:2221/10000 train_time:163180ms step_avg:73.47ms +[2025-09-02 13:56:59] [Rank 0] step:2241/10000 train_time:164688ms step_avg:73.49ms +[2025-09-02 13:56:59] 
[Rank 0] step:2241/10000 train_time:164688ms step_avg:73.49ms +[2025-09-02 13:57:01] [Rank 0] step:2261/10000 train_time:166314ms step_avg:73.56ms +[2025-09-02 13:57:01] [Rank 0] step:2261/10000 train_time:166314ms step_avg:73.56ms +[2025-09-02 13:57:02] [Rank 0] step:2281/10000 train_time:167831ms step_avg:73.58ms +[2025-09-02 13:57:02] [Rank 0] step:2281/10000 train_time:167831ms step_avg:73.58ms +[2025-09-02 13:57:04] [Rank 0] step:2301/10000 train_time:169430ms step_avg:73.63ms +[2025-09-02 13:57:04] [Rank 0] step:2301/10000 train_time:169430ms step_avg:73.63ms +[2025-09-02 13:57:05] [Rank 0] step:2321/10000 train_time:170948ms step_avg:73.65ms +[2025-09-02 13:57:05] [Rank 0] step:2321/10000 train_time:170948ms step_avg:73.65ms +[2025-09-02 13:57:07] [Rank 0] step:2341/10000 train_time:172467ms step_avg:73.67ms +[2025-09-02 13:57:07] [Rank 0] step:2341/10000 train_time:172467ms step_avg:73.67ms +[2025-09-02 13:57:08] [Rank 0] step:2361/10000 train_time:173986ms step_avg:73.69ms +[2025-09-02 13:57:08] [Rank 0] step:2361/10000 train_time:173986ms step_avg:73.69ms +[2025-09-02 13:57:10] [Rank 0] step:2381/10000 train_time:175506ms step_avg:73.71ms +[2025-09-02 13:57:10] [Rank 0] step:2381/10000 train_time:175506ms step_avg:73.71ms +[2025-09-02 13:57:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:57:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:57:23] [Rank 0] PRINT: step:2400/10000 val_loss:4.2945 svd_entropy: attn_qk:H=0.7534,top10E=0.28,eRank=170.3,q75/q25=54.69 attn_vo:H=0.7778,top10E=0.17,eRank=246.9,q75/q25=inf mlp_w1:H=0.8660,top10E=0.18,eRank=321.8,q75/q25=6.07 mlp_w2:H=0.9696,top10E=0.04,eRank=627.5,q75/q25=3.00 vo_prod:H=0.6329,top10E=0.26,eRank=95.0,q75/q25=inf train_time:177206ms step_avg:73.84ms +[2025-09-02 13:57:23] [Rank 0] PRINT: step:2400/10000 val_loss:4.2945 svd_entropy: attn_qk:H=0.7534,top10E=0.28,eRank=170.3,q75/q25=54.69 attn_vo:H=0.7778,top10E=0.17,eRank=246.9,q75/q25=inf mlp_w1:H=0.8660,top10E=0.18,eRank=321.8,q75/q25=6.07 mlp_w2:H=0.9696,top10E=0.04,eRank=627.5,q75/q25=3.00 vo_prod:H=0.6329,top10E=0.26,eRank=95.0,q75/q25=inf train_time:177206ms step_avg:73.84ms +[2025-09-02 13:57:23] [Rank 0] step:2401/10000 train_time:177220ms step_avg:73.81ms +[2025-09-02 13:57:23] [Rank 0] step:2401/10000 train_time:177220ms step_avg:73.81ms +[2025-09-02 13:57:25] [Rank 0] step:2421/10000 train_time:178680ms step_avg:73.80ms +[2025-09-02 13:57:25] [Rank 0] step:2421/10000 train_time:178680ms step_avg:73.80ms +[2025-09-02 13:57:26] [Rank 0] step:2441/10000 train_time:180196ms step_avg:73.82ms +[2025-09-02 13:57:26] [Rank 0] step:2441/10000 train_time:180196ms step_avg:73.82ms +[2025-09-02 13:57:28] [Rank 0] step:2461/10000 train_time:181714ms step_avg:73.84ms +[2025-09-02 13:57:28] [Rank 0] step:2461/10000 train_time:181714ms step_avg:73.84ms +[2025-09-02 13:57:29] [Rank 0] step:2481/10000 train_time:183233ms step_avg:73.85ms +[2025-09-02 13:57:29] [Rank 0] step:2481/10000 train_time:183233ms step_avg:73.85ms +[2025-09-02 13:57:31] [Rank 0] step:2501/10000 train_time:184752ms step_avg:73.87ms +[2025-09-02 13:57:31] [Rank 0] step:2501/10000 train_time:184752ms step_avg:73.87ms +[2025-09-02 13:57:33] [Rank 0] step:2521/10000 train_time:186272ms step_avg:73.89ms +[2025-09-02 13:57:33] [Rank 0] step:2521/10000 train_time:186272ms step_avg:73.89ms +[2025-09-02 13:57:34] [Rank 0] 
step:2541/10000 train_time:187791ms step_avg:73.90ms +[2025-09-02 13:57:34] [Rank 0] step:2541/10000 train_time:187791ms step_avg:73.90ms +[2025-09-02 13:57:36] [Rank 0] step:2561/10000 train_time:189310ms step_avg:73.92ms +[2025-09-02 13:57:36] [Rank 0] step:2561/10000 train_time:189310ms step_avg:73.92ms +[2025-09-02 13:57:37] [Rank 0] step:2581/10000 train_time:190830ms step_avg:73.94ms +[2025-09-02 13:57:37] [Rank 0] step:2581/10000 train_time:190830ms step_avg:73.94ms +[2025-09-02 13:57:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:57:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:57:50] [Rank 0] PRINT: step:2600/10000 val_loss:4.2442 svd_entropy: attn_qk:H=0.7565,top10E=0.27,eRank=172.8,q75/q25=55.38 attn_vo:H=0.7824,top10E=0.16,eRank=253.0,q75/q25=inf mlp_w1:H=0.8709,top10E=0.18,eRank=331.7,q75/q25=5.87 mlp_w2:H=0.9699,top10E=0.04,eRank=628.8,q75/q25=2.97 vo_prod:H=0.6389,top10E=0.25,eRank=99.3,q75/q25=inf train_time:192428ms step_avg:74.01ms +[2025-09-02 13:57:50] [Rank 0] PRINT: step:2600/10000 val_loss:4.2442 svd_entropy: attn_qk:H=0.7565,top10E=0.27,eRank=172.8,q75/q25=55.38 attn_vo:H=0.7824,top10E=0.16,eRank=253.0,q75/q25=inf mlp_w1:H=0.8709,top10E=0.18,eRank=331.7,q75/q25=5.87 mlp_w2:H=0.9699,top10E=0.04,eRank=628.8,q75/q25=2.97 vo_prod:H=0.6389,top10E=0.25,eRank=99.3,q75/q25=inf train_time:192428ms step_avg:74.01ms +[2025-09-02 13:57:51] [Rank 0] step:2601/10000 train_time:192442ms step_avg:73.99ms +[2025-09-02 13:57:51] [Rank 0] step:2601/10000 train_time:192442ms step_avg:73.99ms +[2025-09-02 13:57:52] [Rank 0] step:2621/10000 train_time:193901ms step_avg:73.98ms +[2025-09-02 13:57:52] [Rank 0] step:2621/10000 train_time:193901ms step_avg:73.98ms +[2025-09-02 13:57:54] [Rank 0] step:2641/10000 train_time:195420ms step_avg:73.99ms +[2025-09-02 13:57:54] 
[Rank 0] step:2641/10000 train_time:195420ms step_avg:73.99ms +[2025-09-02 13:57:55] [Rank 0] step:2661/10000 train_time:196939ms step_avg:74.01ms +[2025-09-02 13:57:55] [Rank 0] step:2661/10000 train_time:196939ms step_avg:74.01ms +[2025-09-02 13:57:57] [Rank 0] step:2681/10000 train_time:198460ms step_avg:74.02ms +[2025-09-02 13:57:57] [Rank 0] step:2681/10000 train_time:198460ms step_avg:74.02ms +[2025-09-02 13:57:58] [Rank 0] step:2701/10000 train_time:199981ms step_avg:74.04ms +[2025-09-02 13:57:58] [Rank 0] step:2701/10000 train_time:199981ms step_avg:74.04ms +[2025-09-02 13:58:00] [Rank 0] step:2721/10000 train_time:201602ms step_avg:74.09ms +[2025-09-02 13:58:00] [Rank 0] step:2721/10000 train_time:201602ms step_avg:74.09ms +[2025-09-02 13:58:01] [Rank 0] step:2741/10000 train_time:203126ms step_avg:74.11ms +[2025-09-02 13:58:01] [Rank 0] step:2741/10000 train_time:203126ms step_avg:74.11ms +[2025-09-02 13:58:03] [Rank 0] step:2761/10000 train_time:204794ms step_avg:74.17ms +[2025-09-02 13:58:03] [Rank 0] step:2761/10000 train_time:204794ms step_avg:74.17ms +[2025-09-02 13:58:04] [Rank 0] step:2781/10000 train_time:206178ms step_avg:74.14ms +[2025-09-02 13:58:04] [Rank 0] step:2781/10000 train_time:206178ms step_avg:74.14ms +[2025-09-02 13:58:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:58:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:58:18] [Rank 0] PRINT: step:2800/10000 val_loss:4.2073 svd_entropy: attn_qk:H=0.7595,top10E=0.27,eRank=175.4,q75/q25=56.27 attn_vo:H=0.7867,top10E=0.15,eRank=258.7,q75/q25=inf mlp_w1:H=0.8752,top10E=0.17,eRank=340.7,q75/q25=5.74 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.95 vo_prod:H=0.6445,top10E=0.25,eRank=103.3,q75/q25=inf train_time:207779ms step_avg:74.21ms +[2025-09-02 13:58:18] [Rank 0] PRINT: step:2800/10000 val_loss:4.2073 svd_entropy: attn_qk:H=0.7595,top10E=0.27,eRank=175.4,q75/q25=56.27 attn_vo:H=0.7867,top10E=0.15,eRank=258.7,q75/q25=inf mlp_w1:H=0.8752,top10E=0.17,eRank=340.7,q75/q25=5.74 mlp_w2:H=0.9701,top10E=0.04,eRank=629.8,q75/q25=2.95 vo_prod:H=0.6445,top10E=0.25,eRank=103.3,q75/q25=inf train_time:207779ms step_avg:74.21ms +[2025-09-02 13:58:18] [Rank 0] step:2801/10000 train_time:207795ms step_avg:74.19ms +[2025-09-02 13:58:18] [Rank 0] step:2801/10000 train_time:207795ms step_avg:74.19ms +[2025-09-02 13:58:19] [Rank 0] step:2821/10000 train_time:209244ms step_avg:74.17ms +[2025-09-02 13:58:19] [Rank 0] step:2821/10000 train_time:209244ms step_avg:74.17ms +[2025-09-02 13:58:21] [Rank 0] step:2841/10000 train_time:210763ms step_avg:74.19ms +[2025-09-02 13:58:21] [Rank 0] step:2841/10000 train_time:210763ms step_avg:74.19ms +[2025-09-02 13:58:22] [Rank 0] step:2861/10000 train_time:212282ms step_avg:74.20ms +[2025-09-02 13:58:22] [Rank 0] step:2861/10000 train_time:212282ms step_avg:74.20ms +[2025-09-02 13:58:24] [Rank 0] step:2881/10000 train_time:213801ms step_avg:74.21ms +[2025-09-02 13:58:24] [Rank 0] step:2881/10000 train_time:213801ms step_avg:74.21ms +[2025-09-02 13:58:25] [Rank 0] step:2901/10000 train_time:215320ms step_avg:74.22ms +[2025-09-02 13:58:25] [Rank 0] step:2901/10000 train_time:215320ms step_avg:74.22ms +[2025-09-02 13:58:27] [Rank 0] step:2921/10000 train_time:216840ms step_avg:74.23ms +[2025-09-02 13:58:27] [Rank 0] step:2921/10000 train_time:216840ms step_avg:74.23ms +[2025-09-02 13:58:28] [Rank 0] 
step:2941/10000 train_time:218360ms step_avg:74.25ms +[2025-09-02 13:58:28] [Rank 0] step:2941/10000 train_time:218360ms step_avg:74.25ms +[2025-09-02 13:58:30] [Rank 0] step:2961/10000 train_time:219881ms step_avg:74.26ms +[2025-09-02 13:58:30] [Rank 0] step:2961/10000 train_time:219881ms step_avg:74.26ms +[2025-09-02 13:58:31] [Rank 0] step:2981/10000 train_time:221407ms step_avg:74.27ms +[2025-09-02 13:58:31] [Rank 0] step:2981/10000 train_time:221407ms step_avg:74.27ms +[2025-09-02 13:58:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:58:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:58:45] [Rank 0] PRINT: step:3000/10000 val_loss:4.1649 svd_entropy: attn_qk:H=0.7622,top10E=0.27,eRank=177.7,q75/q25=55.99 attn_vo:H=0.7903,top10E=0.15,eRank=263.6,q75/q25=inf mlp_w1:H=0.8788,top10E=0.17,eRank=348.7,q75/q25=5.59 mlp_w2:H=0.9703,top10E=0.04,eRank=630.7,q75/q25=2.94 vo_prod:H=0.6494,top10E=0.24,eRank=107.0,q75/q25=inf train_time:223014ms step_avg:74.34ms +[2025-09-02 13:58:45] [Rank 0] PRINT: step:3000/10000 val_loss:4.1649 svd_entropy: attn_qk:H=0.7622,top10E=0.27,eRank=177.7,q75/q25=55.99 attn_vo:H=0.7903,top10E=0.15,eRank=263.6,q75/q25=inf mlp_w1:H=0.8788,top10E=0.17,eRank=348.7,q75/q25=5.59 mlp_w2:H=0.9703,top10E=0.04,eRank=630.7,q75/q25=2.94 vo_prod:H=0.6494,top10E=0.24,eRank=107.0,q75/q25=inf train_time:223014ms step_avg:74.34ms +[2025-09-02 13:58:45] [Rank 0] step:3001/10000 train_time:223029ms step_avg:74.32ms +[2025-09-02 13:58:45] [Rank 0] step:3001/10000 train_time:223029ms step_avg:74.32ms +[2025-09-02 13:58:46] [Rank 0] step:3021/10000 train_time:224483ms step_avg:74.31ms +[2025-09-02 13:58:46] [Rank 0] step:3021/10000 train_time:224483ms step_avg:74.31ms +[2025-09-02 13:58:48] [Rank 0] step:3041/10000 train_time:226007ms step_avg:74.32ms +[2025-09-02 
13:58:48] [Rank 0] step:3041/10000 train_time:226007ms step_avg:74.32ms +[2025-09-02 13:58:50] [Rank 0] step:3061/10000 train_time:227530ms step_avg:74.33ms +[2025-09-02 13:58:50] [Rank 0] step:3061/10000 train_time:227530ms step_avg:74.33ms +[2025-09-02 13:58:51] [Rank 0] step:3081/10000 train_time:229058ms step_avg:74.35ms +[2025-09-02 13:58:51] [Rank 0] step:3081/10000 train_time:229058ms step_avg:74.35ms +[2025-09-02 13:58:53] [Rank 0] step:3101/10000 train_time:230582ms step_avg:74.36ms +[2025-09-02 13:58:53] [Rank 0] step:3101/10000 train_time:230582ms step_avg:74.36ms +[2025-09-02 13:58:54] [Rank 0] step:3121/10000 train_time:232108ms step_avg:74.37ms +[2025-09-02 13:58:54] [Rank 0] step:3121/10000 train_time:232108ms step_avg:74.37ms +[2025-09-02 13:58:56] [Rank 0] step:3141/10000 train_time:233635ms step_avg:74.38ms +[2025-09-02 13:58:56] [Rank 0] step:3141/10000 train_time:233635ms step_avg:74.38ms +[2025-09-02 13:58:57] [Rank 0] step:3161/10000 train_time:235163ms step_avg:74.40ms +[2025-09-02 13:58:57] [Rank 0] step:3161/10000 train_time:235163ms step_avg:74.40ms +[2025-09-02 13:58:59] [Rank 0] step:3181/10000 train_time:236691ms step_avg:74.41ms +[2025-09-02 13:58:59] [Rank 0] step:3181/10000 train_time:236691ms step_avg:74.41ms +[2025-09-02 13:59:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:59:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 13:59:12] [Rank 0] PRINT: step:3200/10000 val_loss:4.1306 svd_entropy: attn_qk:H=0.7644,top10E=0.26,eRank=179.6,q75/q25=56.17 attn_vo:H=0.7935,top10E=0.15,eRank=268.1,q75/q25=inf mlp_w1:H=0.8822,top10E=0.17,eRank=356.2,q75/q25=5.46 mlp_w2:H=0.9705,top10E=0.04,eRank=631.3,q75/q25=2.93 vo_prod:H=0.6535,top10E=0.23,eRank=110.3,q75/q25=inf train_time:238298ms step_avg:74.47ms +[2025-09-02 13:59:12] [Rank 0] PRINT: step:3200/10000 val_loss:4.1306 svd_entropy: attn_qk:H=0.7644,top10E=0.26,eRank=179.6,q75/q25=56.17 attn_vo:H=0.7935,top10E=0.15,eRank=268.1,q75/q25=inf mlp_w1:H=0.8822,top10E=0.17,eRank=356.2,q75/q25=5.46 mlp_w2:H=0.9705,top10E=0.04,eRank=631.3,q75/q25=2.93 vo_prod:H=0.6535,top10E=0.23,eRank=110.3,q75/q25=inf train_time:238298ms step_avg:74.47ms +[2025-09-02 13:59:12] [Rank 0] step:3201/10000 train_time:238312ms step_avg:74.45ms +[2025-09-02 13:59:12] [Rank 0] step:3201/10000 train_time:238312ms step_avg:74.45ms +[2025-09-02 13:59:14] [Rank 0] step:3221/10000 train_time:239766ms step_avg:74.44ms +[2025-09-02 13:59:14] [Rank 0] step:3221/10000 train_time:239766ms step_avg:74.44ms +[2025-09-02 13:59:15] [Rank 0] step:3241/10000 train_time:241292ms step_avg:74.45ms +[2025-09-02 13:59:15] [Rank 0] step:3241/10000 train_time:241292ms step_avg:74.45ms +[2025-09-02 13:59:17] [Rank 0] step:3261/10000 train_time:242817ms step_avg:74.46ms +[2025-09-02 13:59:17] [Rank 0] step:3261/10000 train_time:242817ms step_avg:74.46ms +[2025-09-02 13:59:18] [Rank 0] step:3281/10000 train_time:244345ms step_avg:74.47ms +[2025-09-02 13:59:18] [Rank 0] step:3281/10000 train_time:244345ms step_avg:74.47ms +[2025-09-02 13:59:20] [Rank 0] step:3301/10000 train_time:245872ms step_avg:74.48ms +[2025-09-02 13:59:20] [Rank 0] step:3301/10000 train_time:245872ms step_avg:74.48ms +[2025-09-02 13:59:21] [Rank 0] step:3321/10000 train_time:247400ms step_avg:74.50ms +[2025-09-02 13:59:21] [Rank 0] step:3321/10000 train_time:247400ms step_avg:74.50ms +[2025-09-02 13:59:23] [Rank 0] 
step:3341/10000 train_time:248928ms step_avg:74.51ms +[2025-09-02 13:59:23] [Rank 0] step:3341/10000 train_time:248928ms step_avg:74.51ms +[2025-09-02 13:59:24] [Rank 0] step:3361/10000 train_time:250455ms step_avg:74.52ms +[2025-09-02 13:59:24] [Rank 0] step:3361/10000 train_time:250455ms step_avg:74.52ms +[2025-09-02 13:59:26] [Rank 0] step:3381/10000 train_time:251984ms step_avg:74.53ms +[2025-09-02 13:59:26] [Rank 0] step:3381/10000 train_time:251984ms step_avg:74.53ms +[2025-09-02 13:59:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:59:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:59:39] [Rank 0] PRINT: step:3400/10000 val_loss:4.0964 svd_entropy: attn_qk:H=0.7667,top10E=0.26,eRank=181.6,q75/q25=56.25 attn_vo:H=0.7965,top10E=0.14,eRank=272.3,q75/q25=inf mlp_w1:H=0.8853,top10E=0.16,eRank=363.2,q75/q25=5.34 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.92 vo_prod:H=0.6576,top10E=0.23,eRank=113.7,q75/q25=inf train_time:253590ms step_avg:74.59ms +[2025-09-02 13:59:39] [Rank 0] PRINT: step:3400/10000 val_loss:4.0964 svd_entropy: attn_qk:H=0.7667,top10E=0.26,eRank=181.6,q75/q25=56.25 attn_vo:H=0.7965,top10E=0.14,eRank=272.3,q75/q25=inf mlp_w1:H=0.8853,top10E=0.16,eRank=363.2,q75/q25=5.34 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.92 vo_prod:H=0.6576,top10E=0.23,eRank=113.7,q75/q25=inf train_time:253590ms step_avg:74.59ms +[2025-09-02 13:59:39] [Rank 0] step:3401/10000 train_time:253605ms step_avg:74.57ms +[2025-09-02 13:59:39] [Rank 0] step:3401/10000 train_time:253605ms step_avg:74.57ms +[2025-09-02 13:59:41] [Rank 0] step:3421/10000 train_time:255060ms step_avg:74.56ms +[2025-09-02 13:59:41] [Rank 0] step:3421/10000 train_time:255060ms step_avg:74.56ms +[2025-09-02 13:59:42] [Rank 0] step:3441/10000 train_time:256588ms step_avg:74.57ms +[2025-09-02 
13:59:42] [Rank 0] step:3441/10000 train_time:256588ms step_avg:74.57ms +[2025-09-02 13:59:44] [Rank 0] step:3461/10000 train_time:258115ms step_avg:74.58ms +[2025-09-02 13:59:44] [Rank 0] step:3461/10000 train_time:258115ms step_avg:74.58ms +[2025-09-02 13:59:45] [Rank 0] step:3481/10000 train_time:259645ms step_avg:74.59ms +[2025-09-02 13:59:45] [Rank 0] step:3481/10000 train_time:259645ms step_avg:74.59ms +[2025-09-02 13:59:47] [Rank 0] step:3501/10000 train_time:261175ms step_avg:74.60ms +[2025-09-02 13:59:47] [Rank 0] step:3501/10000 train_time:261175ms step_avg:74.60ms +[2025-09-02 13:59:48] [Rank 0] step:3521/10000 train_time:262706ms step_avg:74.61ms +[2025-09-02 13:59:48] [Rank 0] step:3521/10000 train_time:262706ms step_avg:74.61ms +[2025-09-02 13:59:50] [Rank 0] step:3541/10000 train_time:264235ms step_avg:74.62ms +[2025-09-02 13:59:50] [Rank 0] step:3541/10000 train_time:264235ms step_avg:74.62ms +[2025-09-02 13:59:51] [Rank 0] step:3561/10000 train_time:265765ms step_avg:74.63ms +[2025-09-02 13:59:51] [Rank 0] step:3561/10000 train_time:265765ms step_avg:74.63ms +[2025-09-02 13:59:53] [Rank 0] step:3581/10000 train_time:267296ms step_avg:74.64ms +[2025-09-02 13:59:53] [Rank 0] step:3581/10000 train_time:267296ms step_avg:74.64ms +[2025-09-02 13:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 13:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:00:06] [Rank 0] PRINT: step:3600/10000 val_loss:4.0792 svd_entropy: attn_qk:H=0.7689,top10E=0.26,eRank=183.7,q75/q25=56.30 attn_vo:H=0.7991,top10E=0.14,eRank=276.1,q75/q25=inf mlp_w1:H=0.8880,top10E=0.16,eRank=369.5,q75/q25=5.24 mlp_w2:H=0.9707,top10E=0.04,eRank=632.0,q75/q25=2.92 vo_prod:H=0.6612,top10E=0.22,eRank=116.7,q75/q25=inf train_time:268905ms step_avg:74.70ms +[2025-09-02 14:00:06] [Rank 0] PRINT: step:3600/10000 val_loss:4.0792 svd_entropy: attn_qk:H=0.7689,top10E=0.26,eRank=183.7,q75/q25=56.30 attn_vo:H=0.7991,top10E=0.14,eRank=276.1,q75/q25=inf mlp_w1:H=0.8880,top10E=0.16,eRank=369.5,q75/q25=5.24 mlp_w2:H=0.9707,top10E=0.04,eRank=632.0,q75/q25=2.92 vo_prod:H=0.6612,top10E=0.22,eRank=116.7,q75/q25=inf train_time:268905ms step_avg:74.70ms +[2025-09-02 14:00:06] [Rank 0] step:3601/10000 train_time:268921ms step_avg:74.68ms +[2025-09-02 14:00:06] [Rank 0] step:3601/10000 train_time:268921ms step_avg:74.68ms +[2025-09-02 14:00:08] [Rank 0] step:3621/10000 train_time:270376ms step_avg:74.67ms +[2025-09-02 14:00:08] [Rank 0] step:3621/10000 train_time:270376ms step_avg:74.67ms +[2025-09-02 14:00:09] [Rank 0] step:3641/10000 train_time:271906ms step_avg:74.68ms +[2025-09-02 14:00:09] [Rank 0] step:3641/10000 train_time:271906ms step_avg:74.68ms +[2025-09-02 14:00:11] [Rank 0] step:3661/10000 train_time:273451ms step_avg:74.69ms +[2025-09-02 14:00:11] [Rank 0] step:3661/10000 train_time:273451ms step_avg:74.69ms +[2025-09-02 14:00:12] [Rank 0] step:3681/10000 train_time:274982ms step_avg:74.70ms +[2025-09-02 14:00:12] [Rank 0] step:3681/10000 train_time:274982ms step_avg:74.70ms +[2025-09-02 14:00:14] [Rank 0] step:3701/10000 train_time:276531ms step_avg:74.72ms +[2025-09-02 14:00:14] [Rank 0] step:3701/10000 train_time:276531ms step_avg:74.72ms +[2025-09-02 14:00:16] [Rank 0] step:3721/10000 train_time:278088ms step_avg:74.73ms +[2025-09-02 14:00:16] [Rank 0] step:3721/10000 train_time:278088ms step_avg:74.73ms +[2025-09-02 14:00:17] [Rank 0] 
step:3741/10000 train_time:279655ms step_avg:74.75ms +[2025-09-02 14:00:17] [Rank 0] step:3741/10000 train_time:279655ms step_avg:74.75ms +[2025-09-02 14:00:19] [Rank 0] step:3761/10000 train_time:281221ms step_avg:74.77ms +[2025-09-02 14:00:19] [Rank 0] step:3761/10000 train_time:281221ms step_avg:74.77ms +[2025-09-02 14:00:20] [Rank 0] step:3781/10000 train_time:282790ms step_avg:74.79ms +[2025-09-02 14:00:20] [Rank 0] step:3781/10000 train_time:282790ms step_avg:74.79ms +[2025-09-02 14:00:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:00:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:00:33] [Rank 0] PRINT: step:3800/10000 val_loss:4.0374 svd_entropy: attn_qk:H=0.7704,top10E=0.26,eRank=185.1,q75/q25=55.95 attn_vo:H=0.8014,top10E=0.14,eRank=279.5,q75/q25=inf mlp_w1:H=0.8905,top10E=0.16,eRank=375.4,q75/q25=5.14 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.91 vo_prod:H=0.6644,top10E=0.22,eRank=119.5,q75/q25=inf train_time:284439ms step_avg:74.85ms +[2025-09-02 14:00:33] [Rank 0] PRINT: step:3800/10000 val_loss:4.0374 svd_entropy: attn_qk:H=0.7704,top10E=0.26,eRank=185.1,q75/q25=55.95 attn_vo:H=0.8014,top10E=0.14,eRank=279.5,q75/q25=inf mlp_w1:H=0.8905,top10E=0.16,eRank=375.4,q75/q25=5.14 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.91 vo_prod:H=0.6644,top10E=0.22,eRank=119.5,q75/q25=inf train_time:284439ms step_avg:74.85ms +[2025-09-02 14:00:34] [Rank 0] step:3801/10000 train_time:284454ms step_avg:74.84ms +[2025-09-02 14:00:34] [Rank 0] step:3801/10000 train_time:284454ms step_avg:74.84ms +[2025-09-02 14:00:35] [Rank 0] step:3821/10000 train_time:285942ms step_avg:74.83ms +[2025-09-02 14:00:35] [Rank 0] step:3821/10000 train_time:285942ms step_avg:74.83ms +[2025-09-02 14:00:37] [Rank 0] step:3841/10000 train_time:287507ms step_avg:74.85ms +[2025-09-02 
14:00:37] [Rank 0] step:3841/10000 train_time:287507ms step_avg:74.85ms +[2025-09-02 14:00:38] [Rank 0] step:3861/10000 train_time:289070ms step_avg:74.87ms +[2025-09-02 14:00:38] [Rank 0] step:3861/10000 train_time:289070ms step_avg:74.87ms +[2025-09-02 14:00:40] [Rank 0] step:3881/10000 train_time:290632ms step_avg:74.89ms +[2025-09-02 14:00:40] [Rank 0] step:3881/10000 train_time:290632ms step_avg:74.89ms +[2025-09-02 14:00:41] [Rank 0] step:3901/10000 train_time:292196ms step_avg:74.90ms +[2025-09-02 14:00:41] [Rank 0] step:3901/10000 train_time:292196ms step_avg:74.90ms +[2025-09-02 14:00:43] [Rank 0] step:3921/10000 train_time:293759ms step_avg:74.92ms +[2025-09-02 14:00:43] [Rank 0] step:3921/10000 train_time:293759ms step_avg:74.92ms +[2025-09-02 14:00:44] [Rank 0] step:3941/10000 train_time:295321ms step_avg:74.94ms +[2025-09-02 14:00:44] [Rank 0] step:3941/10000 train_time:295321ms step_avg:74.94ms +[2025-09-02 14:00:46] [Rank 0] step:3961/10000 train_time:296883ms step_avg:74.95ms +[2025-09-02 14:00:46] [Rank 0] step:3961/10000 train_time:296883ms step_avg:74.95ms +[2025-09-02 14:00:48] [Rank 0] step:3981/10000 train_time:298446ms step_avg:74.97ms +[2025-09-02 14:00:48] [Rank 0] step:3981/10000 train_time:298446ms step_avg:74.97ms +[2025-09-02 14:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:01:01] [Rank 0] PRINT: step:4000/10000 val_loss:4.0119 svd_entropy: attn_qk:H=0.7722,top10E=0.26,eRank=186.8,q75/q25=55.77 attn_vo:H=0.8035,top10E=0.13,eRank=282.6,q75/q25=inf mlp_w1:H=0.8929,top10E=0.15,eRank=381.0,q75/q25=5.06 mlp_w2:H=0.9708,top10E=0.04,eRank=632.6,q75/q25=2.91 vo_prod:H=0.6675,top10E=0.21,eRank=122.2,q75/q25=inf train_time:300089ms step_avg:75.02ms +[2025-09-02 14:01:01] [Rank 0] PRINT: step:4000/10000 val_loss:4.0119 svd_entropy: attn_qk:H=0.7722,top10E=0.26,eRank=186.8,q75/q25=55.77 attn_vo:H=0.8035,top10E=0.13,eRank=282.6,q75/q25=inf mlp_w1:H=0.8929,top10E=0.15,eRank=381.0,q75/q25=5.06 mlp_w2:H=0.9708,top10E=0.04,eRank=632.6,q75/q25=2.91 vo_prod:H=0.6675,top10E=0.21,eRank=122.2,q75/q25=inf train_time:300089ms step_avg:75.02ms +[2025-09-02 14:01:01] [Rank 0] step:4001/10000 train_time:300104ms step_avg:75.01ms +[2025-09-02 14:01:01] [Rank 0] step:4001/10000 train_time:300104ms step_avg:75.01ms +[2025-09-02 14:01:03] [Rank 0] step:4021/10000 train_time:301595ms step_avg:75.01ms +[2025-09-02 14:01:03] [Rank 0] step:4021/10000 train_time:301595ms step_avg:75.01ms +[2025-09-02 14:01:04] [Rank 0] step:4041/10000 train_time:303156ms step_avg:75.02ms +[2025-09-02 14:01:04] [Rank 0] step:4041/10000 train_time:303156ms step_avg:75.02ms +[2025-09-02 14:01:06] [Rank 0] step:4061/10000 train_time:304719ms step_avg:75.04ms +[2025-09-02 14:01:06] [Rank 0] step:4061/10000 train_time:304719ms step_avg:75.04ms +[2025-09-02 14:01:07] [Rank 0] step:4081/10000 train_time:306571ms step_avg:75.12ms +[2025-09-02 14:01:07] [Rank 0] step:4081/10000 train_time:306571ms step_avg:75.12ms +[2025-09-02 14:01:09] [Rank 0] step:4101/10000 train_time:308134ms step_avg:75.14ms +[2025-09-02 14:01:09] [Rank 0] step:4101/10000 train_time:308134ms step_avg:75.14ms +[2025-09-02 14:01:11] [Rank 0] step:4121/10000 train_time:309696ms step_avg:75.15ms +[2025-09-02 14:01:11] [Rank 0] step:4121/10000 train_time:309696ms step_avg:75.15ms +[2025-09-02 14:01:12] [Rank 0] 
step:4141/10000 train_time:311260ms step_avg:75.17ms +[2025-09-02 14:01:12] [Rank 0] step:4141/10000 train_time:311260ms step_avg:75.17ms +[2025-09-02 14:01:14] [Rank 0] step:4161/10000 train_time:312871ms step_avg:75.19ms +[2025-09-02 14:01:14] [Rank 0] step:4161/10000 train_time:312871ms step_avg:75.19ms +[2025-09-02 14:01:15] [Rank 0] step:4181/10000 train_time:314437ms step_avg:75.21ms +[2025-09-02 14:01:15] [Rank 0] step:4181/10000 train_time:314437ms step_avg:75.21ms +[2025-09-02 14:01:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:01:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:01:29] [Rank 0] PRINT: step:4200/10000 val_loss:3.9910 svd_entropy: attn_qk:H=0.7738,top10E=0.25,eRank=188.3,q75/q25=55.39 attn_vo:H=0.8054,top10E=0.13,eRank=285.5,q75/q25=inf mlp_w1:H=0.8950,top10E=0.15,eRank=386.3,q75/q25=4.99 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.90 vo_prod:H=0.6702,top10E=0.21,eRank=124.6,q75/q25=inf train_time:316093ms step_avg:75.26ms +[2025-09-02 14:01:29] [Rank 0] PRINT: step:4200/10000 val_loss:3.9910 svd_entropy: attn_qk:H=0.7738,top10E=0.25,eRank=188.3,q75/q25=55.39 attn_vo:H=0.8054,top10E=0.13,eRank=285.5,q75/q25=inf mlp_w1:H=0.8950,top10E=0.15,eRank=386.3,q75/q25=4.99 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.90 vo_prod:H=0.6702,top10E=0.21,eRank=124.6,q75/q25=inf train_time:316093ms step_avg:75.26ms +[2025-09-02 14:01:29] [Rank 0] step:4201/10000 train_time:316109ms step_avg:75.25ms +[2025-09-02 14:01:29] [Rank 0] step:4201/10000 train_time:316109ms step_avg:75.25ms +[2025-09-02 14:01:30] [Rank 0] step:4221/10000 train_time:317609ms step_avg:75.25ms +[2025-09-02 14:01:30] [Rank 0] step:4221/10000 train_time:317609ms step_avg:75.25ms +[2025-09-02 14:01:32] [Rank 0] step:4241/10000 train_time:319174ms step_avg:75.26ms +[2025-09-02 
14:01:32] [Rank 0] step:4241/10000 train_time:319174ms step_avg:75.26ms +[2025-09-02 14:01:33] [Rank 0] step:4261/10000 train_time:320737ms step_avg:75.27ms +[2025-09-02 14:01:33] [Rank 0] step:4261/10000 train_time:320737ms step_avg:75.27ms +[2025-09-02 14:01:35] [Rank 0] step:4281/10000 train_time:322302ms step_avg:75.29ms +[2025-09-02 14:01:35] [Rank 0] step:4281/10000 train_time:322302ms step_avg:75.29ms +[2025-09-02 14:01:37] [Rank 0] step:4301/10000 train_time:323865ms step_avg:75.30ms +[2025-09-02 14:01:37] [Rank 0] step:4301/10000 train_time:323865ms step_avg:75.30ms +[2025-09-02 14:01:38] [Rank 0] step:4321/10000 train_time:325431ms step_avg:75.31ms +[2025-09-02 14:01:38] [Rank 0] step:4321/10000 train_time:325431ms step_avg:75.31ms +[2025-09-02 14:01:40] [Rank 0] step:4341/10000 train_time:326994ms step_avg:75.33ms +[2025-09-02 14:01:40] [Rank 0] step:4341/10000 train_time:326994ms step_avg:75.33ms +[2025-09-02 14:01:41] [Rank 0] step:4361/10000 train_time:328560ms step_avg:75.34ms +[2025-09-02 14:01:41] [Rank 0] step:4361/10000 train_time:328560ms step_avg:75.34ms +[2025-09-02 14:01:43] [Rank 0] step:4381/10000 train_time:330124ms step_avg:75.35ms +[2025-09-02 14:01:43] [Rank 0] step:4381/10000 train_time:330124ms step_avg:75.35ms +[2025-09-02 14:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:01:56] [Rank 0] PRINT: step:4400/10000 val_loss:3.9677 svd_entropy: attn_qk:H=0.7754,top10E=0.25,eRank=189.8,q75/q25=55.31 attn_vo:H=0.8071,top10E=0.13,eRank=288.2,q75/q25=inf mlp_w1:H=0.8970,top10E=0.15,eRank=391.3,q75/q25=4.92 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.90 vo_prod:H=0.6727,top10E=0.21,eRank=126.8,q75/q25=inf train_time:331768ms step_avg:75.40ms +[2025-09-02 14:01:56] [Rank 0] PRINT: step:4400/10000 val_loss:3.9677 svd_entropy: attn_qk:H=0.7754,top10E=0.25,eRank=189.8,q75/q25=55.31 attn_vo:H=0.8071,top10E=0.13,eRank=288.2,q75/q25=inf mlp_w1:H=0.8970,top10E=0.15,eRank=391.3,q75/q25=4.92 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.90 vo_prod:H=0.6727,top10E=0.21,eRank=126.8,q75/q25=inf train_time:331768ms step_avg:75.40ms +[2025-09-02 14:01:56] [Rank 0] step:4401/10000 train_time:331784ms step_avg:75.39ms +[2025-09-02 14:01:56] [Rank 0] step:4401/10000 train_time:331784ms step_avg:75.39ms +[2025-09-02 14:01:58] [Rank 0] step:4421/10000 train_time:333295ms step_avg:75.39ms +[2025-09-02 14:01:58] [Rank 0] step:4421/10000 train_time:333295ms step_avg:75.39ms +[2025-09-02 14:01:59] [Rank 0] step:4441/10000 train_time:334856ms step_avg:75.40ms +[2025-09-02 14:01:59] [Rank 0] step:4441/10000 train_time:334856ms step_avg:75.40ms +[2025-09-02 14:02:01] [Rank 0] step:4461/10000 train_time:336424ms step_avg:75.41ms +[2025-09-02 14:02:01] [Rank 0] step:4461/10000 train_time:336424ms step_avg:75.41ms +[2025-09-02 14:02:03] [Rank 0] step:4481/10000 train_time:337994ms step_avg:75.43ms +[2025-09-02 14:02:03] [Rank 0] step:4481/10000 train_time:337994ms step_avg:75.43ms +[2025-09-02 14:02:04] [Rank 0] step:4501/10000 train_time:339564ms step_avg:75.44ms +[2025-09-02 14:02:04] [Rank 0] step:4501/10000 train_time:339564ms step_avg:75.44ms +[2025-09-02 14:02:06] [Rank 0] step:4521/10000 train_time:341132ms step_avg:75.46ms +[2025-09-02 14:02:06] [Rank 0] step:4521/10000 train_time:341132ms step_avg:75.46ms +[2025-09-02 14:02:07] [Rank 0] 
step:4541/10000 train_time:342705ms step_avg:75.47ms +[2025-09-02 14:02:07] [Rank 0] step:4541/10000 train_time:342705ms step_avg:75.47ms +[2025-09-02 14:02:09] [Rank 0] step:4561/10000 train_time:344276ms step_avg:75.48ms +[2025-09-02 14:02:09] [Rank 0] step:4561/10000 train_time:344276ms step_avg:75.48ms +[2025-09-02 14:02:10] [Rank 0] step:4581/10000 train_time:345851ms step_avg:75.50ms +[2025-09-02 14:02:10] [Rank 0] step:4581/10000 train_time:345851ms step_avg:75.50ms +[2025-09-02 14:02:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:02:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:02:24] [Rank 0] PRINT: step:4600/10000 val_loss:3.9430 svd_entropy: attn_qk:H=0.7770,top10E=0.25,eRank=191.4,q75/q25=54.90 attn_vo:H=0.8088,top10E=0.13,eRank=290.8,q75/q25=inf mlp_w1:H=0.8990,top10E=0.15,eRank=396.2,q75/q25=4.85 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.89 vo_prod:H=0.6750,top10E=0.21,eRank=129.1,q75/q25=inf train_time:347503ms step_avg:75.54ms +[2025-09-02 14:02:24] [Rank 0] PRINT: step:4600/10000 val_loss:3.9430 svd_entropy: attn_qk:H=0.7770,top10E=0.25,eRank=191.4,q75/q25=54.90 attn_vo:H=0.8088,top10E=0.13,eRank=290.8,q75/q25=inf mlp_w1:H=0.8990,top10E=0.15,eRank=396.2,q75/q25=4.85 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.89 vo_prod:H=0.6750,top10E=0.21,eRank=129.1,q75/q25=inf train_time:347503ms step_avg:75.54ms +[2025-09-02 14:02:24] [Rank 0] step:4601/10000 train_time:347519ms step_avg:75.53ms +[2025-09-02 14:02:24] [Rank 0] step:4601/10000 train_time:347519ms step_avg:75.53ms +[2025-09-02 14:02:25] [Rank 0] step:4621/10000 train_time:349012ms step_avg:75.53ms +[2025-09-02 14:02:25] [Rank 0] step:4621/10000 train_time:349012ms step_avg:75.53ms +[2025-09-02 14:02:27] [Rank 0] step:4641/10000 train_time:350584ms step_avg:75.54ms +[2025-09-02 
14:02:27] [Rank 0] step:4641/10000 train_time:350584ms step_avg:75.54ms +[2025-09-02 14:02:29] [Rank 0] step:4661/10000 train_time:352157ms step_avg:75.55ms +[2025-09-02 14:02:29] [Rank 0] step:4661/10000 train_time:352157ms step_avg:75.55ms +[2025-09-02 14:02:30] [Rank 0] step:4681/10000 train_time:353731ms step_avg:75.57ms +[2025-09-02 14:02:30] [Rank 0] step:4681/10000 train_time:353731ms step_avg:75.57ms +[2025-09-02 14:02:32] [Rank 0] step:4701/10000 train_time:355305ms step_avg:75.58ms +[2025-09-02 14:02:32] [Rank 0] step:4701/10000 train_time:355305ms step_avg:75.58ms +[2025-09-02 14:02:33] [Rank 0] step:4721/10000 train_time:356879ms step_avg:75.59ms +[2025-09-02 14:02:33] [Rank 0] step:4721/10000 train_time:356879ms step_avg:75.59ms +[2025-09-02 14:02:35] [Rank 0] step:4741/10000 train_time:358452ms step_avg:75.61ms +[2025-09-02 14:02:35] [Rank 0] step:4741/10000 train_time:358452ms step_avg:75.61ms +[2025-09-02 14:02:36] [Rank 0] step:4761/10000 train_time:360025ms step_avg:75.62ms +[2025-09-02 14:02:36] [Rank 0] step:4761/10000 train_time:360025ms step_avg:75.62ms +[2025-09-02 14:02:38] [Rank 0] step:4781/10000 train_time:361597ms step_avg:75.63ms +[2025-09-02 14:02:38] [Rank 0] step:4781/10000 train_time:361597ms step_avg:75.63ms +[2025-09-02 14:02:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:02:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:02:51] [Rank 0] PRINT: step:4800/10000 val_loss:3.9301 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=192.8,q75/q25=54.87 attn_vo:H=0.8104,top10E=0.13,eRank=293.3,q75/q25=inf mlp_w1:H=0.9007,top10E=0.15,eRank=400.7,q75/q25=4.79 mlp_w2:H=0.9710,top10E=0.04,eRank=633.3,q75/q25=2.89 vo_prod:H=0.6773,top10E=0.20,eRank=131.2,q75/q25=inf train_time:363253ms step_avg:75.68ms +[2025-09-02 14:02:51] [Rank 0] PRINT: step:4800/10000 val_loss:3.9301 svd_entropy: attn_qk:H=0.7785,top10E=0.25,eRank=192.8,q75/q25=54.87 attn_vo:H=0.8104,top10E=0.13,eRank=293.3,q75/q25=inf mlp_w1:H=0.9007,top10E=0.15,eRank=400.7,q75/q25=4.79 mlp_w2:H=0.9710,top10E=0.04,eRank=633.3,q75/q25=2.89 vo_prod:H=0.6773,top10E=0.20,eRank=131.2,q75/q25=inf train_time:363253ms step_avg:75.68ms +[2025-09-02 14:02:51] [Rank 0] step:4801/10000 train_time:363268ms step_avg:75.67ms +[2025-09-02 14:02:51] [Rank 0] step:4801/10000 train_time:363268ms step_avg:75.67ms +[2025-09-02 14:02:53] [Rank 0] step:4821/10000 train_time:364783ms step_avg:75.67ms +[2025-09-02 14:02:53] [Rank 0] step:4821/10000 train_time:364783ms step_avg:75.67ms +[2025-09-02 14:02:55] [Rank 0] step:4841/10000 train_time:366349ms step_avg:75.68ms +[2025-09-02 14:02:55] [Rank 0] step:4841/10000 train_time:366349ms step_avg:75.68ms +[2025-09-02 14:02:56] [Rank 0] step:4861/10000 train_time:367921ms step_avg:75.69ms +[2025-09-02 14:02:56] [Rank 0] step:4861/10000 train_time:367921ms step_avg:75.69ms +[2025-09-02 14:02:58] [Rank 0] step:4881/10000 train_time:369489ms step_avg:75.70ms +[2025-09-02 14:02:58] [Rank 0] step:4881/10000 train_time:369489ms step_avg:75.70ms +[2025-09-02 14:02:59] [Rank 0] step:4901/10000 train_time:371057ms step_avg:75.71ms +[2025-09-02 14:02:59] [Rank 0] step:4901/10000 train_time:371057ms step_avg:75.71ms +[2025-09-02 14:03:01] [Rank 0] step:4921/10000 train_time:372630ms step_avg:75.72ms +[2025-09-02 14:03:01] [Rank 0] step:4921/10000 train_time:372630ms step_avg:75.72ms +[2025-09-02 14:03:02] [Rank 0] 
step:4941/10000 train_time:374203ms step_avg:75.73ms +[2025-09-02 14:03:02] [Rank 0] step:4941/10000 train_time:374203ms step_avg:75.73ms +[2025-09-02 14:03:04] [Rank 0] step:4961/10000 train_time:375771ms step_avg:75.74ms +[2025-09-02 14:03:04] [Rank 0] step:4961/10000 train_time:375771ms step_avg:75.74ms +[2025-09-02 14:03:06] [Rank 0] step:4981/10000 train_time:377343ms step_avg:75.76ms +[2025-09-02 14:03:06] [Rank 0] step:4981/10000 train_time:377343ms step_avg:75.76ms +[2025-09-02 14:03:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:03:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:03:19] [Rank 0] PRINT: step:5000/10000 val_loss:3.9081 svd_entropy: attn_qk:H=0.7800,top10E=0.25,eRank=194.3,q75/q25=54.33 attn_vo:H=0.8117,top10E=0.13,eRank=295.4,q75/q25=inf mlp_w1:H=0.9022,top10E=0.14,eRank=404.7,q75/q25=4.75 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.89 vo_prod:H=0.6792,top10E=0.20,eRank=133.0,q75/q25=inf train_time:378996ms step_avg:75.80ms +[2025-09-02 14:03:19] [Rank 0] PRINT: step:5000/10000 val_loss:3.9081 svd_entropy: attn_qk:H=0.7800,top10E=0.25,eRank=194.3,q75/q25=54.33 attn_vo:H=0.8117,top10E=0.13,eRank=295.4,q75/q25=inf mlp_w1:H=0.9022,top10E=0.14,eRank=404.7,q75/q25=4.75 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.89 vo_prod:H=0.6792,top10E=0.20,eRank=133.0,q75/q25=inf train_time:378996ms step_avg:75.80ms +[2025-09-02 14:03:19] [Rank 0] step:5001/10000 train_time:379012ms step_avg:75.79ms +[2025-09-02 14:03:19] [Rank 0] step:5001/10000 train_time:379012ms step_avg:75.79ms +[2025-09-02 14:03:21] [Rank 0] step:5021/10000 train_time:380611ms step_avg:75.80ms +[2025-09-02 14:03:21] [Rank 0] step:5021/10000 train_time:380611ms step_avg:75.80ms +[2025-09-02 14:03:22] [Rank 0] step:5041/10000 train_time:382180ms step_avg:75.81ms +[2025-09-02 
14:03:22] [Rank 0] step:5041/10000 train_time:382180ms step_avg:75.81ms +[2025-09-02 14:03:24] [Rank 0] step:5061/10000 train_time:383747ms step_avg:75.82ms +[2025-09-02 14:03:24] [Rank 0] step:5061/10000 train_time:383747ms step_avg:75.82ms +[2025-09-02 14:03:25] [Rank 0] step:5081/10000 train_time:385316ms step_avg:75.83ms +[2025-09-02 14:03:25] [Rank 0] step:5081/10000 train_time:385316ms step_avg:75.83ms +[2025-09-02 14:03:27] [Rank 0] step:5101/10000 train_time:386888ms step_avg:75.85ms +[2025-09-02 14:03:27] [Rank 0] step:5101/10000 train_time:386888ms step_avg:75.85ms +[2025-09-02 14:03:29] [Rank 0] step:5121/10000 train_time:388458ms step_avg:75.86ms +[2025-09-02 14:03:29] [Rank 0] step:5121/10000 train_time:388458ms step_avg:75.86ms +[2025-09-02 14:03:30] [Rank 0] step:5141/10000 train_time:390031ms step_avg:75.87ms +[2025-09-02 14:03:30] [Rank 0] step:5141/10000 train_time:390031ms step_avg:75.87ms +[2025-09-02 14:03:32] [Rank 0] step:5161/10000 train_time:391603ms step_avg:75.88ms +[2025-09-02 14:03:32] [Rank 0] step:5161/10000 train_time:391603ms step_avg:75.88ms +[2025-09-02 14:03:33] [Rank 0] step:5181/10000 train_time:393176ms step_avg:75.89ms +[2025-09-02 14:03:33] [Rank 0] step:5181/10000 train_time:393176ms step_avg:75.89ms +[2025-09-02 14:03:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:03:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:03:47] [Rank 0] PRINT: step:5200/10000 val_loss:3.8900 svd_entropy: attn_qk:H=0.7812,top10E=0.24,eRank=195.5,q75/q25=53.92 attn_vo:H=0.8130,top10E=0.12,eRank=297.5,q75/q25=inf mlp_w1:H=0.9037,top10E=0.14,eRank=408.6,q75/q25=4.70 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.89 vo_prod:H=0.6809,top10E=0.20,eRank=134.7,q75/q25=inf train_time:394853ms step_avg:75.93ms +[2025-09-02 14:03:47] [Rank 0] PRINT: step:5200/10000 val_loss:3.8900 svd_entropy: attn_qk:H=0.7812,top10E=0.24,eRank=195.5,q75/q25=53.92 attn_vo:H=0.8130,top10E=0.12,eRank=297.5,q75/q25=inf mlp_w1:H=0.9037,top10E=0.14,eRank=408.6,q75/q25=4.70 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.89 vo_prod:H=0.6809,top10E=0.20,eRank=134.7,q75/q25=inf train_time:394853ms step_avg:75.93ms +[2025-09-02 14:03:47] [Rank 0] step:5201/10000 train_time:394872ms step_avg:75.92ms +[2025-09-02 14:03:47] [Rank 0] step:5201/10000 train_time:394872ms step_avg:75.92ms +[2025-09-02 14:03:48] [Rank 0] step:5221/10000 train_time:396392ms step_avg:75.92ms +[2025-09-02 14:03:48] [Rank 0] step:5221/10000 train_time:396392ms step_avg:75.92ms +[2025-09-02 14:03:50] [Rank 0] step:5241/10000 train_time:397995ms step_avg:75.94ms +[2025-09-02 14:03:50] [Rank 0] step:5241/10000 train_time:397995ms step_avg:75.94ms +[2025-09-02 14:03:51] [Rank 0] step:5261/10000 train_time:399596ms step_avg:75.95ms +[2025-09-02 14:03:51] [Rank 0] step:5261/10000 train_time:399596ms step_avg:75.95ms +[2025-09-02 14:03:53] [Rank 0] step:5281/10000 train_time:401201ms step_avg:75.97ms +[2025-09-02 14:03:53] [Rank 0] step:5281/10000 train_time:401201ms step_avg:75.97ms +[2025-09-02 14:03:55] [Rank 0] step:5301/10000 train_time:402811ms step_avg:75.99ms +[2025-09-02 14:03:55] [Rank 0] step:5301/10000 train_time:402811ms step_avg:75.99ms +[2025-09-02 14:03:56] [Rank 0] step:5321/10000 train_time:404414ms step_avg:76.00ms +[2025-09-02 14:03:56] [Rank 0] step:5321/10000 train_time:404414ms step_avg:76.00ms +[2025-09-02 14:03:58] [Rank 0] 
step:5341/10000 train_time:406015ms step_avg:76.02ms +[2025-09-02 14:03:58] [Rank 0] step:5341/10000 train_time:406015ms step_avg:76.02ms +[2025-09-02 14:04:00] [Rank 0] step:5361/10000 train_time:407621ms step_avg:76.03ms +[2025-09-02 14:04:00] [Rank 0] step:5361/10000 train_time:407621ms step_avg:76.03ms +[2025-09-02 14:04:01] [Rank 0] step:5381/10000 train_time:409227ms step_avg:76.05ms +[2025-09-02 14:04:01] [Rank 0] step:5381/10000 train_time:409227ms step_avg:76.05ms +[2025-09-02 14:04:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:04:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:04:14] [Rank 0] PRINT: step:5400/10000 val_loss:3.8727 svd_entropy: attn_qk:H=0.7823,top10E=0.24,eRank=196.6,q75/q25=53.71 attn_vo:H=0.8142,top10E=0.12,eRank=299.4,q75/q25=inf mlp_w1:H=0.9051,top10E=0.14,eRank=412.2,q75/q25=4.65 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.88 vo_prod:H=0.6826,top10E=0.20,eRank=136.3,q75/q25=inf train_time:410910ms step_avg:76.09ms +[2025-09-02 14:04:14] [Rank 0] PRINT: step:5400/10000 val_loss:3.8727 svd_entropy: attn_qk:H=0.7823,top10E=0.24,eRank=196.6,q75/q25=53.71 attn_vo:H=0.8142,top10E=0.12,eRank=299.4,q75/q25=inf mlp_w1:H=0.9051,top10E=0.14,eRank=412.2,q75/q25=4.65 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.88 vo_prod:H=0.6826,top10E=0.20,eRank=136.3,q75/q25=inf train_time:410910ms step_avg:76.09ms +[2025-09-02 14:04:15] [Rank 0] step:5401/10000 train_time:410926ms step_avg:76.08ms +[2025-09-02 14:04:15] [Rank 0] step:5401/10000 train_time:410926ms step_avg:76.08ms +[2025-09-02 14:04:16] [Rank 0] step:5421/10000 train_time:412469ms step_avg:76.09ms +[2025-09-02 14:04:16] [Rank 0] step:5421/10000 train_time:412469ms step_avg:76.09ms +[2025-09-02 14:04:18] [Rank 0] step:5441/10000 train_time:414065ms step_avg:76.10ms +[2025-09-02 
14:04:18] [Rank 0] step:5441/10000 train_time:414065ms step_avg:76.10ms +[2025-09-02 14:04:19] [Rank 0] step:5461/10000 train_time:415672ms step_avg:76.12ms +[2025-09-02 14:04:19] [Rank 0] step:5461/10000 train_time:415672ms step_avg:76.12ms +[2025-09-02 14:04:21] [Rank 0] step:5481/10000 train_time:417275ms step_avg:76.13ms +[2025-09-02 14:04:21] [Rank 0] step:5481/10000 train_time:417275ms step_avg:76.13ms +[2025-09-02 14:04:23] [Rank 0] step:5501/10000 train_time:419015ms step_avg:76.17ms +[2025-09-02 14:04:23] [Rank 0] step:5501/10000 train_time:419015ms step_avg:76.17ms +[2025-09-02 14:04:24] [Rank 0] step:5521/10000 train_time:420492ms step_avg:76.16ms +[2025-09-02 14:04:24] [Rank 0] step:5521/10000 train_time:420492ms step_avg:76.16ms +[2025-09-02 14:04:26] [Rank 0] step:5541/10000 train_time:422096ms step_avg:76.18ms +[2025-09-02 14:04:26] [Rank 0] step:5541/10000 train_time:422096ms step_avg:76.18ms +[2025-09-02 14:04:27] [Rank 0] step:5561/10000 train_time:423706ms step_avg:76.19ms +[2025-09-02 14:04:27] [Rank 0] step:5561/10000 train_time:423706ms step_avg:76.19ms +[2025-09-02 14:04:29] [Rank 0] step:5581/10000 train_time:425309ms step_avg:76.21ms +[2025-09-02 14:04:29] [Rank 0] step:5581/10000 train_time:425309ms step_avg:76.21ms +[2025-09-02 14:04:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:04:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:04:42] [Rank 0] PRINT: step:5600/10000 val_loss:3.8590 svd_entropy: attn_qk:H=0.7835,top10E=0.24,eRank=197.9,q75/q25=53.28 attn_vo:H=0.8153,top10E=0.12,eRank=301.1,q75/q25=inf mlp_w1:H=0.9063,top10E=0.14,eRank=415.5,q75/q25=4.62 mlp_w2:H=0.9710,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.6841,top10E=0.20,eRank=137.8,q75/q25=inf train_time:427000ms step_avg:76.25ms +[2025-09-02 14:04:42] [Rank 0] PRINT: step:5600/10000 val_loss:3.8590 svd_entropy: attn_qk:H=0.7835,top10E=0.24,eRank=197.9,q75/q25=53.28 attn_vo:H=0.8153,top10E=0.12,eRank=301.1,q75/q25=inf mlp_w1:H=0.9063,top10E=0.14,eRank=415.5,q75/q25=4.62 mlp_w2:H=0.9710,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.6841,top10E=0.20,eRank=137.8,q75/q25=inf train_time:427000ms step_avg:76.25ms +[2025-09-02 14:04:43] [Rank 0] step:5601/10000 train_time:427016ms step_avg:76.24ms +[2025-09-02 14:04:43] [Rank 0] step:5601/10000 train_time:427016ms step_avg:76.24ms +[2025-09-02 14:04:44] [Rank 0] step:5621/10000 train_time:428542ms step_avg:76.24ms +[2025-09-02 14:04:44] [Rank 0] step:5621/10000 train_time:428542ms step_avg:76.24ms +[2025-09-02 14:04:46] [Rank 0] step:5641/10000 train_time:430146ms step_avg:76.25ms +[2025-09-02 14:04:46] [Rank 0] step:5641/10000 train_time:430146ms step_avg:76.25ms +[2025-09-02 14:04:47] [Rank 0] step:5661/10000 train_time:431748ms step_avg:76.27ms +[2025-09-02 14:04:47] [Rank 0] step:5661/10000 train_time:431748ms step_avg:76.27ms +[2025-09-02 14:04:49] [Rank 0] step:5681/10000 train_time:433360ms step_avg:76.28ms +[2025-09-02 14:04:49] [Rank 0] step:5681/10000 train_time:433360ms step_avg:76.28ms +[2025-09-02 14:04:51] [Rank 0] step:5701/10000 train_time:434965ms step_avg:76.30ms +[2025-09-02 14:04:51] [Rank 0] step:5701/10000 train_time:434965ms step_avg:76.30ms +[2025-09-02 14:04:52] [Rank 0] step:5721/10000 train_time:436573ms step_avg:76.31ms +[2025-09-02 14:04:52] [Rank 0] step:5721/10000 train_time:436573ms step_avg:76.31ms +[2025-09-02 14:04:54] [Rank 0] 
step:5741/10000 train_time:438180ms step_avg:76.32ms +[2025-09-02 14:04:54] [Rank 0] step:5741/10000 train_time:438180ms step_avg:76.32ms +[2025-09-02 14:04:55] [Rank 0] step:5761/10000 train_time:439789ms step_avg:76.34ms +[2025-09-02 14:04:55] [Rank 0] step:5761/10000 train_time:439789ms step_avg:76.34ms +[2025-09-02 14:04:57] [Rank 0] step:5781/10000 train_time:441399ms step_avg:76.35ms +[2025-09-02 14:04:57] [Rank 0] step:5781/10000 train_time:441399ms step_avg:76.35ms +[2025-09-02 14:04:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:04:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:05:10] [Rank 0] PRINT: step:5800/10000 val_loss:3.8499 svd_entropy: attn_qk:H=0.7847,top10E=0.24,eRank=199.1,q75/q25=53.09 attn_vo:H=0.8163,top10E=0.12,eRank=302.9,q75/q25=inf mlp_w1:H=0.9075,top10E=0.14,eRank=418.6,q75/q25=4.58 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.6856,top10E=0.19,eRank=139.3,q75/q25=inf train_time:443092ms step_avg:76.40ms +[2025-09-02 14:05:10] [Rank 0] PRINT: step:5800/10000 val_loss:3.8499 svd_entropy: attn_qk:H=0.7847,top10E=0.24,eRank=199.1,q75/q25=53.09 attn_vo:H=0.8163,top10E=0.12,eRank=302.9,q75/q25=inf mlp_w1:H=0.9075,top10E=0.14,eRank=418.6,q75/q25=4.58 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.6856,top10E=0.19,eRank=139.3,q75/q25=inf train_time:443092ms step_avg:76.40ms +[2025-09-02 14:05:11] [Rank 0] step:5801/10000 train_time:443108ms step_avg:76.38ms +[2025-09-02 14:05:11] [Rank 0] step:5801/10000 train_time:443108ms step_avg:76.38ms +[2025-09-02 14:05:12] [Rank 0] step:5821/10000 train_time:444630ms step_avg:76.38ms +[2025-09-02 14:05:12] [Rank 0] step:5821/10000 train_time:444630ms step_avg:76.38ms +[2025-09-02 14:05:14] [Rank 0] step:5841/10000 train_time:446230ms step_avg:76.40ms +[2025-09-02 
14:05:14] [Rank 0] step:5841/10000 train_time:446230ms step_avg:76.40ms +[2025-09-02 14:05:15] [Rank 0] step:5861/10000 train_time:447834ms step_avg:76.41ms +[2025-09-02 14:05:15] [Rank 0] step:5861/10000 train_time:447834ms step_avg:76.41ms +[2025-09-02 14:05:17] [Rank 0] step:5881/10000 train_time:449441ms step_avg:76.42ms +[2025-09-02 14:05:17] [Rank 0] step:5881/10000 train_time:449441ms step_avg:76.42ms +[2025-09-02 14:05:19] [Rank 0] step:5901/10000 train_time:451045ms step_avg:76.44ms +[2025-09-02 14:05:19] [Rank 0] step:5901/10000 train_time:451045ms step_avg:76.44ms +[2025-09-02 14:05:20] [Rank 0] step:5921/10000 train_time:452650ms step_avg:76.45ms +[2025-09-02 14:05:20] [Rank 0] step:5921/10000 train_time:452650ms step_avg:76.45ms +[2025-09-02 14:05:22] [Rank 0] step:5941/10000 train_time:454262ms step_avg:76.46ms +[2025-09-02 14:05:22] [Rank 0] step:5941/10000 train_time:454262ms step_avg:76.46ms +[2025-09-02 14:05:23] [Rank 0] step:5961/10000 train_time:455870ms step_avg:76.48ms +[2025-09-02 14:05:23] [Rank 0] step:5961/10000 train_time:455870ms step_avg:76.48ms +[2025-09-02 14:05:25] [Rank 0] step:5981/10000 train_time:457479ms step_avg:76.49ms +[2025-09-02 14:05:25] [Rank 0] step:5981/10000 train_time:457479ms step_avg:76.49ms +[2025-09-02 14:05:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:05:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:05:38] [Rank 0] PRINT: step:6000/10000 val_loss:3.8250 svd_entropy: attn_qk:H=0.7858,top10E=0.24,eRank=200.3,q75/q25=52.87 attn_vo:H=0.8173,top10E=0.12,eRank=304.5,q75/q25=inf mlp_w1:H=0.9086,top10E=0.14,eRank=421.8,q75/q25=4.54 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.87 vo_prod:H=0.6869,top10E=0.19,eRank=140.6,q75/q25=inf train_time:459162ms step_avg:76.53ms +[2025-09-02 14:05:38] [Rank 0] PRINT: step:6000/10000 val_loss:3.8250 svd_entropy: attn_qk:H=0.7858,top10E=0.24,eRank=200.3,q75/q25=52.87 attn_vo:H=0.8173,top10E=0.12,eRank=304.5,q75/q25=inf mlp_w1:H=0.9086,top10E=0.14,eRank=421.8,q75/q25=4.54 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.87 vo_prod:H=0.6869,top10E=0.19,eRank=140.6,q75/q25=inf train_time:459162ms step_avg:76.53ms +[2025-09-02 14:05:38] [Rank 0] step:6001/10000 train_time:459178ms step_avg:76.52ms +[2025-09-02 14:05:38] [Rank 0] step:6001/10000 train_time:459178ms step_avg:76.52ms +[2025-09-02 14:05:40] [Rank 0] step:6021/10000 train_time:460720ms step_avg:76.52ms +[2025-09-02 14:05:40] [Rank 0] step:6021/10000 train_time:460720ms step_avg:76.52ms +[2025-09-02 14:05:42] [Rank 0] step:6041/10000 train_time:462327ms step_avg:76.53ms +[2025-09-02 14:05:42] [Rank 0] step:6041/10000 train_time:462327ms step_avg:76.53ms +[2025-09-02 14:05:43] [Rank 0] step:6061/10000 train_time:463935ms step_avg:76.54ms +[2025-09-02 14:05:43] [Rank 0] step:6061/10000 train_time:463935ms step_avg:76.54ms +[2025-09-02 14:05:45] [Rank 0] step:6081/10000 train_time:465544ms step_avg:76.56ms +[2025-09-02 14:05:45] [Rank 0] step:6081/10000 train_time:465544ms step_avg:76.56ms +[2025-09-02 14:05:46] [Rank 0] step:6101/10000 train_time:467150ms step_avg:76.57ms +[2025-09-02 14:05:46] [Rank 0] step:6101/10000 train_time:467150ms step_avg:76.57ms +[2025-09-02 14:05:48] [Rank 0] step:6121/10000 train_time:469029ms step_avg:76.63ms +[2025-09-02 14:05:48] [Rank 0] step:6121/10000 train_time:469029ms step_avg:76.63ms +[2025-09-02 14:05:50] [Rank 0] 
step:6141/10000 train_time:470649ms step_avg:76.64ms +[2025-09-02 14:05:50] [Rank 0] step:6141/10000 train_time:470649ms step_avg:76.64ms +[2025-09-02 14:05:52] [Rank 0] step:6161/10000 train_time:472258ms step_avg:76.65ms +[2025-09-02 14:05:52] [Rank 0] step:6161/10000 train_time:472258ms step_avg:76.65ms +[2025-09-02 14:05:53] [Rank 0] step:6181/10000 train_time:473865ms step_avg:76.66ms +[2025-09-02 14:05:53] [Rank 0] step:6181/10000 train_time:473865ms step_avg:76.66ms +[2025-09-02 14:05:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:05:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:06:07] [Rank 0] PRINT: step:6200/10000 val_loss:3.8100 svd_entropy: attn_qk:H=0.7867,top10E=0.24,eRank=201.2,q75/q25=52.62 attn_vo:H=0.8183,top10E=0.12,eRank=306.2,q75/q25=inf mlp_w1:H=0.9097,top10E=0.14,eRank=424.6,q75/q25=4.51 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6883,top10E=0.19,eRank=142.0,q75/q25=inf train_time:475556ms step_avg:76.70ms +[2025-09-02 14:06:07] [Rank 0] PRINT: step:6200/10000 val_loss:3.8100 svd_entropy: attn_qk:H=0.7867,top10E=0.24,eRank=201.2,q75/q25=52.62 attn_vo:H=0.8183,top10E=0.12,eRank=306.2,q75/q25=inf mlp_w1:H=0.9097,top10E=0.14,eRank=424.6,q75/q25=4.51 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6883,top10E=0.19,eRank=142.0,q75/q25=inf train_time:475556ms step_avg:76.70ms +[2025-09-02 14:06:07] [Rank 0] step:6201/10000 train_time:475571ms step_avg:76.69ms +[2025-09-02 14:06:07] [Rank 0] step:6201/10000 train_time:475571ms step_avg:76.69ms +[2025-09-02 14:06:08] [Rank 0] step:6221/10000 train_time:477109ms step_avg:76.69ms +[2025-09-02 14:06:08] [Rank 0] step:6221/10000 train_time:477109ms step_avg:76.69ms +[2025-09-02 14:06:10] [Rank 0] step:6241/10000 train_time:478711ms step_avg:76.70ms +[2025-09-02 
14:06:10] [Rank 0] step:6241/10000 train_time:478711ms step_avg:76.70ms +[2025-09-02 14:06:11] [Rank 0] step:6261/10000 train_time:480317ms step_avg:76.72ms +[2025-09-02 14:06:11] [Rank 0] step:6261/10000 train_time:480317ms step_avg:76.72ms +[2025-09-02 14:06:13] [Rank 0] step:6281/10000 train_time:481928ms step_avg:76.73ms +[2025-09-02 14:06:13] [Rank 0] step:6281/10000 train_time:481928ms step_avg:76.73ms +[2025-09-02 14:06:15] [Rank 0] step:6301/10000 train_time:483537ms step_avg:76.74ms +[2025-09-02 14:06:15] [Rank 0] step:6301/10000 train_time:483537ms step_avg:76.74ms +[2025-09-02 14:06:16] [Rank 0] step:6321/10000 train_time:485146ms step_avg:76.75ms +[2025-09-02 14:06:16] [Rank 0] step:6321/10000 train_time:485146ms step_avg:76.75ms +[2025-09-02 14:06:18] [Rank 0] step:6341/10000 train_time:486758ms step_avg:76.76ms +[2025-09-02 14:06:18] [Rank 0] step:6341/10000 train_time:486758ms step_avg:76.76ms +[2025-09-02 14:06:20] [Rank 0] step:6361/10000 train_time:488372ms step_avg:76.78ms +[2025-09-02 14:06:20] [Rank 0] step:6361/10000 train_time:488372ms step_avg:76.78ms +[2025-09-02 14:06:21] [Rank 0] step:6381/10000 train_time:489984ms step_avg:76.79ms +[2025-09-02 14:06:21] [Rank 0] step:6381/10000 train_time:489984ms step_avg:76.79ms +[2025-09-02 14:06:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:06:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:06:35] [Rank 0] PRINT: step:6400/10000 val_loss:3.7936 svd_entropy: attn_qk:H=0.7877,top10E=0.24,eRank=202.3,q75/q25=51.97 attn_vo:H=0.8191,top10E=0.12,eRank=307.7,q75/q25=inf mlp_w1:H=0.9105,top10E=0.13,eRank=427.0,q75/q25=4.48 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6895,top10E=0.19,eRank=143.2,q75/q25=inf train_time:491675ms step_avg:76.82ms +[2025-09-02 14:06:35] [Rank 0] PRINT: step:6400/10000 val_loss:3.7936 svd_entropy: attn_qk:H=0.7877,top10E=0.24,eRank=202.3,q75/q25=51.97 attn_vo:H=0.8191,top10E=0.12,eRank=307.7,q75/q25=inf mlp_w1:H=0.9105,top10E=0.13,eRank=427.0,q75/q25=4.48 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6895,top10E=0.19,eRank=143.2,q75/q25=inf train_time:491675ms step_avg:76.82ms +[2025-09-02 14:06:35] [Rank 0] step:6401/10000 train_time:491691ms step_avg:76.81ms +[2025-09-02 14:06:35] [Rank 0] step:6401/10000 train_time:491691ms step_avg:76.81ms +[2025-09-02 14:06:36] [Rank 0] step:6421/10000 train_time:493232ms step_avg:76.82ms +[2025-09-02 14:06:36] [Rank 0] step:6421/10000 train_time:493232ms step_avg:76.82ms +[2025-09-02 14:06:38] [Rank 0] step:6441/10000 train_time:494841ms step_avg:76.83ms +[2025-09-02 14:06:38] [Rank 0] step:6441/10000 train_time:494841ms step_avg:76.83ms +[2025-09-02 14:06:39] [Rank 0] step:6461/10000 train_time:496456ms step_avg:76.84ms +[2025-09-02 14:06:39] [Rank 0] step:6461/10000 train_time:496456ms step_avg:76.84ms +[2025-09-02 14:06:41] [Rank 0] step:6481/10000 train_time:498071ms step_avg:76.85ms +[2025-09-02 14:06:41] [Rank 0] step:6481/10000 train_time:498071ms step_avg:76.85ms +[2025-09-02 14:06:43] [Rank 0] step:6501/10000 train_time:499678ms step_avg:76.86ms +[2025-09-02 14:06:43] [Rank 0] step:6501/10000 train_time:499678ms step_avg:76.86ms +[2025-09-02 14:06:44] [Rank 0] step:6521/10000 train_time:501284ms step_avg:76.87ms +[2025-09-02 14:06:44] [Rank 0] step:6521/10000 train_time:501284ms step_avg:76.87ms +[2025-09-02 14:06:46] [Rank 0] 
step:6541/10000 train_time:502895ms step_avg:76.88ms +[2025-09-02 14:06:46] [Rank 0] step:6541/10000 train_time:502895ms step_avg:76.88ms +[2025-09-02 14:06:48] [Rank 0] step:6561/10000 train_time:504511ms step_avg:76.90ms +[2025-09-02 14:06:48] [Rank 0] step:6561/10000 train_time:504511ms step_avg:76.90ms +[2025-09-02 14:06:49] [Rank 0] step:6581/10000 train_time:506118ms step_avg:76.91ms +[2025-09-02 14:06:49] [Rank 0] step:6581/10000 train_time:506118ms step_avg:76.91ms +[2025-09-02 14:06:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:06:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:07:03] [Rank 0] PRINT: step:6600/10000 val_loss:3.7794 svd_entropy: attn_qk:H=0.7886,top10E=0.24,eRank=203.2,q75/q25=52.04 attn_vo:H=0.8199,top10E=0.12,eRank=309.0,q75/q25=inf mlp_w1:H=0.9114,top10E=0.13,eRank=429.3,q75/q25=4.45 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6906,top10E=0.19,eRank=144.4,q75/q25=inf train_time:507817ms step_avg:76.94ms +[2025-09-02 14:07:03] [Rank 0] PRINT: step:6600/10000 val_loss:3.7794 svd_entropy: attn_qk:H=0.7886,top10E=0.24,eRank=203.2,q75/q25=52.04 attn_vo:H=0.8199,top10E=0.12,eRank=309.0,q75/q25=inf mlp_w1:H=0.9114,top10E=0.13,eRank=429.3,q75/q25=4.45 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6906,top10E=0.19,eRank=144.4,q75/q25=inf train_time:507817ms step_avg:76.94ms +[2025-09-02 14:07:03] [Rank 0] step:6601/10000 train_time:507833ms step_avg:76.93ms +[2025-09-02 14:07:03] [Rank 0] step:6601/10000 train_time:507833ms step_avg:76.93ms +[2025-09-02 14:07:04] [Rank 0] step:6621/10000 train_time:509369ms step_avg:76.93ms +[2025-09-02 14:07:04] [Rank 0] step:6621/10000 train_time:509369ms step_avg:76.93ms +[2025-09-02 14:07:06] [Rank 0] step:6641/10000 train_time:510987ms step_avg:76.94ms +[2025-09-02 
14:07:06] [Rank 0] step:6641/10000 train_time:510987ms step_avg:76.94ms +[2025-09-02 14:07:08] [Rank 0] step:6661/10000 train_time:512597ms step_avg:76.95ms +[2025-09-02 14:07:08] [Rank 0] step:6661/10000 train_time:512597ms step_avg:76.95ms +[2025-09-02 14:07:09] [Rank 0] step:6681/10000 train_time:514225ms step_avg:76.97ms +[2025-09-02 14:07:09] [Rank 0] step:6681/10000 train_time:514225ms step_avg:76.97ms +[2025-09-02 14:07:11] [Rank 0] step:6701/10000 train_time:515872ms step_avg:76.98ms +[2025-09-02 14:07:11] [Rank 0] step:6701/10000 train_time:515872ms step_avg:76.98ms +[2025-09-02 14:07:12] [Rank 0] step:6721/10000 train_time:517515ms step_avg:77.00ms +[2025-09-02 14:07:12] [Rank 0] step:6721/10000 train_time:517515ms step_avg:77.00ms +[2025-09-02 14:07:14] [Rank 0] step:6741/10000 train_time:519155ms step_avg:77.01ms +[2025-09-02 14:07:14] [Rank 0] step:6741/10000 train_time:519155ms step_avg:77.01ms +[2025-09-02 14:07:16] [Rank 0] step:6761/10000 train_time:520793ms step_avg:77.03ms +[2025-09-02 14:07:16] [Rank 0] step:6761/10000 train_time:520793ms step_avg:77.03ms +[2025-09-02 14:07:17] [Rank 0] step:6781/10000 train_time:522439ms step_avg:77.04ms +[2025-09-02 14:07:17] [Rank 0] step:6781/10000 train_time:522439ms step_avg:77.04ms +[2025-09-02 14:07:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:07:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:07:31] [Rank 0] PRINT: step:6800/10000 val_loss:3.7628 svd_entropy: attn_qk:H=0.7892,top10E=0.24,eRank=203.9,q75/q25=51.53 attn_vo:H=0.8206,top10E=0.12,eRank=310.3,q75/q25=inf mlp_w1:H=0.9121,top10E=0.13,eRank=431.4,q75/q25=4.44 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6918,top10E=0.19,eRank=145.6,q75/q25=inf train_time:524167ms step_avg:77.08ms +[2025-09-02 14:07:31] [Rank 0] PRINT: step:6800/10000 val_loss:3.7628 svd_entropy: attn_qk:H=0.7892,top10E=0.24,eRank=203.9,q75/q25=51.53 attn_vo:H=0.8206,top10E=0.12,eRank=310.3,q75/q25=inf mlp_w1:H=0.9121,top10E=0.13,eRank=431.4,q75/q25=4.44 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6918,top10E=0.19,eRank=145.6,q75/q25=inf train_time:524167ms step_avg:77.08ms +[2025-09-02 14:07:31] [Rank 0] step:6801/10000 train_time:524184ms step_avg:77.07ms +[2025-09-02 14:07:31] [Rank 0] step:6801/10000 train_time:524184ms step_avg:77.07ms +[2025-09-02 14:07:33] [Rank 0] step:6821/10000 train_time:525797ms step_avg:77.08ms +[2025-09-02 14:07:33] [Rank 0] step:6821/10000 train_time:525797ms step_avg:77.08ms +[2025-09-02 14:07:34] [Rank 0] step:6841/10000 train_time:527427ms step_avg:77.10ms +[2025-09-02 14:07:34] [Rank 0] step:6841/10000 train_time:527427ms step_avg:77.10ms +[2025-09-02 14:07:36] [Rank 0] step:6861/10000 train_time:529062ms step_avg:77.11ms +[2025-09-02 14:07:36] [Rank 0] step:6861/10000 train_time:529062ms step_avg:77.11ms +[2025-09-02 14:07:38] [Rank 0] step:6881/10000 train_time:530698ms step_avg:77.13ms +[2025-09-02 14:07:38] [Rank 0] step:6881/10000 train_time:530698ms step_avg:77.13ms +[2025-09-02 14:07:39] [Rank 0] step:6901/10000 train_time:532335ms step_avg:77.14ms +[2025-09-02 14:07:39] [Rank 0] step:6901/10000 train_time:532335ms step_avg:77.14ms +[2025-09-02 14:07:41] [Rank 0] step:6921/10000 train_time:533973ms step_avg:77.15ms +[2025-09-02 14:07:41] [Rank 0] step:6921/10000 train_time:533973ms step_avg:77.15ms +[2025-09-02 14:07:43] [Rank 0] 
step:6941/10000 train_time:535617ms step_avg:77.17ms +[2025-09-02 14:07:43] [Rank 0] step:6941/10000 train_time:535617ms step_avg:77.17ms +[2025-09-02 14:07:44] [Rank 0] step:6961/10000 train_time:537269ms step_avg:77.18ms +[2025-09-02 14:07:44] [Rank 0] step:6961/10000 train_time:537269ms step_avg:77.18ms +[2025-09-02 14:07:46] [Rank 0] step:6981/10000 train_time:538913ms step_avg:77.20ms +[2025-09-02 14:07:46] [Rank 0] step:6981/10000 train_time:538913ms step_avg:77.20ms +[2025-09-02 14:07:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:07:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:07:59] [Rank 0] PRINT: step:7000/10000 val_loss:3.7484 svd_entropy: attn_qk:H=0.7899,top10E=0.23,eRank=204.6,q75/q25=51.55 attn_vo:H=0.8213,top10E=0.12,eRank=311.4,q75/q25=inf mlp_w1:H=0.9128,top10E=0.13,eRank=433.3,q75/q25=4.41 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6927,top10E=0.19,eRank=146.5,q75/q25=inf train_time:540642ms step_avg:77.23ms +[2025-09-02 14:07:59] [Rank 0] PRINT: step:7000/10000 val_loss:3.7484 svd_entropy: attn_qk:H=0.7899,top10E=0.23,eRank=204.6,q75/q25=51.55 attn_vo:H=0.8213,top10E=0.12,eRank=311.4,q75/q25=inf mlp_w1:H=0.9128,top10E=0.13,eRank=433.3,q75/q25=4.41 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.6927,top10E=0.19,eRank=146.5,q75/q25=inf train_time:540642ms step_avg:77.23ms +[2025-09-02 14:07:59] [Rank 0] step:7001/10000 train_time:540657ms step_avg:77.23ms +[2025-09-02 14:07:59] [Rank 0] step:7001/10000 train_time:540657ms step_avg:77.23ms +[2025-09-02 14:08:01] [Rank 0] step:7021/10000 train_time:542216ms step_avg:77.23ms +[2025-09-02 14:08:01] [Rank 0] step:7021/10000 train_time:542216ms step_avg:77.23ms +[2025-09-02 14:08:03] [Rank 0] step:7041/10000 train_time:543852ms step_avg:77.24ms +[2025-09-02 
14:08:03] [Rank 0] step:7041/10000 train_time:543852ms step_avg:77.24ms +[2025-09-02 14:08:04] [Rank 0] step:7061/10000 train_time:545487ms step_avg:77.25ms +[2025-09-02 14:08:04] [Rank 0] step:7061/10000 train_time:545487ms step_avg:77.25ms +[2025-09-02 14:08:06] [Rank 0] step:7081/10000 train_time:547121ms step_avg:77.27ms +[2025-09-02 14:08:06] [Rank 0] step:7081/10000 train_time:547121ms step_avg:77.27ms +[2025-09-02 14:08:07] [Rank 0] step:7101/10000 train_time:548761ms step_avg:77.28ms +[2025-09-02 14:08:07] [Rank 0] step:7101/10000 train_time:548761ms step_avg:77.28ms +[2025-09-02 14:08:09] [Rank 0] step:7121/10000 train_time:550400ms step_avg:77.29ms +[2025-09-02 14:08:09] [Rank 0] step:7121/10000 train_time:550400ms step_avg:77.29ms +[2025-09-02 14:08:11] [Rank 0] step:7141/10000 train_time:552037ms step_avg:77.31ms +[2025-09-02 14:08:11] [Rank 0] step:7141/10000 train_time:552037ms step_avg:77.31ms +[2025-09-02 14:08:12] [Rank 0] step:7161/10000 train_time:553678ms step_avg:77.32ms +[2025-09-02 14:08:12] [Rank 0] step:7161/10000 train_time:553678ms step_avg:77.32ms +[2025-09-02 14:08:14] [Rank 0] step:7181/10000 train_time:555319ms step_avg:77.33ms +[2025-09-02 14:08:14] [Rank 0] step:7181/10000 train_time:555319ms step_avg:77.33ms +[2025-09-02 14:08:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:08:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:08:27] [Rank 0] PRINT: step:7200/10000 val_loss:3.7371 svd_entropy: attn_qk:H=0.7905,top10E=0.23,eRank=205.3,q75/q25=51.14 attn_vo:H=0.8219,top10E=0.11,eRank=312.5,q75/q25=inf mlp_w1:H=0.9134,top10E=0.13,eRank=435.0,q75/q25=4.39 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.86 vo_prod:H=0.6937,top10E=0.19,eRank=147.6,q75/q25=inf train_time:557043ms step_avg:77.37ms +[2025-09-02 14:08:27] [Rank 0] PRINT: step:7200/10000 val_loss:3.7371 svd_entropy: attn_qk:H=0.7905,top10E=0.23,eRank=205.3,q75/q25=51.14 attn_vo:H=0.8219,top10E=0.11,eRank=312.5,q75/q25=inf mlp_w1:H=0.9134,top10E=0.13,eRank=435.0,q75/q25=4.39 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.86 vo_prod:H=0.6937,top10E=0.19,eRank=147.6,q75/q25=inf train_time:557043ms step_avg:77.37ms +[2025-09-02 14:08:27] [Rank 0] step:7201/10000 train_time:557058ms step_avg:77.36ms +[2025-09-02 14:08:27] [Rank 0] step:7201/10000 train_time:557058ms step_avg:77.36ms +[2025-09-02 14:08:29] [Rank 0] step:7221/10000 train_time:558642ms step_avg:77.36ms +[2025-09-02 14:08:29] [Rank 0] step:7221/10000 train_time:558642ms step_avg:77.36ms +[2025-09-02 14:08:31] [Rank 0] step:7241/10000 train_time:560271ms step_avg:77.37ms +[2025-09-02 14:08:31] [Rank 0] step:7241/10000 train_time:560271ms step_avg:77.37ms +[2025-09-02 14:08:32] [Rank 0] step:7261/10000 train_time:561905ms step_avg:77.39ms +[2025-09-02 14:08:32] [Rank 0] step:7261/10000 train_time:561905ms step_avg:77.39ms +[2025-09-02 14:08:34] [Rank 0] step:7281/10000 train_time:563549ms step_avg:77.40ms +[2025-09-02 14:08:34] [Rank 0] step:7281/10000 train_time:563549ms step_avg:77.40ms +[2025-09-02 14:08:36] [Rank 0] step:7301/10000 train_time:565269ms step_avg:77.42ms +[2025-09-02 14:08:36] [Rank 0] step:7301/10000 train_time:565269ms step_avg:77.42ms +[2025-09-02 14:08:37] [Rank 0] step:7321/10000 train_time:566903ms step_avg:77.44ms +[2025-09-02 14:08:37] [Rank 0] step:7321/10000 train_time:566903ms step_avg:77.44ms +[2025-09-02 14:08:39] [Rank 0] 
step:7341/10000 train_time:568542ms step_avg:77.45ms +[2025-09-02 14:08:39] [Rank 0] step:7341/10000 train_time:568542ms step_avg:77.45ms +[2025-09-02 14:08:41] [Rank 0] step:7361/10000 train_time:570187ms step_avg:77.46ms +[2025-09-02 14:08:41] [Rank 0] step:7361/10000 train_time:570187ms step_avg:77.46ms +[2025-09-02 14:08:42] [Rank 0] step:7381/10000 train_time:571833ms step_avg:77.47ms +[2025-09-02 14:08:42] [Rank 0] step:7381/10000 train_time:571833ms step_avg:77.47ms +[2025-09-02 14:08:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:08:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:08:56] [Rank 0] PRINT: step:7400/10000 val_loss:3.7194 svd_entropy: attn_qk:H=0.7910,top10E=0.23,eRank=205.8,q75/q25=50.99 attn_vo:H=0.8224,top10E=0.11,eRank=313.4,q75/q25=inf mlp_w1:H=0.9139,top10E=0.13,eRank=436.5,q75/q25=4.37 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.86 vo_prod:H=0.6946,top10E=0.19,eRank=148.5,q75/q25=inf train_time:573544ms step_avg:77.51ms +[2025-09-02 14:08:56] [Rank 0] PRINT: step:7400/10000 val_loss:3.7194 svd_entropy: attn_qk:H=0.7910,top10E=0.23,eRank=205.8,q75/q25=50.99 attn_vo:H=0.8224,top10E=0.11,eRank=313.4,q75/q25=inf mlp_w1:H=0.9139,top10E=0.13,eRank=436.5,q75/q25=4.37 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.86 vo_prod:H=0.6946,top10E=0.19,eRank=148.5,q75/q25=inf train_time:573544ms step_avg:77.51ms +[2025-09-02 14:08:56] [Rank 0] step:7401/10000 train_time:573560ms step_avg:77.50ms +[2025-09-02 14:08:56] [Rank 0] step:7401/10000 train_time:573560ms step_avg:77.50ms +[2025-09-02 14:08:57] [Rank 0] step:7421/10000 train_time:575128ms step_avg:77.50ms +[2025-09-02 14:08:57] [Rank 0] step:7421/10000 train_time:575128ms step_avg:77.50ms +[2025-09-02 14:08:59] [Rank 0] step:7441/10000 train_time:576760ms step_avg:77.51ms +[2025-09-02 
14:08:59] [Rank 0] step:7441/10000 train_time:576760ms step_avg:77.51ms +[2025-09-02 14:09:01] [Rank 0] step:7461/10000 train_time:578399ms step_avg:77.52ms +[2025-09-02 14:09:01] [Rank 0] step:7461/10000 train_time:578399ms step_avg:77.52ms +[2025-09-02 14:09:02] [Rank 0] step:7481/10000 train_time:580044ms step_avg:77.54ms +[2025-09-02 14:09:02] [Rank 0] step:7481/10000 train_time:580044ms step_avg:77.54ms +[2025-09-02 14:09:04] [Rank 0] step:7501/10000 train_time:581687ms step_avg:77.55ms +[2025-09-02 14:09:04] [Rank 0] step:7501/10000 train_time:581687ms step_avg:77.55ms +[2025-09-02 14:09:06] [Rank 0] step:7521/10000 train_time:583328ms step_avg:77.56ms +[2025-09-02 14:09:06] [Rank 0] step:7521/10000 train_time:583328ms step_avg:77.56ms +[2025-09-02 14:09:07] [Rank 0] step:7541/10000 train_time:584983ms step_avg:77.57ms +[2025-09-02 14:09:07] [Rank 0] step:7541/10000 train_time:584983ms step_avg:77.57ms +[2025-09-02 14:09:09] [Rank 0] step:7561/10000 train_time:586617ms step_avg:77.58ms +[2025-09-02 14:09:09] [Rank 0] step:7561/10000 train_time:586617ms step_avg:77.58ms +[2025-09-02 14:09:11] [Rank 0] step:7581/10000 train_time:588268ms step_avg:77.60ms +[2025-09-02 14:09:11] [Rank 0] step:7581/10000 train_time:588268ms step_avg:77.60ms +[2025-09-02 14:09:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:09:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:09:24] [Rank 0] PRINT: step:7600/10000 val_loss:3.7111 svd_entropy: attn_qk:H=0.7916,top10E=0.23,eRank=206.4,q75/q25=50.53 attn_vo:H=0.8229,top10E=0.11,eRank=314.3,q75/q25=inf mlp_w1:H=0.9144,top10E=0.13,eRank=437.9,q75/q25=4.34 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.86 vo_prod:H=0.6954,top10E=0.19,eRank=149.3,q75/q25=inf train_time:590006ms step_avg:77.63ms +[2025-09-02 14:09:24] [Rank 0] PRINT: step:7600/10000 val_loss:3.7111 svd_entropy: attn_qk:H=0.7916,top10E=0.23,eRank=206.4,q75/q25=50.53 attn_vo:H=0.8229,top10E=0.11,eRank=314.3,q75/q25=inf mlp_w1:H=0.9144,top10E=0.13,eRank=437.9,q75/q25=4.34 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.86 vo_prod:H=0.6954,top10E=0.19,eRank=149.3,q75/q25=inf train_time:590006ms step_avg:77.63ms +[2025-09-02 14:09:24] [Rank 0] step:7601/10000 train_time:590021ms step_avg:77.62ms +[2025-09-02 14:09:24] [Rank 0] step:7601/10000 train_time:590021ms step_avg:77.62ms +[2025-09-02 14:09:26] [Rank 0] step:7621/10000 train_time:591584ms step_avg:77.63ms +[2025-09-02 14:09:26] [Rank 0] step:7621/10000 train_time:591584ms step_avg:77.63ms +[2025-09-02 14:09:27] [Rank 0] step:7641/10000 train_time:593220ms step_avg:77.64ms +[2025-09-02 14:09:27] [Rank 0] step:7641/10000 train_time:593220ms step_avg:77.64ms +[2025-09-02 14:09:29] [Rank 0] step:7661/10000 train_time:594867ms step_avg:77.65ms +[2025-09-02 14:09:29] [Rank 0] step:7661/10000 train_time:594867ms step_avg:77.65ms +[2025-09-02 14:09:31] [Rank 0] step:7681/10000 train_time:596506ms step_avg:77.66ms +[2025-09-02 14:09:31] [Rank 0] step:7681/10000 train_time:596506ms step_avg:77.66ms +[2025-09-02 14:09:32] [Rank 0] step:7701/10000 train_time:598143ms step_avg:77.67ms +[2025-09-02 14:09:32] [Rank 0] step:7701/10000 train_time:598143ms step_avg:77.67ms +[2025-09-02 14:09:34] [Rank 0] step:7721/10000 train_time:599797ms step_avg:77.68ms +[2025-09-02 14:09:34] [Rank 0] step:7721/10000 train_time:599797ms step_avg:77.68ms +[2025-09-02 14:09:36] [Rank 0] 
step:7741/10000 train_time:601440ms step_avg:77.70ms +[2025-09-02 14:09:36] [Rank 0] step:7741/10000 train_time:601440ms step_avg:77.70ms +[2025-09-02 14:09:37] [Rank 0] step:7761/10000 train_time:603092ms step_avg:77.71ms +[2025-09-02 14:09:37] [Rank 0] step:7761/10000 train_time:603092ms step_avg:77.71ms +[2025-09-02 14:09:39] [Rank 0] step:7781/10000 train_time:604741ms step_avg:77.72ms +[2025-09-02 14:09:39] [Rank 0] step:7781/10000 train_time:604741ms step_avg:77.72ms +[2025-09-02 14:09:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:09:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:09:52] [Rank 0] PRINT: step:7800/10000 val_loss:3.6985 svd_entropy: attn_qk:H=0.7919,top10E=0.23,eRank=206.8,q75/q25=50.40 attn_vo:H=0.8234,top10E=0.11,eRank=315.1,q75/q25=inf mlp_w1:H=0.9149,top10E=0.13,eRank=439.2,q75/q25=4.32 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6961,top10E=0.18,eRank=150.1,q75/q25=inf train_time:606517ms step_avg:77.76ms +[2025-09-02 14:09:52] [Rank 0] PRINT: step:7800/10000 val_loss:3.6985 svd_entropy: attn_qk:H=0.7919,top10E=0.23,eRank=206.8,q75/q25=50.40 attn_vo:H=0.8234,top10E=0.11,eRank=315.1,q75/q25=inf mlp_w1:H=0.9149,top10E=0.13,eRank=439.2,q75/q25=4.32 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6961,top10E=0.18,eRank=150.1,q75/q25=inf train_time:606517ms step_avg:77.76ms +[2025-09-02 14:09:52] [Rank 0] step:7801/10000 train_time:606532ms step_avg:77.75ms +[2025-09-02 14:09:52] [Rank 0] step:7801/10000 train_time:606532ms step_avg:77.75ms +[2025-09-02 14:09:54] [Rank 0] step:7821/10000 train_time:608084ms step_avg:77.75ms +[2025-09-02 14:09:54] [Rank 0] step:7821/10000 train_time:608084ms step_avg:77.75ms +[2025-09-02 14:09:56] [Rank 0] step:7841/10000 train_time:609719ms step_avg:77.76ms +[2025-09-02 
14:09:56] [Rank 0] step:7841/10000 train_time:609719ms step_avg:77.76ms +[2025-09-02 14:09:57] [Rank 0] step:7861/10000 train_time:611367ms step_avg:77.77ms +[2025-09-02 14:09:57] [Rank 0] step:7861/10000 train_time:611367ms step_avg:77.77ms +[2025-09-02 14:09:59] [Rank 0] step:7881/10000 train_time:613014ms step_avg:77.78ms +[2025-09-02 14:09:59] [Rank 0] step:7881/10000 train_time:613014ms step_avg:77.78ms +[2025-09-02 14:10:01] [Rank 0] step:7901/10000 train_time:614657ms step_avg:77.79ms +[2025-09-02 14:10:01] [Rank 0] step:7901/10000 train_time:614657ms step_avg:77.79ms +[2025-09-02 14:10:02] [Rank 0] step:7921/10000 train_time:616301ms step_avg:77.81ms +[2025-09-02 14:10:02] [Rank 0] step:7921/10000 train_time:616301ms step_avg:77.81ms +[2025-09-02 14:10:04] [Rank 0] step:7941/10000 train_time:617950ms step_avg:77.82ms +[2025-09-02 14:10:04] [Rank 0] step:7941/10000 train_time:617950ms step_avg:77.82ms +[2025-09-02 14:10:05] [Rank 0] step:7961/10000 train_time:619595ms step_avg:77.83ms +[2025-09-02 14:10:05] [Rank 0] step:7961/10000 train_time:619595ms step_avg:77.83ms +[2025-09-02 14:10:07] [Rank 0] step:7981/10000 train_time:621231ms step_avg:77.84ms +[2025-09-02 14:10:07] [Rank 0] step:7981/10000 train_time:621231ms step_avg:77.84ms +[2025-09-02 14:10:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:10:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:10:21] [Rank 0] PRINT: step:8000/10000 val_loss:3.6828 svd_entropy: attn_qk:H=0.7924,top10E=0.23,eRank=207.3,q75/q25=50.22 attn_vo:H=0.8238,top10E=0.11,eRank=315.9,q75/q25=inf mlp_w1:H=0.9153,top10E=0.13,eRank=440.3,q75/q25=4.30 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6969,top10E=0.18,eRank=151.0,q75/q25=inf train_time:622960ms step_avg:77.87ms +[2025-09-02 14:10:21] [Rank 0] PRINT: step:8000/10000 val_loss:3.6828 svd_entropy: attn_qk:H=0.7924,top10E=0.23,eRank=207.3,q75/q25=50.22 attn_vo:H=0.8238,top10E=0.11,eRank=315.9,q75/q25=inf mlp_w1:H=0.9153,top10E=0.13,eRank=440.3,q75/q25=4.30 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6969,top10E=0.18,eRank=151.0,q75/q25=inf train_time:622960ms step_avg:77.87ms +[2025-09-02 14:10:21] [Rank 0] step:8001/10000 train_time:622977ms step_avg:77.86ms +[2025-09-02 14:10:21] [Rank 0] step:8001/10000 train_time:622977ms step_avg:77.86ms +[2025-09-02 14:10:22] [Rank 0] step:8021/10000 train_time:624529ms step_avg:77.86ms +[2025-09-02 14:10:22] [Rank 0] step:8021/10000 train_time:624529ms step_avg:77.86ms +[2025-09-02 14:10:24] [Rank 0] step:8041/10000 train_time:626176ms step_avg:77.87ms +[2025-09-02 14:10:24] [Rank 0] step:8041/10000 train_time:626176ms step_avg:77.87ms +[2025-09-02 14:10:26] [Rank 0] step:8061/10000 train_time:627818ms step_avg:77.88ms +[2025-09-02 14:10:26] [Rank 0] step:8061/10000 train_time:627818ms step_avg:77.88ms +[2025-09-02 14:10:27] [Rank 0] step:8081/10000 train_time:629451ms step_avg:77.89ms +[2025-09-02 14:10:27] [Rank 0] step:8081/10000 train_time:629451ms step_avg:77.89ms +[2025-09-02 14:10:29] [Rank 0] step:8101/10000 train_time:631102ms step_avg:77.90ms +[2025-09-02 14:10:29] [Rank 0] step:8101/10000 train_time:631102ms step_avg:77.90ms +[2025-09-02 14:10:31] [Rank 0] step:8121/10000 train_time:632740ms step_avg:77.91ms +[2025-09-02 14:10:31] [Rank 0] step:8121/10000 train_time:632740ms step_avg:77.91ms +[2025-09-02 14:10:32] [Rank 0] 
step:8141/10000 train_time:634572ms step_avg:77.95ms +[2025-09-02 14:10:32] [Rank 0] step:8141/10000 train_time:634572ms step_avg:77.95ms +[2025-09-02 14:10:34] [Rank 0] step:8161/10000 train_time:636228ms step_avg:77.96ms +[2025-09-02 14:10:34] [Rank 0] step:8161/10000 train_time:636228ms step_avg:77.96ms +[2025-09-02 14:10:36] [Rank 0] step:8181/10000 train_time:637900ms step_avg:77.97ms +[2025-09-02 14:10:36] [Rank 0] step:8181/10000 train_time:637900ms step_avg:77.97ms +[2025-09-02 14:10:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:10:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:10:49] [Rank 0] PRINT: step:8200/10000 val_loss:3.6728 svd_entropy: attn_qk:H=0.7927,top10E=0.23,eRank=207.6,q75/q25=50.01 attn_vo:H=0.8242,top10E=0.11,eRank=316.6,q75/q25=inf mlp_w1:H=0.9157,top10E=0.13,eRank=441.3,q75/q25=4.29 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6975,top10E=0.18,eRank=151.7,q75/q25=inf train_time:639679ms step_avg:78.01ms +[2025-09-02 14:10:49] [Rank 0] PRINT: step:8200/10000 val_loss:3.6728 svd_entropy: attn_qk:H=0.7927,top10E=0.23,eRank=207.6,q75/q25=50.01 attn_vo:H=0.8242,top10E=0.11,eRank=316.6,q75/q25=inf mlp_w1:H=0.9157,top10E=0.13,eRank=441.3,q75/q25=4.29 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6975,top10E=0.18,eRank=151.7,q75/q25=inf train_time:639679ms step_avg:78.01ms +[2025-09-02 14:10:49] [Rank 0] step:8201/10000 train_time:639695ms step_avg:78.00ms +[2025-09-02 14:10:49] [Rank 0] step:8201/10000 train_time:639695ms step_avg:78.00ms +[2025-09-02 14:10:51] [Rank 0] step:8221/10000 train_time:641312ms step_avg:78.01ms +[2025-09-02 14:10:51] [Rank 0] step:8221/10000 train_time:641312ms step_avg:78.01ms +[2025-09-02 14:10:53] [Rank 0] step:8241/10000 train_time:642987ms step_avg:78.02ms +[2025-09-02 
14:10:53] [Rank 0] step:8241/10000 train_time:642987ms step_avg:78.02ms +[2025-09-02 14:10:55] [Rank 0] step:8261/10000 train_time:644660ms step_avg:78.04ms +[2025-09-02 14:10:55] [Rank 0] step:8261/10000 train_time:644660ms step_avg:78.04ms +[2025-09-02 14:10:56] [Rank 0] step:8281/10000 train_time:646331ms step_avg:78.05ms +[2025-09-02 14:10:56] [Rank 0] step:8281/10000 train_time:646331ms step_avg:78.05ms +[2025-09-02 14:10:58] [Rank 0] step:8301/10000 train_time:648000ms step_avg:78.06ms +[2025-09-02 14:10:58] [Rank 0] step:8301/10000 train_time:648000ms step_avg:78.06ms +[2025-09-02 14:11:00] [Rank 0] step:8321/10000 train_time:649665ms step_avg:78.08ms +[2025-09-02 14:11:00] [Rank 0] step:8321/10000 train_time:649665ms step_avg:78.08ms +[2025-09-02 14:11:01] [Rank 0] step:8341/10000 train_time:651339ms step_avg:78.09ms +[2025-09-02 14:11:01] [Rank 0] step:8341/10000 train_time:651339ms step_avg:78.09ms +[2025-09-02 14:11:03] [Rank 0] step:8361/10000 train_time:653013ms step_avg:78.10ms +[2025-09-02 14:11:03] [Rank 0] step:8361/10000 train_time:653013ms step_avg:78.10ms +[2025-09-02 14:11:05] [Rank 0] step:8381/10000 train_time:654679ms step_avg:78.11ms +[2025-09-02 14:11:05] [Rank 0] step:8381/10000 train_time:654679ms step_avg:78.11ms +[2025-09-02 14:11:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:11:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:11:18] [Rank 0] PRINT: step:8400/10000 val_loss:3.6627 svd_entropy: attn_qk:H=0.7930,top10E=0.23,eRank=207.9,q75/q25=49.79 attn_vo:H=0.8246,top10E=0.11,eRank=317.2,q75/q25=inf mlp_w1:H=0.9160,top10E=0.13,eRank=442.3,q75/q25=4.27 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6981,top10E=0.18,eRank=152.3,q75/q25=inf train_time:656434ms step_avg:78.15ms +[2025-09-02 14:11:18] [Rank 0] PRINT: step:8400/10000 val_loss:3.6627 svd_entropy: attn_qk:H=0.7930,top10E=0.23,eRank=207.9,q75/q25=49.79 attn_vo:H=0.8246,top10E=0.11,eRank=317.2,q75/q25=inf mlp_w1:H=0.9160,top10E=0.13,eRank=442.3,q75/q25=4.27 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6981,top10E=0.18,eRank=152.3,q75/q25=inf train_time:656434ms step_avg:78.15ms +[2025-09-02 14:11:18] [Rank 0] step:8401/10000 train_time:656451ms step_avg:78.14ms +[2025-09-02 14:11:18] [Rank 0] step:8401/10000 train_time:656451ms step_avg:78.14ms +[2025-09-02 14:11:20] [Rank 0] step:8421/10000 train_time:658028ms step_avg:78.14ms +[2025-09-02 14:11:20] [Rank 0] step:8421/10000 train_time:658028ms step_avg:78.14ms +[2025-09-02 14:11:22] [Rank 0] step:8441/10000 train_time:659694ms step_avg:78.15ms +[2025-09-02 14:11:22] [Rank 0] step:8441/10000 train_time:659694ms step_avg:78.15ms +[2025-09-02 14:11:23] [Rank 0] step:8461/10000 train_time:661361ms step_avg:78.17ms +[2025-09-02 14:11:23] [Rank 0] step:8461/10000 train_time:661361ms step_avg:78.17ms +[2025-09-02 14:11:25] [Rank 0] step:8481/10000 train_time:663034ms step_avg:78.18ms +[2025-09-02 14:11:25] [Rank 0] step:8481/10000 train_time:663034ms step_avg:78.18ms +[2025-09-02 14:11:27] [Rank 0] step:8501/10000 train_time:664727ms step_avg:78.19ms +[2025-09-02 14:11:27] [Rank 0] step:8501/10000 train_time:664727ms step_avg:78.19ms +[2025-09-02 14:11:28] [Rank 0] step:8521/10000 train_time:666402ms step_avg:78.21ms +[2025-09-02 14:11:28] [Rank 0] step:8521/10000 train_time:666402ms step_avg:78.21ms +[2025-09-02 14:11:30] [Rank 0] 
step:8541/10000 train_time:668086ms step_avg:78.22ms +[2025-09-02 14:11:30] [Rank 0] step:8541/10000 train_time:668086ms step_avg:78.22ms +[2025-09-02 14:11:32] [Rank 0] step:8561/10000 train_time:669760ms step_avg:78.23ms +[2025-09-02 14:11:32] [Rank 0] step:8561/10000 train_time:669760ms step_avg:78.23ms +[2025-09-02 14:11:33] [Rank 0] step:8581/10000 train_time:671435ms step_avg:78.25ms +[2025-09-02 14:11:33] [Rank 0] step:8581/10000 train_time:671435ms step_avg:78.25ms +[2025-09-02 14:11:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:11:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:11:47] [Rank 0] PRINT: step:8600/10000 val_loss:3.6518 svd_entropy: attn_qk:H=0.7932,top10E=0.23,eRank=208.2,q75/q25=49.75 attn_vo:H=0.8249,top10E=0.11,eRank=317.7,q75/q25=inf mlp_w1:H=0.9163,top10E=0.13,eRank=443.1,q75/q25=4.26 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6987,top10E=0.18,eRank=152.8,q75/q25=inf train_time:673182ms step_avg:78.28ms +[2025-09-02 14:11:47] [Rank 0] PRINT: step:8600/10000 val_loss:3.6518 svd_entropy: attn_qk:H=0.7932,top10E=0.23,eRank=208.2,q75/q25=49.75 attn_vo:H=0.8249,top10E=0.11,eRank=317.7,q75/q25=inf mlp_w1:H=0.9163,top10E=0.13,eRank=443.1,q75/q25=4.26 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6987,top10E=0.18,eRank=152.8,q75/q25=inf train_time:673182ms step_avg:78.28ms +[2025-09-02 14:11:47] [Rank 0] step:8601/10000 train_time:673198ms step_avg:78.27ms +[2025-09-02 14:11:47] [Rank 0] step:8601/10000 train_time:673198ms step_avg:78.27ms +[2025-09-02 14:11:49] [Rank 0] step:8621/10000 train_time:674789ms step_avg:78.27ms +[2025-09-02 14:11:49] [Rank 0] step:8621/10000 train_time:674789ms step_avg:78.27ms +[2025-09-02 14:11:50] [Rank 0] step:8641/10000 train_time:676458ms step_avg:78.28ms +[2025-09-02 
14:11:50] [Rank 0] step:8641/10000 train_time:676458ms step_avg:78.28ms +[2025-09-02 14:11:52] [Rank 0] step:8661/10000 train_time:678132ms step_avg:78.30ms +[2025-09-02 14:11:52] [Rank 0] step:8661/10000 train_time:678132ms step_avg:78.30ms +[2025-09-02 14:11:54] [Rank 0] step:8681/10000 train_time:679800ms step_avg:78.31ms +[2025-09-02 14:11:54] [Rank 0] step:8681/10000 train_time:679800ms step_avg:78.31ms +[2025-09-02 14:11:55] [Rank 0] step:8701/10000 train_time:681462ms step_avg:78.32ms +[2025-09-02 14:11:55] [Rank 0] step:8701/10000 train_time:681462ms step_avg:78.32ms +[2025-09-02 14:11:57] [Rank 0] step:8721/10000 train_time:683136ms step_avg:78.33ms +[2025-09-02 14:11:57] [Rank 0] step:8721/10000 train_time:683136ms step_avg:78.33ms +[2025-09-02 14:11:59] [Rank 0] step:8741/10000 train_time:684797ms step_avg:78.34ms +[2025-09-02 14:11:59] [Rank 0] step:8741/10000 train_time:684797ms step_avg:78.34ms +[2025-09-02 14:12:00] [Rank 0] step:8761/10000 train_time:686467ms step_avg:78.35ms +[2025-09-02 14:12:00] [Rank 0] step:8761/10000 train_time:686467ms step_avg:78.35ms +[2025-09-02 14:12:02] [Rank 0] step:8781/10000 train_time:688142ms step_avg:78.37ms +[2025-09-02 14:12:02] [Rank 0] step:8781/10000 train_time:688142ms step_avg:78.37ms +[2025-09-02 14:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:12:15] [Rank 0] PRINT: step:8800/10000 val_loss:3.6430 svd_entropy: attn_qk:H=0.7935,top10E=0.23,eRank=208.4,q75/q25=49.60 attn_vo:H=0.8252,top10E=0.11,eRank=318.2,q75/q25=inf mlp_w1:H=0.9165,top10E=0.13,eRank=443.8,q75/q25=4.25 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6992,top10E=0.18,eRank=153.4,q75/q25=inf train_time:689901ms step_avg:78.40ms +[2025-09-02 14:12:15] [Rank 0] PRINT: step:8800/10000 val_loss:3.6430 svd_entropy: attn_qk:H=0.7935,top10E=0.23,eRank=208.4,q75/q25=49.60 attn_vo:H=0.8252,top10E=0.11,eRank=318.2,q75/q25=inf mlp_w1:H=0.9165,top10E=0.13,eRank=443.8,q75/q25=4.25 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6992,top10E=0.18,eRank=153.4,q75/q25=inf train_time:689901ms step_avg:78.40ms +[2025-09-02 14:12:15] [Rank 0] step:8801/10000 train_time:689917ms step_avg:78.39ms +[2025-09-02 14:12:15] [Rank 0] step:8801/10000 train_time:689917ms step_avg:78.39ms +[2025-09-02 14:12:17] [Rank 0] step:8821/10000 train_time:691508ms step_avg:78.39ms +[2025-09-02 14:12:17] [Rank 0] step:8821/10000 train_time:691508ms step_avg:78.39ms +[2025-09-02 14:12:19] [Rank 0] step:8841/10000 train_time:693199ms step_avg:78.41ms +[2025-09-02 14:12:19] [Rank 0] step:8841/10000 train_time:693199ms step_avg:78.41ms +[2025-09-02 14:12:20] [Rank 0] step:8861/10000 train_time:694864ms step_avg:78.42ms +[2025-09-02 14:12:20] [Rank 0] step:8861/10000 train_time:694864ms step_avg:78.42ms +[2025-09-02 14:12:22] [Rank 0] step:8881/10000 train_time:696536ms step_avg:78.43ms +[2025-09-02 14:12:22] [Rank 0] step:8881/10000 train_time:696536ms step_avg:78.43ms +[2025-09-02 14:12:24] [Rank 0] step:8901/10000 train_time:698207ms step_avg:78.44ms +[2025-09-02 14:12:24] [Rank 0] step:8901/10000 train_time:698207ms step_avg:78.44ms +[2025-09-02 14:12:26] [Rank 0] step:8921/10000 train_time:699892ms step_avg:78.45ms +[2025-09-02 14:12:26] [Rank 0] step:8921/10000 train_time:699892ms step_avg:78.45ms +[2025-09-02 14:12:27] [Rank 0] 
step:8941/10000 train_time:701572ms step_avg:78.47ms +[2025-09-02 14:12:27] [Rank 0] step:8941/10000 train_time:701572ms step_avg:78.47ms +[2025-09-02 14:12:29] [Rank 0] step:8961/10000 train_time:703242ms step_avg:78.48ms +[2025-09-02 14:12:29] [Rank 0] step:8961/10000 train_time:703242ms step_avg:78.48ms +[2025-09-02 14:12:31] [Rank 0] step:8981/10000 train_time:704913ms step_avg:78.49ms +[2025-09-02 14:12:31] [Rank 0] step:8981/10000 train_time:704913ms step_avg:78.49ms +[2025-09-02 14:12:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:12:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:12:44] [Rank 0] PRINT: step:9000/10000 val_loss:3.6339 svd_entropy: attn_qk:H=0.7937,top10E=0.23,eRank=208.7,q75/q25=49.48 attn_vo:H=0.8254,top10E=0.11,eRank=318.7,q75/q25=inf mlp_w1:H=0.9167,top10E=0.13,eRank=444.4,q75/q25=4.24 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6997,top10E=0.18,eRank=154.0,q75/q25=inf train_time:706670ms step_avg:78.52ms +[2025-09-02 14:12:44] [Rank 0] PRINT: step:9000/10000 val_loss:3.6339 svd_entropy: attn_qk:H=0.7937,top10E=0.23,eRank=208.7,q75/q25=49.48 attn_vo:H=0.8254,top10E=0.11,eRank=318.7,q75/q25=inf mlp_w1:H=0.9167,top10E=0.13,eRank=444.4,q75/q25=4.24 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.6997,top10E=0.18,eRank=154.0,q75/q25=inf train_time:706670ms step_avg:78.52ms +[2025-09-02 14:12:44] [Rank 0] step:9001/10000 train_time:706685ms step_avg:78.51ms +[2025-09-02 14:12:44] [Rank 0] step:9001/10000 train_time:706685ms step_avg:78.51ms +[2025-09-02 14:12:46] [Rank 0] step:9021/10000 train_time:708285ms step_avg:78.52ms +[2025-09-02 14:12:46] [Rank 0] step:9021/10000 train_time:708285ms step_avg:78.52ms +[2025-09-02 14:12:47] [Rank 0] step:9041/10000 train_time:709958ms step_avg:78.53ms +[2025-09-02 
14:12:47] [Rank 0] step:9041/10000 train_time:709958ms step_avg:78.53ms +[2025-09-02 14:12:49] [Rank 0] step:9061/10000 train_time:711720ms step_avg:78.55ms +[2025-09-02 14:12:49] [Rank 0] step:9061/10000 train_time:711720ms step_avg:78.55ms +[2025-09-02 14:12:51] [Rank 0] step:9081/10000 train_time:713401ms step_avg:78.56ms +[2025-09-02 14:12:51] [Rank 0] step:9081/10000 train_time:713401ms step_avg:78.56ms +[2025-09-02 14:12:53] [Rank 0] step:9101/10000 train_time:715093ms step_avg:78.57ms +[2025-09-02 14:12:53] [Rank 0] step:9101/10000 train_time:715093ms step_avg:78.57ms +[2025-09-02 14:12:54] [Rank 0] step:9121/10000 train_time:716772ms step_avg:78.58ms +[2025-09-02 14:12:54] [Rank 0] step:9121/10000 train_time:716772ms step_avg:78.58ms +[2025-09-02 14:12:56] [Rank 0] step:9141/10000 train_time:718439ms step_avg:78.60ms +[2025-09-02 14:12:56] [Rank 0] step:9141/10000 train_time:718439ms step_avg:78.60ms +[2025-09-02 14:12:58] [Rank 0] step:9161/10000 train_time:720108ms step_avg:78.61ms +[2025-09-02 14:12:58] [Rank 0] step:9161/10000 train_time:720108ms step_avg:78.61ms +[2025-09-02 14:12:59] [Rank 0] step:9181/10000 train_time:721814ms step_avg:78.62ms +[2025-09-02 14:12:59] [Rank 0] step:9181/10000 train_time:721814ms step_avg:78.62ms +[2025-09-02 14:13:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:13:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:13:13] [Rank 0] PRINT: step:9200/10000 val_loss:3.6256 svd_entropy: attn_qk:H=0.7938,top10E=0.23,eRank=208.8,q75/q25=49.24 attn_vo:H=0.8256,top10E=0.11,eRank=319.1,q75/q25=inf mlp_w1:H=0.9169,top10E=0.13,eRank=445.0,q75/q25=4.23 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7001,top10E=0.18,eRank=154.4,q75/q25=inf train_time:723573ms step_avg:78.65ms +[2025-09-02 14:13:13] [Rank 0] PRINT: step:9200/10000 val_loss:3.6256 svd_entropy: attn_qk:H=0.7938,top10E=0.23,eRank=208.8,q75/q25=49.24 attn_vo:H=0.8256,top10E=0.11,eRank=319.1,q75/q25=inf mlp_w1:H=0.9169,top10E=0.13,eRank=445.0,q75/q25=4.23 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7001,top10E=0.18,eRank=154.4,q75/q25=inf train_time:723573ms step_avg:78.65ms +[2025-09-02 14:13:13] [Rank 0] step:9201/10000 train_time:723589ms step_avg:78.64ms +[2025-09-02 14:13:13] [Rank 0] step:9201/10000 train_time:723589ms step_avg:78.64ms +[2025-09-02 14:13:14] [Rank 0] step:9221/10000 train_time:725193ms step_avg:78.65ms +[2025-09-02 14:13:14] [Rank 0] step:9221/10000 train_time:725193ms step_avg:78.65ms +[2025-09-02 14:13:16] [Rank 0] step:9241/10000 train_time:726876ms step_avg:78.66ms +[2025-09-02 14:13:16] [Rank 0] step:9241/10000 train_time:726876ms step_avg:78.66ms +[2025-09-02 14:13:18] [Rank 0] step:9261/10000 train_time:728558ms step_avg:78.67ms +[2025-09-02 14:13:18] [Rank 0] step:9261/10000 train_time:728558ms step_avg:78.67ms +[2025-09-02 14:13:19] [Rank 0] step:9281/10000 train_time:730224ms step_avg:78.68ms +[2025-09-02 14:13:19] [Rank 0] step:9281/10000 train_time:730224ms step_avg:78.68ms +[2025-09-02 14:13:21] [Rank 0] step:9301/10000 train_time:731897ms step_avg:78.69ms +[2025-09-02 14:13:21] [Rank 0] step:9301/10000 train_time:731897ms step_avg:78.69ms +[2025-09-02 14:13:23] [Rank 0] step:9321/10000 train_time:733574ms step_avg:78.70ms +[2025-09-02 14:13:23] [Rank 0] step:9321/10000 train_time:733574ms step_avg:78.70ms +[2025-09-02 14:13:25] [Rank 0] 
step:9341/10000 train_time:735248ms step_avg:78.71ms +[2025-09-02 14:13:25] [Rank 0] step:9341/10000 train_time:735248ms step_avg:78.71ms +[2025-09-02 14:13:26] [Rank 0] step:9361/10000 train_time:736928ms step_avg:78.72ms +[2025-09-02 14:13:26] [Rank 0] step:9361/10000 train_time:736928ms step_avg:78.72ms +[2025-09-02 14:13:28] [Rank 0] step:9381/10000 train_time:738616ms step_avg:78.74ms +[2025-09-02 14:13:28] [Rank 0] step:9381/10000 train_time:738616ms step_avg:78.74ms +[2025-09-02 14:13:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:13:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:13:41] [Rank 0] PRINT: step:9400/10000 val_loss:3.6178 svd_entropy: attn_qk:H=0.7940,top10E=0.23,eRank=209.0,q75/q25=49.17 attn_vo:H=0.8258,top10E=0.11,eRank=319.4,q75/q25=inf mlp_w1:H=0.9171,top10E=0.13,eRank=445.5,q75/q25=4.22 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7004,top10E=0.18,eRank=154.8,q75/q25=inf train_time:740379ms step_avg:78.76ms +[2025-09-02 14:13:41] [Rank 0] PRINT: step:9400/10000 val_loss:3.6178 svd_entropy: attn_qk:H=0.7940,top10E=0.23,eRank=209.0,q75/q25=49.17 attn_vo:H=0.8258,top10E=0.11,eRank=319.4,q75/q25=inf mlp_w1:H=0.9171,top10E=0.13,eRank=445.5,q75/q25=4.22 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7004,top10E=0.18,eRank=154.8,q75/q25=inf train_time:740379ms step_avg:78.76ms +[2025-09-02 14:13:41] [Rank 0] step:9401/10000 train_time:740396ms step_avg:78.76ms +[2025-09-02 14:13:41] [Rank 0] step:9401/10000 train_time:740396ms step_avg:78.76ms +[2025-09-02 14:13:43] [Rank 0] step:9421/10000 train_time:741981ms step_avg:78.76ms +[2025-09-02 14:13:43] [Rank 0] step:9421/10000 train_time:741981ms step_avg:78.76ms +[2025-09-02 14:13:45] [Rank 0] step:9441/10000 train_time:743655ms step_avg:78.77ms +[2025-09-02 
14:13:45] [Rank 0] step:9441/10000 train_time:743655ms step_avg:78.77ms +[2025-09-02 14:13:46] [Rank 0] step:9461/10000 train_time:745332ms step_avg:78.78ms +[2025-09-02 14:13:46] [Rank 0] step:9461/10000 train_time:745332ms step_avg:78.78ms +[2025-09-02 14:13:48] [Rank 0] step:9481/10000 train_time:747109ms step_avg:78.80ms +[2025-09-02 14:13:48] [Rank 0] step:9481/10000 train_time:747109ms step_avg:78.80ms +[2025-09-02 14:13:50] [Rank 0] step:9501/10000 train_time:748797ms step_avg:78.81ms +[2025-09-02 14:13:50] [Rank 0] step:9501/10000 train_time:748797ms step_avg:78.81ms +[2025-09-02 14:13:52] [Rank 0] step:9521/10000 train_time:750525ms step_avg:78.83ms +[2025-09-02 14:13:52] [Rank 0] step:9521/10000 train_time:750525ms step_avg:78.83ms +[2025-09-02 14:13:53] [Rank 0] step:9541/10000 train_time:752207ms step_avg:78.84ms +[2025-09-02 14:13:53] [Rank 0] step:9541/10000 train_time:752207ms step_avg:78.84ms +[2025-09-02 14:13:55] [Rank 0] step:9561/10000 train_time:753874ms step_avg:78.85ms +[2025-09-02 14:13:55] [Rank 0] step:9561/10000 train_time:753874ms step_avg:78.85ms +[2025-09-02 14:13:57] [Rank 0] step:9581/10000 train_time:755551ms step_avg:78.86ms +[2025-09-02 14:13:57] [Rank 0] step:9581/10000 train_time:755551ms step_avg:78.86ms +[2025-09-02 14:13:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:13:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:14:10] [Rank 0] PRINT: step:9600/10000 val_loss:3.6114 svd_entropy: attn_qk:H=0.7941,top10E=0.23,eRank=209.1,q75/q25=49.11 attn_vo:H=0.8260,top10E=0.11,eRank=319.7,q75/q25=inf mlp_w1:H=0.9172,top10E=0.13,eRank=445.8,q75/q25=4.22 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7008,top10E=0.18,eRank=155.2,q75/q25=inf train_time:757322ms step_avg:78.89ms +[2025-09-02 14:14:10] [Rank 0] PRINT: step:9600/10000 val_loss:3.6114 svd_entropy: attn_qk:H=0.7941,top10E=0.23,eRank=209.1,q75/q25=49.11 attn_vo:H=0.8260,top10E=0.11,eRank=319.7,q75/q25=inf mlp_w1:H=0.9172,top10E=0.13,eRank=445.8,q75/q25=4.22 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7008,top10E=0.18,eRank=155.2,q75/q25=inf train_time:757322ms step_avg:78.89ms +[2025-09-02 14:14:10] [Rank 0] step:9601/10000 train_time:757337ms step_avg:78.88ms +[2025-09-02 14:14:10] [Rank 0] step:9601/10000 train_time:757337ms step_avg:78.88ms +[2025-09-02 14:14:12] [Rank 0] step:9621/10000 train_time:758952ms step_avg:78.88ms +[2025-09-02 14:14:12] [Rank 0] step:9621/10000 train_time:758952ms step_avg:78.88ms +[2025-09-02 14:14:14] [Rank 0] step:9641/10000 train_time:760631ms step_avg:78.90ms +[2025-09-02 14:14:14] [Rank 0] step:9641/10000 train_time:760631ms step_avg:78.90ms +[2025-09-02 14:14:15] [Rank 0] step:9661/10000 train_time:762336ms step_avg:78.91ms +[2025-09-02 14:14:15] [Rank 0] step:9661/10000 train_time:762336ms step_avg:78.91ms +[2025-09-02 14:14:17] [Rank 0] step:9681/10000 train_time:764033ms step_avg:78.92ms +[2025-09-02 14:14:17] [Rank 0] step:9681/10000 train_time:764033ms step_avg:78.92ms +[2025-09-02 14:14:19] [Rank 0] step:9701/10000 train_time:765744ms step_avg:78.93ms +[2025-09-02 14:14:19] [Rank 0] step:9701/10000 train_time:765744ms step_avg:78.93ms +[2025-09-02 14:14:20] [Rank 0] step:9721/10000 train_time:767437ms step_avg:78.95ms +[2025-09-02 14:14:20] [Rank 0] step:9721/10000 train_time:767437ms step_avg:78.95ms +[2025-09-02 14:14:22] [Rank 0] 
step:9741/10000 train_time:769155ms step_avg:78.96ms +[2025-09-02 14:14:22] [Rank 0] step:9741/10000 train_time:769155ms step_avg:78.96ms +[2025-09-02 14:14:24] [Rank 0] step:9761/10000 train_time:770860ms step_avg:78.97ms +[2025-09-02 14:14:24] [Rank 0] step:9761/10000 train_time:770860ms step_avg:78.97ms +[2025-09-02 14:14:26] [Rank 0] step:9781/10000 train_time:772572ms step_avg:78.99ms +[2025-09-02 14:14:26] [Rank 0] step:9781/10000 train_time:772572ms step_avg:78.99ms +[2025-09-02 14:14:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:14:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:14:39] [Rank 0] PRINT: step:9800/10000 val_loss:3.6050 svd_entropy: attn_qk:H=0.7942,top10E=0.23,eRank=209.2,q75/q25=49.12 attn_vo:H=0.8261,top10E=0.11,eRank=319.9,q75/q25=inf mlp_w1:H=0.9173,top10E=0.13,eRank=446.2,q75/q25=4.21 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7010,top10E=0.18,eRank=155.5,q75/q25=inf train_time:774378ms step_avg:79.02ms +[2025-09-02 14:14:39] [Rank 0] PRINT: step:9800/10000 val_loss:3.6050 svd_entropy: attn_qk:H=0.7942,top10E=0.23,eRank=209.2,q75/q25=49.12 attn_vo:H=0.8261,top10E=0.11,eRank=319.9,q75/q25=inf mlp_w1:H=0.9173,top10E=0.13,eRank=446.2,q75/q25=4.21 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7010,top10E=0.18,eRank=155.5,q75/q25=inf train_time:774378ms step_avg:79.02ms +[2025-09-02 14:14:39] [Rank 0] step:9801/10000 train_time:774393ms step_avg:79.01ms +[2025-09-02 14:14:39] [Rank 0] step:9801/10000 train_time:774393ms step_avg:79.01ms +[2025-09-02 14:14:41] [Rank 0] step:9821/10000 train_time:776026ms step_avg:79.02ms +[2025-09-02 14:14:41] [Rank 0] step:9821/10000 train_time:776026ms step_avg:79.02ms +[2025-09-02 14:14:43] [Rank 0] step:9841/10000 train_time:777739ms step_avg:79.03ms +[2025-09-02 
14:14:43] [Rank 0] step:9841/10000 train_time:777739ms step_avg:79.03ms +[2025-09-02 14:14:44] [Rank 0] step:9861/10000 train_time:779427ms step_avg:79.04ms +[2025-09-02 14:14:44] [Rank 0] step:9861/10000 train_time:779427ms step_avg:79.04ms +[2025-09-02 14:14:46] [Rank 0] step:9881/10000 train_time:781118ms step_avg:79.05ms +[2025-09-02 14:14:46] [Rank 0] step:9881/10000 train_time:781118ms step_avg:79.05ms +[2025-09-02 14:14:48] [Rank 0] step:9901/10000 train_time:782819ms step_avg:79.06ms +[2025-09-02 14:14:48] [Rank 0] step:9901/10000 train_time:782819ms step_avg:79.06ms +[2025-09-02 14:14:49] [Rank 0] step:9921/10000 train_time:784518ms step_avg:79.08ms +[2025-09-02 14:14:49] [Rank 0] step:9921/10000 train_time:784518ms step_avg:79.08ms +[2025-09-02 14:14:51] [Rank 0] step:9941/10000 train_time:786221ms step_avg:79.09ms +[2025-09-02 14:14:51] [Rank 0] step:9941/10000 train_time:786221ms step_avg:79.09ms +[2025-09-02 14:14:53] [Rank 0] step:9961/10000 train_time:787922ms step_avg:79.10ms +[2025-09-02 14:14:53] [Rank 0] step:9961/10000 train_time:787922ms step_avg:79.10ms +[2025-09-02 14:14:54] [Rank 0] step:9981/10000 train_time:789625ms step_avg:79.11ms +[2025-09-02 14:14:54] [Rank 0] step:9981/10000 train_time:789625ms step_avg:79.11ms +[2025-09-02 14:14:56] [Rank 0] step:10000/10000 train_time:791284ms step_avg:79.13ms +[2025-09-02 14:14:56] [Rank 0] step:10000/10000 train_time:791284ms step_avg:79.13ms +[2025-09-02 14:14:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:14:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:15:08] [Rank 0] PRINT: step:10000/10000 val_loss:3.5992 svd_entropy: attn_qk:H=0.7942,top10E=0.23,eRank=209.2,q75/q25=49.05 attn_vo:H=0.8262,top10E=0.11,eRank=320.1,q75/q25=inf mlp_w1:H=0.9174,top10E=0.13,eRank=446.4,q75/q25=4.21 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7012,top10E=0.18,eRank=155.7,q75/q25=inf train_time:791463ms step_avg:79.15ms +[2025-09-02 14:15:08] [Rank 0] PRINT: step:10000/10000 val_loss:3.5992 svd_entropy: attn_qk:H=0.7942,top10E=0.23,eRank=209.2,q75/q25=49.05 attn_vo:H=0.8262,top10E=0.11,eRank=320.1,q75/q25=inf mlp_w1:H=0.9174,top10E=0.13,eRank=446.4,q75/q25=4.21 mlp_w2:H=0.9711,top10E=0.04,eRank=634.0,q75/q25=2.86 vo_prod:H=0.7012,top10E=0.18,eRank=155.7,q75/q25=inf train_time:791463ms step_avg:79.15ms +[2025-09-02 14:15:08] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 14:15:08 2025 --- +[2025-09-02 14:15:08] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 14:15:08 2025 --- +[2025-09-02 14:15:08] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15078 MiB +[2025-09-02 14:15:08] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15078 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_47/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1b3a0a1c18d2c54a26a7aef0d287345001b41792 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_47/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.4, + 
"vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "651fc1fa-e6eb-4b0f-9cee-eecb120baa60", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_47/training_log_651fc1fa-e6eb-4b0f-9cee-eecb120baa60.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_47/training_log_651fc1fa-e6eb-4b0f-9cee-eecb120baa60.txt new file mode 100644 index 0000000000000000000000000000000000000000..0dc3d8103c1c94ff0ac24440f2529da1e1ef0436 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_47/training_log_651fc1fa-e6eb-4b0f-9cee-eecb120baa60.txt @@ -0,0 +1,2984 @@ +[2025-09-02 15:11:40] [Rank 0] PRINT: --- Script Start: Tue Sep 2 15:11:40 2025 --- +[2025-09-02 15:11:40] [Rank 0] PRINT: --- Script Start: Tue Sep 2 15:11:40 2025 --- +[2025-09-02 15:11:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 15:11:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 15:11:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 15:11:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 15:11:41] [Rank 0] PRINT: Using fixed seed: 47 +[2025-09-02 15:11:41] [Rank 0] PRINT: Using fixed seed: 47 +[2025-09-02 15:11:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_47 +[2025-09-02 15:11:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_47 +[2025-09-02 15:11:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep 
argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of 
tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
@dataclass
class Hyperparameters:
    """Static training configuration.

    Fix: the original declared plain (un-annotated) class attributes, so
    @dataclass registered *zero* fields and repr() printed an empty
    "Hyperparameters()" — exactly what the training logs show. Annotating
    the attributes turns them into real dataclass fields with identical
    names and defaults: attribute access and the __class__.__dict__-based
    config dump are unchanged, while repr()/asdict() now work.
    """
    # data
    train_files: str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    val_files: str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
    val_tokens: int = 1966080        # tokens consumed per validation pass
    train_seq_len: int = 12 * 1024   # FlexAttention sequence length (training)
    val_seq_len: int = 4 * 16 * 1024 # FlexAttention sequence length (validation)
    # optimization
    num_iterations: int = 10000      # total training steps
    cooldown_frac: float = 0.4       # fraction of training spent in LR cooldown
    # architecture
    vocab_size: int = 50257
    # evaluation and logging
    val_loss_every: int = 200        # validate every N steps (0 disables periodic validation)
    save_checkpoint: bool = False
args = Hyperparameters()
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 15:11:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 15:11:41] [Rank 0] PRINT: Constructing model... +[2025-09-02 15:11:41] [Rank 0] PRINT: Constructing model... +[2025-09-02 15:11:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 15:11:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 15:11:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 15:11:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 15:11:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 15:11:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 15:11:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 15:11:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 15:11:43] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 15:11:43] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 15:11:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 15:11:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 15:11:43] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 15:11:43] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 15:11:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 15:11:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 15:11:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 15:11:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 15:11:43] [Rank 0] PRINT: Starting warmup... +[2025-09-02 15:11:43] [Rank 0] PRINT: Starting warmup... +[2025-09-02 15:12:27] [Rank 0] PRINT: Warmup complete. +[2025-09-02 15:12:27] [Rank 0] PRINT: Warmup complete. +[2025-09-02 15:12:28] [Rank 0] PRINT: Starting training... +[2025-09-02 15:12:28] [Rank 0] PRINT: Starting training... 
+[2025-09-02 15:12:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:12:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:12:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 15:12:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 15:12:46] [Rank 0] step:21/10000 train_time:1425ms step_avg:67.87ms +[2025-09-02 15:12:46] [Rank 0] step:21/10000 train_time:1425ms step_avg:67.87ms +[2025-09-02 15:12:48] [Rank 0] step:41/10000 train_time:2873ms step_avg:70.08ms +[2025-09-02 15:12:48] [Rank 0] step:41/10000 train_time:2873ms step_avg:70.08ms +[2025-09-02 15:12:49] [Rank 0] step:61/10000 train_time:4322ms step_avg:70.86ms +[2025-09-02 15:12:49] [Rank 0] step:61/10000 train_time:4322ms step_avg:70.86ms +[2025-09-02 15:12:51] [Rank 0] step:81/10000 train_time:5772ms step_avg:71.26ms +[2025-09-02 15:12:51] [Rank 0] step:81/10000 train_time:5772ms step_avg:71.26ms +[2025-09-02 15:12:52] [Rank 0] step:101/10000 train_time:7223ms step_avg:71.52ms +[2025-09-02 15:12:52] [Rank 0] step:101/10000 train_time:7223ms step_avg:71.52ms +[2025-09-02 15:12:54] [Rank 0] step:121/10000 train_time:8815ms step_avg:72.85ms +[2025-09-02 15:12:54] [Rank 0] step:121/10000 
train_time:8815ms step_avg:72.85ms +[2025-09-02 15:12:55] [Rank 0] step:141/10000 train_time:10265ms step_avg:72.80ms +[2025-09-02 15:12:55] [Rank 0] step:141/10000 train_time:10265ms step_avg:72.80ms +[2025-09-02 15:12:57] [Rank 0] step:161/10000 train_time:11715ms step_avg:72.76ms +[2025-09-02 15:12:57] [Rank 0] step:161/10000 train_time:11715ms step_avg:72.76ms +[2025-09-02 15:12:58] [Rank 0] step:181/10000 train_time:13164ms step_avg:72.73ms +[2025-09-02 15:12:58] [Rank 0] step:181/10000 train_time:13164ms step_avg:72.73ms +[2025-09-02 15:12:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:12:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:13:11] [Rank 0] PRINT: step:200/10000 val_loss:6.2428 svd_entropy: attn_qk:H=0.6007,top10E=0.56,eRank=94.9,q75/q25=12.95 attn_vo:H=0.5047,top10E=0.59,eRank=73.2,q75/q25=inf mlp_w1:H=0.6528,top10E=0.53,eRank=88.8,q75/q25=2.96 mlp_w2:H=0.7901,top10E=0.19,eRank=193.2,q75/q25=17.66 vo_prod:H=0.3209,top10E=0.82,eRank=14.1,q75/q25=inf train_time:14688ms step_avg:73.44ms +[2025-09-02 15:13:11] [Rank 0] PRINT: step:200/10000 val_loss:6.2428 svd_entropy: attn_qk:H=0.6007,top10E=0.56,eRank=94.9,q75/q25=12.95 attn_vo:H=0.5047,top10E=0.59,eRank=73.2,q75/q25=inf mlp_w1:H=0.6528,top10E=0.53,eRank=88.8,q75/q25=2.96 mlp_w2:H=0.7901,top10E=0.19,eRank=193.2,q75/q25=17.66 vo_prod:H=0.3209,top10E=0.82,eRank=14.1,q75/q25=inf train_time:14688ms step_avg:73.44ms +[2025-09-02 15:13:11] [Rank 0] step:201/10000 train_time:14701ms step_avg:73.14ms +[2025-09-02 15:13:11] [Rank 0] step:201/10000 train_time:14701ms step_avg:73.14ms +[2025-09-02 15:13:13] [Rank 0] step:221/10000 train_time:16089ms step_avg:72.80ms +[2025-09-02 15:13:13] [Rank 0] step:221/10000 train_time:16089ms step_avg:72.80ms +[2025-09-02 15:13:14] [Rank 0] step:241/10000 train_time:17537ms 
step_avg:72.77ms +[2025-09-02 15:13:14] [Rank 0] step:241/10000 train_time:17537ms step_avg:72.77ms +[2025-09-02 15:13:16] [Rank 0] step:261/10000 train_time:18984ms step_avg:72.74ms +[2025-09-02 15:13:16] [Rank 0] step:261/10000 train_time:18984ms step_avg:72.74ms +[2025-09-02 15:13:17] [Rank 0] step:281/10000 train_time:20431ms step_avg:72.71ms +[2025-09-02 15:13:17] [Rank 0] step:281/10000 train_time:20431ms step_avg:72.71ms +[2025-09-02 15:13:19] [Rank 0] step:301/10000 train_time:21878ms step_avg:72.68ms +[2025-09-02 15:13:19] [Rank 0] step:301/10000 train_time:21878ms step_avg:72.68ms +[2025-09-02 15:13:20] [Rank 0] step:321/10000 train_time:23324ms step_avg:72.66ms +[2025-09-02 15:13:20] [Rank 0] step:321/10000 train_time:23324ms step_avg:72.66ms +[2025-09-02 15:13:22] [Rank 0] step:341/10000 train_time:24774ms step_avg:72.65ms +[2025-09-02 15:13:22] [Rank 0] step:341/10000 train_time:24774ms step_avg:72.65ms +[2025-09-02 15:13:23] [Rank 0] step:361/10000 train_time:26221ms step_avg:72.63ms +[2025-09-02 15:13:23] [Rank 0] step:361/10000 train_time:26221ms step_avg:72.63ms +[2025-09-02 15:13:24] [Rank 0] step:381/10000 train_time:27670ms step_avg:72.62ms +[2025-09-02 15:13:24] [Rank 0] step:381/10000 train_time:27670ms step_avg:72.62ms +[2025-09-02 15:13:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:13:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:13:38] [Rank 0] PRINT: step:400/10000 val_loss:5.7329 svd_entropy: attn_qk:H=0.6414,top10E=0.46,eRank=108.5,q75/q25=15.43 attn_vo:H=0.5914,top10E=0.43,eRank=100.6,q75/q25=inf mlp_w1:H=0.6811,top10E=0.42,eRank=109.7,q75/q25=4.46 mlp_w2:H=0.9234,top10E=0.07,eRank=463.3,q75/q25=7.10 vo_prod:H=0.4220,top10E=0.66,eRank=24.2,q75/q25=inf train_time:29191ms step_avg:72.98ms +[2025-09-02 15:13:38] [Rank 0] PRINT: step:400/10000 val_loss:5.7329 svd_entropy: attn_qk:H=0.6414,top10E=0.46,eRank=108.5,q75/q25=15.43 attn_vo:H=0.5914,top10E=0.43,eRank=100.6,q75/q25=inf mlp_w1:H=0.6811,top10E=0.42,eRank=109.7,q75/q25=4.46 mlp_w2:H=0.9234,top10E=0.07,eRank=463.3,q75/q25=7.10 vo_prod:H=0.4220,top10E=0.66,eRank=24.2,q75/q25=inf train_time:29191ms step_avg:72.98ms +[2025-09-02 15:13:38] [Rank 0] step:401/10000 train_time:29204ms step_avg:72.83ms +[2025-09-02 15:13:38] [Rank 0] step:401/10000 train_time:29204ms step_avg:72.83ms +[2025-09-02 15:13:39] [Rank 0] step:421/10000 train_time:30584ms step_avg:72.65ms +[2025-09-02 15:13:39] [Rank 0] step:421/10000 train_time:30584ms step_avg:72.65ms +[2025-09-02 15:13:41] [Rank 0] step:441/10000 train_time:32030ms step_avg:72.63ms +[2025-09-02 15:13:41] [Rank 0] step:441/10000 train_time:32030ms step_avg:72.63ms +[2025-09-02 15:13:42] [Rank 0] step:461/10000 train_time:33478ms step_avg:72.62ms +[2025-09-02 15:13:42] [Rank 0] step:461/10000 train_time:33478ms step_avg:72.62ms +[2025-09-02 15:13:43] [Rank 0] step:481/10000 train_time:34925ms step_avg:72.61ms +[2025-09-02 15:13:43] [Rank 0] step:481/10000 train_time:34925ms step_avg:72.61ms +[2025-09-02 15:13:45] [Rank 0] step:501/10000 train_time:36372ms step_avg:72.60ms +[2025-09-02 15:13:45] [Rank 0] step:501/10000 train_time:36372ms step_avg:72.60ms +[2025-09-02 15:13:46] [Rank 0] step:521/10000 train_time:37819ms step_avg:72.59ms +[2025-09-02 15:13:46] [Rank 0] step:521/10000 train_time:37819ms step_avg:72.59ms +[2025-09-02 15:13:48] [Rank 0] step:541/10000 train_time:39268ms 
step_avg:72.58ms +[2025-09-02 15:13:48] [Rank 0] step:541/10000 train_time:39268ms step_avg:72.58ms +[2025-09-02 15:13:49] [Rank 0] step:561/10000 train_time:40716ms step_avg:72.58ms +[2025-09-02 15:13:49] [Rank 0] step:561/10000 train_time:40716ms step_avg:72.58ms +[2025-09-02 15:13:51] [Rank 0] step:581/10000 train_time:42168ms step_avg:72.58ms +[2025-09-02 15:13:51] [Rank 0] step:581/10000 train_time:42168ms step_avg:72.58ms +[2025-09-02 15:13:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:13:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:14:04] [Rank 0] PRINT: step:600/10000 val_loss:5.4377 svd_entropy: attn_qk:H=0.6706,top10E=0.40,eRank=120.3,q75/q25=19.84 attn_vo:H=0.6408,top10E=0.35,eRank=125.2,q75/q25=inf mlp_w1:H=0.7246,top10E=0.35,eRank=139.9,q75/q25=6.26 mlp_w2:H=0.9460,top10E=0.05,eRank=537.2,q75/q25=4.76 vo_prod:H=0.4770,top10E=0.55,eRank=33.5,q75/q25=inf train_time:43690ms step_avg:72.82ms +[2025-09-02 15:14:04] [Rank 0] PRINT: step:600/10000 val_loss:5.4377 svd_entropy: attn_qk:H=0.6706,top10E=0.40,eRank=120.3,q75/q25=19.84 attn_vo:H=0.6408,top10E=0.35,eRank=125.2,q75/q25=inf mlp_w1:H=0.7246,top10E=0.35,eRank=139.9,q75/q25=6.26 mlp_w2:H=0.9460,top10E=0.05,eRank=537.2,q75/q25=4.76 vo_prod:H=0.4770,top10E=0.55,eRank=33.5,q75/q25=inf train_time:43690ms step_avg:72.82ms +[2025-09-02 15:14:04] [Rank 0] step:601/10000 train_time:43703ms step_avg:72.72ms +[2025-09-02 15:14:04] [Rank 0] step:601/10000 train_time:43703ms step_avg:72.72ms +[2025-09-02 15:14:05] [Rank 0] step:621/10000 train_time:45097ms step_avg:72.62ms +[2025-09-02 15:14:05] [Rank 0] step:621/10000 train_time:45097ms step_avg:72.62ms +[2025-09-02 15:14:07] [Rank 0] step:641/10000 train_time:46542ms step_avg:72.61ms +[2025-09-02 15:14:07] [Rank 0] step:641/10000 train_time:46542ms step_avg:72.61ms 
+[2025-09-02 15:14:08] [Rank 0] step:661/10000 train_time:47987ms step_avg:72.60ms +[2025-09-02 15:14:08] [Rank 0] step:661/10000 train_time:47987ms step_avg:72.60ms +[2025-09-02 15:14:09] [Rank 0] step:681/10000 train_time:49433ms step_avg:72.59ms +[2025-09-02 15:14:09] [Rank 0] step:681/10000 train_time:49433ms step_avg:72.59ms +[2025-09-02 15:14:11] [Rank 0] step:701/10000 train_time:50881ms step_avg:72.58ms +[2025-09-02 15:14:11] [Rank 0] step:701/10000 train_time:50881ms step_avg:72.58ms +[2025-09-02 15:14:12] [Rank 0] step:721/10000 train_time:52326ms step_avg:72.57ms +[2025-09-02 15:14:12] [Rank 0] step:721/10000 train_time:52326ms step_avg:72.57ms +[2025-09-02 15:14:14] [Rank 0] step:741/10000 train_time:53773ms step_avg:72.57ms +[2025-09-02 15:14:14] [Rank 0] step:741/10000 train_time:53773ms step_avg:72.57ms +[2025-09-02 15:14:15] [Rank 0] step:761/10000 train_time:55239ms step_avg:72.59ms +[2025-09-02 15:14:15] [Rank 0] step:761/10000 train_time:55239ms step_avg:72.59ms +[2025-09-02 15:14:17] [Rank 0] step:781/10000 train_time:56699ms step_avg:72.60ms +[2025-09-02 15:14:17] [Rank 0] step:781/10000 train_time:56699ms step_avg:72.60ms +[2025-09-02 15:14:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:14:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:14:30] [Rank 0] PRINT: step:800/10000 val_loss:5.2066 svd_entropy: attn_qk:H=0.6911,top10E=0.37,eRank=129.7,q75/q25=26.02 attn_vo:H=0.6741,top10E=0.30,eRank=146.5,q75/q25=inf mlp_w1:H=0.7570,top10E=0.31,eRank=167.6,q75/q25=7.28 mlp_w2:H=0.9537,top10E=0.05,eRank=565.1,q75/q25=4.11 vo_prod:H=0.5148,top10E=0.47,eRank=42.6,q75/q25=inf train_time:58234ms step_avg:72.79ms +[2025-09-02 15:14:30] [Rank 0] PRINT: step:800/10000 val_loss:5.2066 svd_entropy: attn_qk:H=0.6911,top10E=0.37,eRank=129.7,q75/q25=26.02 attn_vo:H=0.6741,top10E=0.30,eRank=146.5,q75/q25=inf mlp_w1:H=0.7570,top10E=0.31,eRank=167.6,q75/q25=7.28 mlp_w2:H=0.9537,top10E=0.05,eRank=565.1,q75/q25=4.11 vo_prod:H=0.5148,top10E=0.47,eRank=42.6,q75/q25=inf train_time:58234ms step_avg:72.79ms +[2025-09-02 15:14:30] [Rank 0] step:801/10000 train_time:58247ms step_avg:72.72ms +[2025-09-02 15:14:30] [Rank 0] step:801/10000 train_time:58247ms step_avg:72.72ms +[2025-09-02 15:14:32] [Rank 0] step:821/10000 train_time:59649ms step_avg:72.65ms +[2025-09-02 15:14:32] [Rank 0] step:821/10000 train_time:59649ms step_avg:72.65ms +[2025-09-02 15:14:33] [Rank 0] step:841/10000 train_time:61107ms step_avg:72.66ms +[2025-09-02 15:14:33] [Rank 0] step:841/10000 train_time:61107ms step_avg:72.66ms +[2025-09-02 15:14:34] [Rank 0] step:861/10000 train_time:62565ms step_avg:72.67ms +[2025-09-02 15:14:34] [Rank 0] step:861/10000 train_time:62565ms step_avg:72.67ms +[2025-09-02 15:14:36] [Rank 0] step:881/10000 train_time:64024ms step_avg:72.67ms +[2025-09-02 15:14:36] [Rank 0] step:881/10000 train_time:64024ms step_avg:72.67ms +[2025-09-02 15:14:37] [Rank 0] step:901/10000 train_time:65483ms step_avg:72.68ms +[2025-09-02 15:14:37] [Rank 0] step:901/10000 train_time:65483ms step_avg:72.68ms +[2025-09-02 15:14:39] [Rank 0] step:921/10000 train_time:66942ms step_avg:72.68ms +[2025-09-02 15:14:39] [Rank 0] step:921/10000 train_time:66942ms step_avg:72.68ms +[2025-09-02 15:14:40] [Rank 0] step:941/10000 train_time:68401ms 
step_avg:72.69ms +[2025-09-02 15:14:40] [Rank 0] step:941/10000 train_time:68401ms step_avg:72.69ms +[2025-09-02 15:14:42] [Rank 0] step:961/10000 train_time:69861ms step_avg:72.70ms +[2025-09-02 15:14:42] [Rank 0] step:961/10000 train_time:69861ms step_avg:72.70ms +[2025-09-02 15:14:43] [Rank 0] step:981/10000 train_time:71321ms step_avg:72.70ms +[2025-09-02 15:14:43] [Rank 0] step:981/10000 train_time:71321ms step_avg:72.70ms +[2025-09-02 15:14:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:14:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:14:56] [Rank 0] PRINT: step:1000/10000 val_loss:5.0328 svd_entropy: attn_qk:H=0.7065,top10E=0.34,eRank=137.9,q75/q25=32.43 attn_vo:H=0.6979,top10E=0.27,eRank=164.6,q75/q25=inf mlp_w1:H=0.7813,top10E=0.28,eRank=192.4,q75/q25=7.58 mlp_w2:H=0.9588,top10E=0.05,eRank=584.5,q75/q25=3.71 vo_prod:H=0.5418,top10E=0.42,eRank=50.6,q75/q25=inf train_time:72855ms step_avg:72.86ms +[2025-09-02 15:14:56] [Rank 0] PRINT: step:1000/10000 val_loss:5.0328 svd_entropy: attn_qk:H=0.7065,top10E=0.34,eRank=137.9,q75/q25=32.43 attn_vo:H=0.6979,top10E=0.27,eRank=164.6,q75/q25=inf mlp_w1:H=0.7813,top10E=0.28,eRank=192.4,q75/q25=7.58 mlp_w2:H=0.9588,top10E=0.05,eRank=584.5,q75/q25=3.71 vo_prod:H=0.5418,top10E=0.42,eRank=50.6,q75/q25=inf train_time:72855ms step_avg:72.86ms +[2025-09-02 15:14:57] [Rank 0] step:1001/10000 train_time:72869ms step_avg:72.80ms +[2025-09-02 15:14:57] [Rank 0] step:1001/10000 train_time:72869ms step_avg:72.80ms +[2025-09-02 15:14:58] [Rank 0] step:1021/10000 train_time:74261ms step_avg:72.73ms +[2025-09-02 15:14:58] [Rank 0] step:1021/10000 train_time:74261ms step_avg:72.73ms +[2025-09-02 15:15:00] [Rank 0] step:1041/10000 train_time:75827ms step_avg:72.84ms +[2025-09-02 15:15:00] [Rank 0] step:1041/10000 train_time:75827ms 
step_avg:72.84ms +[2025-09-02 15:15:01] [Rank 0] step:1061/10000 train_time:77299ms step_avg:72.85ms +[2025-09-02 15:15:01] [Rank 0] step:1061/10000 train_time:77299ms step_avg:72.85ms +[2025-09-02 15:15:03] [Rank 0] step:1081/10000 train_time:78800ms step_avg:72.90ms +[2025-09-02 15:15:03] [Rank 0] step:1081/10000 train_time:78800ms step_avg:72.90ms +[2025-09-02 15:15:04] [Rank 0] step:1101/10000 train_time:80261ms step_avg:72.90ms +[2025-09-02 15:15:04] [Rank 0] step:1101/10000 train_time:80261ms step_avg:72.90ms +[2025-09-02 15:15:05] [Rank 0] step:1121/10000 train_time:81726ms step_avg:72.90ms +[2025-09-02 15:15:05] [Rank 0] step:1121/10000 train_time:81726ms step_avg:72.90ms +[2025-09-02 15:15:07] [Rank 0] step:1141/10000 train_time:83186ms step_avg:72.91ms +[2025-09-02 15:15:07] [Rank 0] step:1141/10000 train_time:83186ms step_avg:72.91ms +[2025-09-02 15:15:08] [Rank 0] step:1161/10000 train_time:84648ms step_avg:72.91ms +[2025-09-02 15:15:08] [Rank 0] step:1161/10000 train_time:84648ms step_avg:72.91ms +[2025-09-02 15:15:10] [Rank 0] step:1181/10000 train_time:86107ms step_avg:72.91ms +[2025-09-02 15:15:10] [Rank 0] step:1181/10000 train_time:86107ms step_avg:72.91ms +[2025-09-02 15:15:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:15:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:15:23] [Rank 0] PRINT: step:1200/10000 val_loss:4.8494 svd_entropy: attn_qk:H=0.6963,top10E=0.36,eRank=111.2,q75/q25=39.97 attn_vo:H=0.7495,top10E=0.26,eRank=184.7,q75/q25=51.17 mlp_w1:H=0.8014,top10E=0.26,eRank=216.4,q75/q25=7.56 mlp_w2:H=0.9621,top10E=0.04,eRank=597.5,q75/q25=3.47 vo_prod:H=0.6237,top10E=0.42,eRank=65.9,q75/q25=2666.23 train_time:87643ms step_avg:73.04ms +[2025-09-02 15:15:23] [Rank 0] PRINT: step:1200/10000 val_loss:4.8494 svd_entropy: attn_qk:H=0.6963,top10E=0.36,eRank=111.2,q75/q25=39.97 attn_vo:H=0.7495,top10E=0.26,eRank=184.7,q75/q25=51.17 mlp_w1:H=0.8014,top10E=0.26,eRank=216.4,q75/q25=7.56 mlp_w2:H=0.9621,top10E=0.04,eRank=597.5,q75/q25=3.47 vo_prod:H=0.6237,top10E=0.42,eRank=65.9,q75/q25=2666.23 train_time:87643ms step_avg:73.04ms +[2025-09-02 15:15:23] [Rank 0] step:1201/10000 train_time:87657ms step_avg:72.99ms +[2025-09-02 15:15:23] [Rank 0] step:1201/10000 train_time:87657ms step_avg:72.99ms +[2025-09-02 15:15:25] [Rank 0] step:1221/10000 train_time:89063ms step_avg:72.94ms +[2025-09-02 15:15:25] [Rank 0] step:1221/10000 train_time:89063ms step_avg:72.94ms +[2025-09-02 15:15:26] [Rank 0] step:1241/10000 train_time:90524ms step_avg:72.94ms +[2025-09-02 15:15:26] [Rank 0] step:1241/10000 train_time:90524ms step_avg:72.94ms +[2025-09-02 15:15:27] [Rank 0] step:1261/10000 train_time:91986ms step_avg:72.95ms +[2025-09-02 15:15:27] [Rank 0] step:1261/10000 train_time:91986ms step_avg:72.95ms +[2025-09-02 15:15:29] [Rank 0] step:1281/10000 train_time:93450ms step_avg:72.95ms +[2025-09-02 15:15:29] [Rank 0] step:1281/10000 train_time:93450ms step_avg:72.95ms +[2025-09-02 15:15:30] [Rank 0] step:1301/10000 train_time:94914ms step_avg:72.95ms +[2025-09-02 15:15:30] [Rank 0] step:1301/10000 train_time:94914ms step_avg:72.95ms +[2025-09-02 15:15:32] [Rank 0] step:1321/10000 train_time:96378ms step_avg:72.96ms +[2025-09-02 15:15:32] [Rank 0] step:1321/10000 train_time:96378ms step_avg:72.96ms +[2025-09-02 15:15:33] [Rank 0] 
step:1341/10000 train_time:97841ms step_avg:72.96ms +[2025-09-02 15:15:33] [Rank 0] step:1341/10000 train_time:97841ms step_avg:72.96ms +[2025-09-02 15:15:35] [Rank 0] step:1361/10000 train_time:99305ms step_avg:72.97ms +[2025-09-02 15:15:35] [Rank 0] step:1361/10000 train_time:99305ms step_avg:72.97ms +[2025-09-02 15:15:36] [Rank 0] step:1381/10000 train_time:100769ms step_avg:72.97ms +[2025-09-02 15:15:36] [Rank 0] step:1381/10000 train_time:100769ms step_avg:72.97ms +[2025-09-02 15:15:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:15:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:15:49] [Rank 0] PRINT: step:1400/10000 val_loss:4.7288 svd_entropy: attn_qk:H=0.7053,top10E=0.34,eRank=117.1,q75/q25=46.05 attn_vo:H=0.7639,top10E=0.24,eRank=198.1,q75/q25=55.11 mlp_w1:H=0.8170,top10E=0.24,eRank=237.4,q75/q25=7.39 mlp_w2:H=0.9644,top10E=0.04,eRank=606.7,q75/q25=3.31 vo_prod:H=0.6378,top10E=0.39,eRank=71.6,q75/q25=3626.97 train_time:102307ms step_avg:73.08ms +[2025-09-02 15:15:49] [Rank 0] PRINT: step:1400/10000 val_loss:4.7288 svd_entropy: attn_qk:H=0.7053,top10E=0.34,eRank=117.1,q75/q25=46.05 attn_vo:H=0.7639,top10E=0.24,eRank=198.1,q75/q25=55.11 mlp_w1:H=0.8170,top10E=0.24,eRank=237.4,q75/q25=7.39 mlp_w2:H=0.9644,top10E=0.04,eRank=606.7,q75/q25=3.31 vo_prod:H=0.6378,top10E=0.39,eRank=71.6,q75/q25=3626.97 train_time:102307ms step_avg:73.08ms +[2025-09-02 15:15:49] [Rank 0] step:1401/10000 train_time:102320ms step_avg:73.03ms +[2025-09-02 15:15:49] [Rank 0] step:1401/10000 train_time:102320ms step_avg:73.03ms +[2025-09-02 15:15:51] [Rank 0] step:1421/10000 train_time:103713ms step_avg:72.99ms +[2025-09-02 15:15:51] [Rank 0] step:1421/10000 train_time:103713ms step_avg:72.99ms +[2025-09-02 15:15:52] [Rank 0] step:1441/10000 train_time:105172ms step_avg:72.99ms +[2025-09-02 
15:15:52] [Rank 0] step:1441/10000 train_time:105172ms step_avg:72.99ms +[2025-09-02 15:15:54] [Rank 0] step:1461/10000 train_time:106633ms step_avg:72.99ms +[2025-09-02 15:15:54] [Rank 0] step:1461/10000 train_time:106633ms step_avg:72.99ms +[2025-09-02 15:15:55] [Rank 0] step:1481/10000 train_time:108094ms step_avg:72.99ms +[2025-09-02 15:15:55] [Rank 0] step:1481/10000 train_time:108094ms step_avg:72.99ms +[2025-09-02 15:15:57] [Rank 0] step:1501/10000 train_time:109564ms step_avg:72.99ms +[2025-09-02 15:15:57] [Rank 0] step:1501/10000 train_time:109564ms step_avg:72.99ms +[2025-09-02 15:15:58] [Rank 0] step:1521/10000 train_time:111036ms step_avg:73.00ms +[2025-09-02 15:15:58] [Rank 0] step:1521/10000 train_time:111036ms step_avg:73.00ms +[2025-09-02 15:16:00] [Rank 0] step:1541/10000 train_time:112507ms step_avg:73.01ms +[2025-09-02 15:16:00] [Rank 0] step:1541/10000 train_time:112507ms step_avg:73.01ms +[2025-09-02 15:16:01] [Rank 0] step:1561/10000 train_time:113980ms step_avg:73.02ms +[2025-09-02 15:16:01] [Rank 0] step:1561/10000 train_time:113980ms step_avg:73.02ms +[2025-09-02 15:16:03] [Rank 0] step:1581/10000 train_time:115466ms step_avg:73.03ms +[2025-09-02 15:16:03] [Rank 0] step:1581/10000 train_time:115466ms step_avg:73.03ms +[2025-09-02 15:16:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:16:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:16:16] [Rank 0] PRINT: step:1600/10000 val_loss:4.5999 svd_entropy: attn_qk:H=0.7123,top10E=0.34,eRank=121.8,q75/q25=50.78 attn_vo:H=0.7760,top10E=0.22,eRank=210.6,q75/q25=57.19 mlp_w1:H=0.8295,top10E=0.23,eRank=256.1,q75/q25=7.17 mlp_w2:H=0.9661,top10E=0.04,eRank=613.4,q75/q25=3.20 vo_prod:H=0.6519,top10E=0.37,eRank=78.2,q75/q25=4137.16 train_time:117062ms step_avg:73.16ms +[2025-09-02 15:16:16] [Rank 0] PRINT: step:1600/10000 val_loss:4.5999 svd_entropy: attn_qk:H=0.7123,top10E=0.34,eRank=121.8,q75/q25=50.78 attn_vo:H=0.7760,top10E=0.22,eRank=210.6,q75/q25=57.19 mlp_w1:H=0.8295,top10E=0.23,eRank=256.1,q75/q25=7.17 mlp_w2:H=0.9661,top10E=0.04,eRank=613.4,q75/q25=3.20 vo_prod:H=0.6519,top10E=0.37,eRank=78.2,q75/q25=4137.16 train_time:117062ms step_avg:73.16ms +[2025-09-02 15:16:16] [Rank 0] step:1601/10000 train_time:117075ms step_avg:73.13ms +[2025-09-02 15:16:16] [Rank 0] step:1601/10000 train_time:117075ms step_avg:73.13ms +[2025-09-02 15:16:18] [Rank 0] step:1621/10000 train_time:118477ms step_avg:73.09ms +[2025-09-02 15:16:18] [Rank 0] step:1621/10000 train_time:118477ms step_avg:73.09ms +[2025-09-02 15:16:19] [Rank 0] step:1641/10000 train_time:119947ms step_avg:73.09ms +[2025-09-02 15:16:19] [Rank 0] step:1641/10000 train_time:119947ms step_avg:73.09ms +[2025-09-02 15:16:20] [Rank 0] step:1661/10000 train_time:121418ms step_avg:73.10ms +[2025-09-02 15:16:20] [Rank 0] step:1661/10000 train_time:121418ms step_avg:73.10ms +[2025-09-02 15:16:22] [Rank 0] step:1681/10000 train_time:122889ms step_avg:73.10ms +[2025-09-02 15:16:22] [Rank 0] step:1681/10000 train_time:122889ms step_avg:73.10ms +[2025-09-02 15:16:23] [Rank 0] step:1701/10000 train_time:124361ms step_avg:73.11ms +[2025-09-02 15:16:23] [Rank 0] step:1701/10000 train_time:124361ms step_avg:73.11ms +[2025-09-02 15:16:25] [Rank 0] step:1721/10000 train_time:125834ms step_avg:73.12ms +[2025-09-02 15:16:25] [Rank 0] step:1721/10000 train_time:125834ms step_avg:73.12ms +[2025-09-02 15:16:26] 
[Rank 0] step:1741/10000 train_time:127307ms step_avg:73.12ms +[2025-09-02 15:16:26] [Rank 0] step:1741/10000 train_time:127307ms step_avg:73.12ms +[2025-09-02 15:16:28] [Rank 0] step:1761/10000 train_time:128782ms step_avg:73.13ms +[2025-09-02 15:16:28] [Rank 0] step:1761/10000 train_time:128782ms step_avg:73.13ms +[2025-09-02 15:16:29] [Rank 0] step:1781/10000 train_time:130256ms step_avg:73.14ms +[2025-09-02 15:16:29] [Rank 0] step:1781/10000 train_time:130256ms step_avg:73.14ms +[2025-09-02 15:16:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:16:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:16:43] [Rank 0] PRINT: step:1800/10000 val_loss:4.5061 svd_entropy: attn_qk:H=0.7185,top10E=0.33,eRank=126.3,q75/q25=53.77 attn_vo:H=0.7850,top10E=0.21,eRank=219.2,q75/q25=57.37 mlp_w1:H=0.8397,top10E=0.21,eRank=272.4,q75/q25=6.89 mlp_w2:H=0.9673,top10E=0.04,eRank=618.1,q75/q25=3.13 vo_prod:H=0.6630,top10E=0.35,eRank=84.0,q75/q25=4219.85 train_time:131803ms step_avg:73.22ms +[2025-09-02 15:16:43] [Rank 0] PRINT: step:1800/10000 val_loss:4.5061 svd_entropy: attn_qk:H=0.7185,top10E=0.33,eRank=126.3,q75/q25=53.77 attn_vo:H=0.7850,top10E=0.21,eRank=219.2,q75/q25=57.37 mlp_w1:H=0.8397,top10E=0.21,eRank=272.4,q75/q25=6.89 mlp_w2:H=0.9673,top10E=0.04,eRank=618.1,q75/q25=3.13 vo_prod:H=0.6630,top10E=0.35,eRank=84.0,q75/q25=4219.85 train_time:131803ms step_avg:73.22ms +[2025-09-02 15:16:43] [Rank 0] step:1801/10000 train_time:131817ms step_avg:73.19ms +[2025-09-02 15:16:43] [Rank 0] step:1801/10000 train_time:131817ms step_avg:73.19ms +[2025-09-02 15:16:44] [Rank 0] step:1821/10000 train_time:133233ms step_avg:73.16ms +[2025-09-02 15:16:44] [Rank 0] step:1821/10000 train_time:133233ms step_avg:73.16ms +[2025-09-02 15:16:46] [Rank 0] step:1841/10000 train_time:134701ms step_avg:73.17ms 
+[2025-09-02 15:16:46] [Rank 0] step:1841/10000 train_time:134701ms step_avg:73.17ms +[2025-09-02 15:16:47] [Rank 0] step:1861/10000 train_time:136171ms step_avg:73.17ms +[2025-09-02 15:16:47] [Rank 0] step:1861/10000 train_time:136171ms step_avg:73.17ms +[2025-09-02 15:16:49] [Rank 0] step:1881/10000 train_time:137642ms step_avg:73.17ms +[2025-09-02 15:16:49] [Rank 0] step:1881/10000 train_time:137642ms step_avg:73.17ms +[2025-09-02 15:16:50] [Rank 0] step:1901/10000 train_time:139114ms step_avg:73.18ms +[2025-09-02 15:16:50] [Rank 0] step:1901/10000 train_time:139114ms step_avg:73.18ms +[2025-09-02 15:16:52] [Rank 0] step:1921/10000 train_time:140587ms step_avg:73.18ms +[2025-09-02 15:16:52] [Rank 0] step:1921/10000 train_time:140587ms step_avg:73.18ms +[2025-09-02 15:16:53] [Rank 0] step:1941/10000 train_time:142061ms step_avg:73.19ms +[2025-09-02 15:16:53] [Rank 0] step:1941/10000 train_time:142061ms step_avg:73.19ms +[2025-09-02 15:16:54] [Rank 0] step:1961/10000 train_time:143533ms step_avg:73.19ms +[2025-09-02 15:16:54] [Rank 0] step:1961/10000 train_time:143533ms step_avg:73.19ms +[2025-09-02 15:16:56] [Rank 0] step:1981/10000 train_time:145006ms step_avg:73.20ms +[2025-09-02 15:16:56] [Rank 0] step:1981/10000 train_time:145006ms step_avg:73.20ms +[2025-09-02 15:16:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:16:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:17:09] [Rank 0] PRINT: step:2000/10000 val_loss:4.4459 svd_entropy: attn_qk:H=0.7233,top10E=0.32,eRank=129.8,q75/q25=56.56 attn_vo:H=0.7931,top10E=0.20,eRank=228.4,q75/q25=57.49 mlp_w1:H=0.8478,top10E=0.21,eRank=286.4,q75/q25=6.65 mlp_w2:H=0.9681,top10E=0.04,eRank=621.6,q75/q25=3.08 vo_prod:H=0.6715,top10E=0.33,eRank=88.9,q75/q25=4202.31 train_time:146553ms step_avg:73.28ms +[2025-09-02 15:17:09] [Rank 0] PRINT: step:2000/10000 val_loss:4.4459 svd_entropy: attn_qk:H=0.7233,top10E=0.32,eRank=129.8,q75/q25=56.56 attn_vo:H=0.7931,top10E=0.20,eRank=228.4,q75/q25=57.49 mlp_w1:H=0.8478,top10E=0.21,eRank=286.4,q75/q25=6.65 mlp_w2:H=0.9681,top10E=0.04,eRank=621.6,q75/q25=3.08 vo_prod:H=0.6715,top10E=0.33,eRank=88.9,q75/q25=4202.31 train_time:146553ms step_avg:73.28ms +[2025-09-02 15:17:09] [Rank 0] step:2001/10000 train_time:146566ms step_avg:73.25ms +[2025-09-02 15:17:09] [Rank 0] step:2001/10000 train_time:146566ms step_avg:73.25ms +[2025-09-02 15:17:11] [Rank 0] step:2021/10000 train_time:147978ms step_avg:73.22ms +[2025-09-02 15:17:11] [Rank 0] step:2021/10000 train_time:147978ms step_avg:73.22ms +[2025-09-02 15:17:12] [Rank 0] step:2041/10000 train_time:149632ms step_avg:73.31ms +[2025-09-02 15:17:12] [Rank 0] step:2041/10000 train_time:149632ms step_avg:73.31ms +[2025-09-02 15:17:14] [Rank 0] step:2061/10000 train_time:151103ms step_avg:73.32ms +[2025-09-02 15:17:14] [Rank 0] step:2061/10000 train_time:151103ms step_avg:73.32ms +[2025-09-02 15:17:15] [Rank 0] step:2081/10000 train_time:152576ms step_avg:73.32ms +[2025-09-02 15:17:15] [Rank 0] step:2081/10000 train_time:152576ms step_avg:73.32ms +[2025-09-02 15:17:17] [Rank 0] step:2101/10000 train_time:154048ms step_avg:73.32ms +[2025-09-02 15:17:17] [Rank 0] step:2101/10000 train_time:154048ms step_avg:73.32ms +[2025-09-02 15:17:18] [Rank 0] step:2121/10000 train_time:155520ms step_avg:73.32ms +[2025-09-02 15:17:18] [Rank 0] step:2121/10000 train_time:155520ms step_avg:73.32ms +[2025-09-02 15:17:20] 
[Rank 0] step:2141/10000 train_time:156994ms step_avg:73.33ms +[2025-09-02 15:17:20] [Rank 0] step:2141/10000 train_time:156994ms step_avg:73.33ms +[2025-09-02 15:17:21] [Rank 0] step:2161/10000 train_time:158467ms step_avg:73.33ms +[2025-09-02 15:17:21] [Rank 0] step:2161/10000 train_time:158467ms step_avg:73.33ms +[2025-09-02 15:17:23] [Rank 0] step:2181/10000 train_time:159940ms step_avg:73.33ms +[2025-09-02 15:17:23] [Rank 0] step:2181/10000 train_time:159940ms step_avg:73.33ms +[2025-09-02 15:17:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:17:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:17:35] [Rank 0] PRINT: step:2200/10000 val_loss:4.3789 svd_entropy: attn_qk:H=0.7278,top10E=0.31,eRank=133.4,q75/q25=58.46 attn_vo:H=0.7996,top10E=0.19,eRank=236.1,q75/q25=57.36 mlp_w1:H=0.8545,top10E=0.20,eRank=298.4,q75/q25=6.41 mlp_w2:H=0.9688,top10E=0.04,eRank=624.3,q75/q25=3.03 vo_prod:H=0.6793,top10E=0.32,eRank=93.7,q75/q25=4060.53 train_time:161488ms step_avg:73.40ms +[2025-09-02 15:17:35] [Rank 0] PRINT: step:2200/10000 val_loss:4.3789 svd_entropy: attn_qk:H=0.7278,top10E=0.31,eRank=133.4,q75/q25=58.46 attn_vo:H=0.7996,top10E=0.19,eRank=236.1,q75/q25=57.36 mlp_w1:H=0.8545,top10E=0.20,eRank=298.4,q75/q25=6.41 mlp_w2:H=0.9688,top10E=0.04,eRank=624.3,q75/q25=3.03 vo_prod:H=0.6793,top10E=0.32,eRank=93.7,q75/q25=4060.53 train_time:161488ms step_avg:73.40ms +[2025-09-02 15:17:35] [Rank 0] step:2201/10000 train_time:161502ms step_avg:73.38ms +[2025-09-02 15:17:35] [Rank 0] step:2201/10000 train_time:161502ms step_avg:73.38ms +[2025-09-02 15:17:37] [Rank 0] step:2221/10000 train_time:162928ms step_avg:73.36ms +[2025-09-02 15:17:37] [Rank 0] step:2221/10000 train_time:162928ms step_avg:73.36ms +[2025-09-02 15:17:38] [Rank 0] step:2241/10000 train_time:164436ms step_avg:73.38ms 
+[2025-09-02 15:17:38] [Rank 0] step:2241/10000 train_time:164436ms step_avg:73.38ms +[2025-09-02 15:17:40] [Rank 0] step:2261/10000 train_time:165951ms step_avg:73.40ms +[2025-09-02 15:17:40] [Rank 0] step:2261/10000 train_time:165951ms step_avg:73.40ms +[2025-09-02 15:17:42] [Rank 0] step:2281/10000 train_time:167469ms step_avg:73.42ms +[2025-09-02 15:17:42] [Rank 0] step:2281/10000 train_time:167469ms step_avg:73.42ms +[2025-09-02 15:17:43] [Rank 0] step:2301/10000 train_time:168986ms step_avg:73.44ms +[2025-09-02 15:17:43] [Rank 0] step:2301/10000 train_time:168986ms step_avg:73.44ms +[2025-09-02 15:17:45] [Rank 0] step:2321/10000 train_time:170504ms step_avg:73.46ms +[2025-09-02 15:17:45] [Rank 0] step:2321/10000 train_time:170504ms step_avg:73.46ms +[2025-09-02 15:17:46] [Rank 0] step:2341/10000 train_time:172023ms step_avg:73.48ms +[2025-09-02 15:17:46] [Rank 0] step:2341/10000 train_time:172023ms step_avg:73.48ms +[2025-09-02 15:17:48] [Rank 0] step:2361/10000 train_time:173542ms step_avg:73.50ms +[2025-09-02 15:17:48] [Rank 0] step:2361/10000 train_time:173542ms step_avg:73.50ms +[2025-09-02 15:17:49] [Rank 0] step:2381/10000 train_time:175063ms step_avg:73.53ms +[2025-09-02 15:17:49] [Rank 0] step:2381/10000 train_time:175063ms step_avg:73.53ms +[2025-09-02 15:17:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:17:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:18:02] [Rank 0] PRINT: step:2400/10000 val_loss:4.3013 svd_entropy: attn_qk:H=0.7312,top10E=0.31,eRank=136.0,q75/q25=59.40 attn_vo:H=0.8052,top10E=0.19,eRank=242.9,q75/q25=56.56 mlp_w1:H=0.8603,top10E=0.19,eRank=309.5,q75/q25=6.21 mlp_w2:H=0.9693,top10E=0.04,eRank=626.1,q75/q25=3.01 vo_prod:H=0.6860,top10E=0.31,eRank=98.2,q75/q25=3810.32 train_time:176661ms step_avg:73.61ms +[2025-09-02 15:18:02] [Rank 0] PRINT: step:2400/10000 val_loss:4.3013 svd_entropy: attn_qk:H=0.7312,top10E=0.31,eRank=136.0,q75/q25=59.40 attn_vo:H=0.8052,top10E=0.19,eRank=242.9,q75/q25=56.56 mlp_w1:H=0.8603,top10E=0.19,eRank=309.5,q75/q25=6.21 mlp_w2:H=0.9693,top10E=0.04,eRank=626.1,q75/q25=3.01 vo_prod:H=0.6860,top10E=0.31,eRank=98.2,q75/q25=3810.32 train_time:176661ms step_avg:73.61ms +[2025-09-02 15:18:02] [Rank 0] step:2401/10000 train_time:176675ms step_avg:73.58ms +[2025-09-02 15:18:02] [Rank 0] step:2401/10000 train_time:176675ms step_avg:73.58ms +[2025-09-02 15:18:04] [Rank 0] step:2421/10000 train_time:178133ms step_avg:73.58ms +[2025-09-02 15:18:04] [Rank 0] step:2421/10000 train_time:178133ms step_avg:73.58ms +[2025-09-02 15:18:05] [Rank 0] step:2441/10000 train_time:179647ms step_avg:73.60ms +[2025-09-02 15:18:05] [Rank 0] step:2441/10000 train_time:179647ms step_avg:73.60ms +[2025-09-02 15:18:07] [Rank 0] step:2461/10000 train_time:181162ms step_avg:73.61ms +[2025-09-02 15:18:07] [Rank 0] step:2461/10000 train_time:181162ms step_avg:73.61ms +[2025-09-02 15:18:08] [Rank 0] step:2481/10000 train_time:182676ms step_avg:73.63ms +[2025-09-02 15:18:08] [Rank 0] step:2481/10000 train_time:182676ms step_avg:73.63ms +[2025-09-02 15:18:10] [Rank 0] step:2501/10000 train_time:184250ms step_avg:73.67ms +[2025-09-02 15:18:10] [Rank 0] step:2501/10000 train_time:184250ms step_avg:73.67ms +[2025-09-02 15:18:11] [Rank 0] step:2521/10000 train_time:185767ms step_avg:73.69ms +[2025-09-02 15:18:11] [Rank 0] step:2521/10000 train_time:185767ms step_avg:73.69ms +[2025-09-02 15:18:13] 
[Rank 0] step:2541/10000 train_time:187327ms step_avg:73.72ms +[2025-09-02 15:18:13] [Rank 0] step:2541/10000 train_time:187327ms step_avg:73.72ms +[2025-09-02 15:18:15] [Rank 0] step:2561/10000 train_time:188843ms step_avg:73.74ms +[2025-09-02 15:18:15] [Rank 0] step:2561/10000 train_time:188843ms step_avg:73.74ms +[2025-09-02 15:18:16] [Rank 0] step:2581/10000 train_time:190360ms step_avg:73.75ms +[2025-09-02 15:18:16] [Rank 0] step:2581/10000 train_time:190360ms step_avg:73.75ms +[2025-09-02 15:18:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:18:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:18:29] [Rank 0] PRINT: step:2600/10000 val_loss:4.2533 svd_entropy: attn_qk:H=0.7351,top10E=0.31,eRank=139.1,q75/q25=60.11 attn_vo:H=0.8100,top10E=0.18,eRank=248.9,q75/q25=55.69 mlp_w1:H=0.8653,top10E=0.19,eRank=319.4,q75/q25=6.04 mlp_w2:H=0.9696,top10E=0.04,eRank=627.6,q75/q25=2.99 vo_prod:H=0.6920,top10E=0.30,eRank=102.3,q75/q25=3572.01 train_time:191953ms step_avg:73.83ms +[2025-09-02 15:18:29] [Rank 0] PRINT: step:2600/10000 val_loss:4.2533 svd_entropy: attn_qk:H=0.7351,top10E=0.31,eRank=139.1,q75/q25=60.11 attn_vo:H=0.8100,top10E=0.18,eRank=248.9,q75/q25=55.69 mlp_w1:H=0.8653,top10E=0.19,eRank=319.4,q75/q25=6.04 mlp_w2:H=0.9696,top10E=0.04,eRank=627.6,q75/q25=2.99 vo_prod:H=0.6920,top10E=0.30,eRank=102.3,q75/q25=3572.01 train_time:191953ms step_avg:73.83ms +[2025-09-02 15:18:29] [Rank 0] step:2601/10000 train_time:191967ms step_avg:73.80ms +[2025-09-02 15:18:29] [Rank 0] step:2601/10000 train_time:191967ms step_avg:73.80ms +[2025-09-02 15:18:31] [Rank 0] step:2621/10000 train_time:193414ms step_avg:73.79ms +[2025-09-02 15:18:31] [Rank 0] step:2621/10000 train_time:193414ms step_avg:73.79ms +[2025-09-02 15:18:32] [Rank 0] step:2641/10000 train_time:194927ms step_avg:73.81ms 
+[2025-09-02 15:18:32] [Rank 0] step:2641/10000 train_time:194927ms step_avg:73.81ms +[2025-09-02 15:18:34] [Rank 0] step:2661/10000 train_time:196442ms step_avg:73.82ms +[2025-09-02 15:18:34] [Rank 0] step:2661/10000 train_time:196442ms step_avg:73.82ms +[2025-09-02 15:18:35] [Rank 0] step:2681/10000 train_time:197959ms step_avg:73.84ms +[2025-09-02 15:18:35] [Rank 0] step:2681/10000 train_time:197959ms step_avg:73.84ms +[2025-09-02 15:18:37] [Rank 0] step:2701/10000 train_time:199474ms step_avg:73.85ms +[2025-09-02 15:18:37] [Rank 0] step:2701/10000 train_time:199474ms step_avg:73.85ms +[2025-09-02 15:18:38] [Rank 0] step:2721/10000 train_time:200990ms step_avg:73.87ms +[2025-09-02 15:18:38] [Rank 0] step:2721/10000 train_time:200990ms step_avg:73.87ms +[2025-09-02 15:18:40] [Rank 0] step:2741/10000 train_time:202505ms step_avg:73.88ms +[2025-09-02 15:18:40] [Rank 0] step:2741/10000 train_time:202505ms step_avg:73.88ms +[2025-09-02 15:18:41] [Rank 0] step:2761/10000 train_time:204022ms step_avg:73.89ms +[2025-09-02 15:18:41] [Rank 0] step:2761/10000 train_time:204022ms step_avg:73.89ms +[2025-09-02 15:18:43] [Rank 0] step:2781/10000 train_time:205538ms step_avg:73.91ms +[2025-09-02 15:18:43] [Rank 0] step:2781/10000 train_time:205538ms step_avg:73.91ms +[2025-09-02 15:18:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:18:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:18:56] [Rank 0] PRINT: step:2800/10000 val_loss:4.2148 svd_entropy: attn_qk:H=0.7387,top10E=0.30,eRank=142.0,q75/q25=61.12 attn_vo:H=0.8142,top10E=0.17,eRank=254.4,q75/q25=54.46 mlp_w1:H=0.8697,top10E=0.18,eRank=328.3,q75/q25=5.85 mlp_w2:H=0.9699,top10E=0.04,eRank=628.7,q75/q25=2.97 vo_prod:H=0.6976,top10E=0.29,eRank=106.3,q75/q25=3386.75 train_time:207133ms step_avg:73.98ms +[2025-09-02 15:18:56] [Rank 0] PRINT: step:2800/10000 val_loss:4.2148 svd_entropy: attn_qk:H=0.7387,top10E=0.30,eRank=142.0,q75/q25=61.12 attn_vo:H=0.8142,top10E=0.17,eRank=254.4,q75/q25=54.46 mlp_w1:H=0.8697,top10E=0.18,eRank=328.3,q75/q25=5.85 mlp_w2:H=0.9699,top10E=0.04,eRank=628.7,q75/q25=2.97 vo_prod:H=0.6976,top10E=0.29,eRank=106.3,q75/q25=3386.75 train_time:207133ms step_avg:73.98ms +[2025-09-02 15:18:56] [Rank 0] step:2801/10000 train_time:207146ms step_avg:73.95ms +[2025-09-02 15:18:56] [Rank 0] step:2801/10000 train_time:207146ms step_avg:73.95ms +[2025-09-02 15:18:58] [Rank 0] step:2821/10000 train_time:208598ms step_avg:73.94ms +[2025-09-02 15:18:58] [Rank 0] step:2821/10000 train_time:208598ms step_avg:73.94ms +[2025-09-02 15:18:59] [Rank 0] step:2841/10000 train_time:210114ms step_avg:73.96ms +[2025-09-02 15:18:59] [Rank 0] step:2841/10000 train_time:210114ms step_avg:73.96ms +[2025-09-02 15:19:01] [Rank 0] step:2861/10000 train_time:211633ms step_avg:73.97ms +[2025-09-02 15:19:01] [Rank 0] step:2861/10000 train_time:211633ms step_avg:73.97ms +[2025-09-02 15:19:02] [Rank 0] step:2881/10000 train_time:213152ms step_avg:73.99ms +[2025-09-02 15:19:02] [Rank 0] step:2881/10000 train_time:213152ms step_avg:73.99ms +[2025-09-02 15:19:04] [Rank 0] step:2901/10000 train_time:214665ms step_avg:74.00ms +[2025-09-02 15:19:04] [Rank 0] step:2901/10000 train_time:214665ms step_avg:74.00ms +[2025-09-02 15:19:05] [Rank 0] step:2921/10000 train_time:216182ms step_avg:74.01ms +[2025-09-02 15:19:05] [Rank 0] step:2921/10000 train_time:216182ms step_avg:74.01ms +[2025-09-02 
15:19:07] [Rank 0] step:2941/10000 train_time:217699ms step_avg:74.02ms +[2025-09-02 15:19:07] [Rank 0] step:2941/10000 train_time:217699ms step_avg:74.02ms +[2025-09-02 15:19:08] [Rank 0] step:2961/10000 train_time:219216ms step_avg:74.03ms +[2025-09-02 15:19:08] [Rank 0] step:2961/10000 train_time:219216ms step_avg:74.03ms +[2025-09-02 15:19:10] [Rank 0] step:2981/10000 train_time:220740ms step_avg:74.05ms +[2025-09-02 15:19:10] [Rank 0] step:2981/10000 train_time:220740ms step_avg:74.05ms +[2025-09-02 15:19:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:19:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:19:23] [Rank 0] PRINT: step:3000/10000 val_loss:4.1732 svd_entropy: attn_qk:H=0.7420,top10E=0.30,eRank=144.9,q75/q25=61.04 attn_vo:H=0.8179,top10E=0.17,eRank=259.3,q75/q25=53.45 mlp_w1:H=0.8734,top10E=0.18,eRank=336.1,q75/q25=5.71 mlp_w2:H=0.9701,top10E=0.04,eRank=629.5,q75/q25=2.96 vo_prod:H=0.7025,top10E=0.29,eRank=110.1,q75/q25=3065.36 train_time:222344ms step_avg:74.11ms +[2025-09-02 15:19:23] [Rank 0] PRINT: step:3000/10000 val_loss:4.1732 svd_entropy: attn_qk:H=0.7420,top10E=0.30,eRank=144.9,q75/q25=61.04 attn_vo:H=0.8179,top10E=0.17,eRank=259.3,q75/q25=53.45 mlp_w1:H=0.8734,top10E=0.18,eRank=336.1,q75/q25=5.71 mlp_w2:H=0.9701,top10E=0.04,eRank=629.5,q75/q25=2.96 vo_prod:H=0.7025,top10E=0.29,eRank=110.1,q75/q25=3065.36 train_time:222344ms step_avg:74.11ms +[2025-09-02 15:19:23] [Rank 0] step:3001/10000 train_time:222357ms step_avg:74.09ms +[2025-09-02 15:19:23] [Rank 0] step:3001/10000 train_time:222357ms step_avg:74.09ms +[2025-09-02 15:19:24] [Rank 0] step:3021/10000 train_time:223805ms step_avg:74.08ms +[2025-09-02 15:19:24] [Rank 0] step:3021/10000 train_time:223805ms step_avg:74.08ms +[2025-09-02 15:19:26] [Rank 0] step:3041/10000 train_time:225327ms 
step_avg:74.10ms +[2025-09-02 15:19:26] [Rank 0] step:3041/10000 train_time:225327ms step_avg:74.10ms +[2025-09-02 15:19:28] [Rank 0] step:3061/10000 train_time:226849ms step_avg:74.11ms +[2025-09-02 15:19:28] [Rank 0] step:3061/10000 train_time:226849ms step_avg:74.11ms +[2025-09-02 15:19:29] [Rank 0] step:3081/10000 train_time:228372ms step_avg:74.12ms +[2025-09-02 15:19:29] [Rank 0] step:3081/10000 train_time:228372ms step_avg:74.12ms +[2025-09-02 15:19:31] [Rank 0] step:3101/10000 train_time:229897ms step_avg:74.14ms +[2025-09-02 15:19:31] [Rank 0] step:3101/10000 train_time:229897ms step_avg:74.14ms +[2025-09-02 15:19:32] [Rank 0] step:3121/10000 train_time:231423ms step_avg:74.15ms +[2025-09-02 15:19:32] [Rank 0] step:3121/10000 train_time:231423ms step_avg:74.15ms +[2025-09-02 15:19:34] [Rank 0] step:3141/10000 train_time:232949ms step_avg:74.16ms +[2025-09-02 15:19:34] [Rank 0] step:3141/10000 train_time:232949ms step_avg:74.16ms +[2025-09-02 15:19:35] [Rank 0] step:3161/10000 train_time:234474ms step_avg:74.18ms +[2025-09-02 15:19:35] [Rank 0] step:3161/10000 train_time:234474ms step_avg:74.18ms +[2025-09-02 15:19:37] [Rank 0] step:3181/10000 train_time:235998ms step_avg:74.19ms +[2025-09-02 15:19:37] [Rank 0] step:3181/10000 train_time:235998ms step_avg:74.19ms +[2025-09-02 15:19:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:19:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:19:50] [Rank 0] PRINT: step:3200/10000 val_loss:4.1378 svd_entropy: attn_qk:H=0.7449,top10E=0.29,eRank=147.3,q75/q25=61.22 attn_vo:H=0.8211,top10E=0.17,eRank=263.7,q75/q25=52.26 mlp_w1:H=0.8768,top10E=0.17,eRank=343.5,q75/q25=5.58 mlp_w2:H=0.9702,top10E=0.04,eRank=630.2,q75/q25=2.95 vo_prod:H=0.7069,top10E=0.28,eRank=113.6,q75/q25=2857.26 train_time:237601ms step_avg:74.25ms +[2025-09-02 15:19:50] [Rank 0] PRINT: step:3200/10000 val_loss:4.1378 svd_entropy: attn_qk:H=0.7449,top10E=0.29,eRank=147.3,q75/q25=61.22 attn_vo:H=0.8211,top10E=0.17,eRank=263.7,q75/q25=52.26 mlp_w1:H=0.8768,top10E=0.17,eRank=343.5,q75/q25=5.58 mlp_w2:H=0.9702,top10E=0.04,eRank=630.2,q75/q25=2.95 vo_prod:H=0.7069,top10E=0.28,eRank=113.6,q75/q25=2857.26 train_time:237601ms step_avg:74.25ms +[2025-09-02 15:19:50] [Rank 0] step:3201/10000 train_time:237614ms step_avg:74.23ms +[2025-09-02 15:19:50] [Rank 0] step:3201/10000 train_time:237614ms step_avg:74.23ms +[2025-09-02 15:19:51] [Rank 0] step:3221/10000 train_time:239077ms step_avg:74.22ms +[2025-09-02 15:19:51] [Rank 0] step:3221/10000 train_time:239077ms step_avg:74.22ms +[2025-09-02 15:19:53] [Rank 0] step:3241/10000 train_time:240599ms step_avg:74.24ms +[2025-09-02 15:19:53] [Rank 0] step:3241/10000 train_time:240599ms step_avg:74.24ms +[2025-09-02 15:19:54] [Rank 0] step:3261/10000 train_time:242122ms step_avg:74.25ms +[2025-09-02 15:19:54] [Rank 0] step:3261/10000 train_time:242122ms step_avg:74.25ms +[2025-09-02 15:19:56] [Rank 0] step:3281/10000 train_time:243646ms step_avg:74.26ms +[2025-09-02 15:19:56] [Rank 0] step:3281/10000 train_time:243646ms step_avg:74.26ms +[2025-09-02 15:19:58] [Rank 0] step:3301/10000 train_time:245171ms step_avg:74.27ms +[2025-09-02 15:19:58] [Rank 0] step:3301/10000 train_time:245171ms step_avg:74.27ms +[2025-09-02 15:19:59] [Rank 0] step:3321/10000 train_time:246699ms step_avg:74.28ms +[2025-09-02 15:19:59] [Rank 0] step:3321/10000 train_time:246699ms step_avg:74.28ms +[2025-09-02 
15:20:01] [Rank 0] step:3341/10000 train_time:248225ms step_avg:74.30ms +[2025-09-02 15:20:01] [Rank 0] step:3341/10000 train_time:248225ms step_avg:74.30ms +[2025-09-02 15:20:02] [Rank 0] step:3361/10000 train_time:249750ms step_avg:74.31ms +[2025-09-02 15:20:02] [Rank 0] step:3361/10000 train_time:249750ms step_avg:74.31ms +[2025-09-02 15:20:04] [Rank 0] step:3381/10000 train_time:251276ms step_avg:74.32ms +[2025-09-02 15:20:04] [Rank 0] step:3381/10000 train_time:251276ms step_avg:74.32ms +[2025-09-02 15:20:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:20:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:20:17] [Rank 0] PRINT: step:3400/10000 val_loss:4.1001 svd_entropy: attn_qk:H=0.7475,top10E=0.29,eRank=149.5,q75/q25=61.34 attn_vo:H=0.8241,top10E=0.16,eRank=267.9,q75/q25=51.18 mlp_w1:H=0.8800,top10E=0.17,eRank=350.6,q75/q25=5.47 mlp_w2:H=0.9703,top10E=0.04,eRank=630.7,q75/q25=2.94 vo_prod:H=0.7112,top10E=0.28,eRank=117.1,q75/q25=2624.21 train_time:252881ms step_avg:74.38ms +[2025-09-02 15:20:17] [Rank 0] PRINT: step:3400/10000 val_loss:4.1001 svd_entropy: attn_qk:H=0.7475,top10E=0.29,eRank=149.5,q75/q25=61.34 attn_vo:H=0.8241,top10E=0.16,eRank=267.9,q75/q25=51.18 mlp_w1:H=0.8800,top10E=0.17,eRank=350.6,q75/q25=5.47 mlp_w2:H=0.9703,top10E=0.04,eRank=630.7,q75/q25=2.94 vo_prod:H=0.7112,top10E=0.28,eRank=117.1,q75/q25=2624.21 train_time:252881ms step_avg:74.38ms +[2025-09-02 15:20:17] [Rank 0] step:3401/10000 train_time:252895ms step_avg:74.36ms +[2025-09-02 15:20:17] [Rank 0] step:3401/10000 train_time:252895ms step_avg:74.36ms +[2025-09-02 15:20:19] [Rank 0] step:3421/10000 train_time:254394ms step_avg:74.36ms +[2025-09-02 15:20:19] [Rank 0] step:3421/10000 train_time:254394ms step_avg:74.36ms +[2025-09-02 15:20:20] [Rank 0] step:3441/10000 train_time:255914ms 
step_avg:74.37ms +[2025-09-02 15:20:20] [Rank 0] step:3441/10000 train_time:255914ms step_avg:74.37ms +[2025-09-02 15:20:22] [Rank 0] step:3461/10000 train_time:257438ms step_avg:74.38ms +[2025-09-02 15:20:22] [Rank 0] step:3461/10000 train_time:257438ms step_avg:74.38ms +[2025-09-02 15:20:23] [Rank 0] step:3481/10000 train_time:258960ms step_avg:74.39ms +[2025-09-02 15:20:23] [Rank 0] step:3481/10000 train_time:258960ms step_avg:74.39ms +[2025-09-02 15:20:25] [Rank 0] step:3501/10000 train_time:260485ms step_avg:74.40ms +[2025-09-02 15:20:25] [Rank 0] step:3501/10000 train_time:260485ms step_avg:74.40ms +[2025-09-02 15:20:26] [Rank 0] step:3521/10000 train_time:262010ms step_avg:74.41ms +[2025-09-02 15:20:26] [Rank 0] step:3521/10000 train_time:262010ms step_avg:74.41ms +[2025-09-02 15:20:28] [Rank 0] step:3541/10000 train_time:263534ms step_avg:74.42ms +[2025-09-02 15:20:28] [Rank 0] step:3541/10000 train_time:263534ms step_avg:74.42ms +[2025-09-02 15:20:29] [Rank 0] step:3561/10000 train_time:265058ms step_avg:74.43ms +[2025-09-02 15:20:29] [Rank 0] step:3561/10000 train_time:265058ms step_avg:74.43ms +[2025-09-02 15:20:31] [Rank 0] step:3581/10000 train_time:266583ms step_avg:74.44ms +[2025-09-02 15:20:31] [Rank 0] step:3581/10000 train_time:266583ms step_avg:74.44ms +[2025-09-02 15:20:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:20:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:20:44] [Rank 0] PRINT: step:3600/10000 val_loss:4.0845 svd_entropy: attn_qk:H=0.7498,top10E=0.29,eRank=151.6,q75/q25=61.08 attn_vo:H=0.8267,top10E=0.16,eRank=271.6,q75/q25=49.84 mlp_w1:H=0.8828,top10E=0.17,eRank=356.8,q75/q25=5.36 mlp_w2:H=0.9704,top10E=0.04,eRank=631.1,q75/q25=2.93 vo_prod:H=0.7148,top10E=0.27,eRank=120.2,q75/q25=2464.66 train_time:268187ms step_avg:74.50ms +[2025-09-02 15:20:44] [Rank 0] PRINT: step:3600/10000 val_loss:4.0845 svd_entropy: attn_qk:H=0.7498,top10E=0.29,eRank=151.6,q75/q25=61.08 attn_vo:H=0.8267,top10E=0.16,eRank=271.6,q75/q25=49.84 mlp_w1:H=0.8828,top10E=0.17,eRank=356.8,q75/q25=5.36 mlp_w2:H=0.9704,top10E=0.04,eRank=631.1,q75/q25=2.93 vo_prod:H=0.7148,top10E=0.27,eRank=120.2,q75/q25=2464.66 train_time:268187ms step_avg:74.50ms +[2025-09-02 15:20:44] [Rank 0] step:3601/10000 train_time:268200ms step_avg:74.48ms +[2025-09-02 15:20:44] [Rank 0] step:3601/10000 train_time:268200ms step_avg:74.48ms +[2025-09-02 15:20:46] [Rank 0] step:3621/10000 train_time:269660ms step_avg:74.47ms +[2025-09-02 15:20:46] [Rank 0] step:3621/10000 train_time:269660ms step_avg:74.47ms +[2025-09-02 15:20:47] [Rank 0] step:3641/10000 train_time:271182ms step_avg:74.48ms +[2025-09-02 15:20:47] [Rank 0] step:3641/10000 train_time:271182ms step_avg:74.48ms +[2025-09-02 15:20:49] [Rank 0] step:3661/10000 train_time:272706ms step_avg:74.49ms +[2025-09-02 15:20:49] [Rank 0] step:3661/10000 train_time:272706ms step_avg:74.49ms +[2025-09-02 15:20:50] [Rank 0] step:3681/10000 train_time:274230ms step_avg:74.50ms +[2025-09-02 15:20:50] [Rank 0] step:3681/10000 train_time:274230ms step_avg:74.50ms +[2025-09-02 15:20:52] [Rank 0] step:3701/10000 train_time:275756ms step_avg:74.51ms +[2025-09-02 15:20:52] [Rank 0] step:3701/10000 train_time:275756ms step_avg:74.51ms +[2025-09-02 15:20:53] [Rank 0] step:3721/10000 train_time:277309ms step_avg:74.53ms +[2025-09-02 15:20:53] [Rank 0] step:3721/10000 train_time:277309ms step_avg:74.53ms +[2025-09-02 
15:20:55] [Rank 0] step:3741/10000 train_time:278868ms step_avg:74.54ms +[2025-09-02 15:20:55] [Rank 0] step:3741/10000 train_time:278868ms step_avg:74.54ms +[2025-09-02 15:20:56] [Rank 0] step:3761/10000 train_time:280427ms step_avg:74.56ms +[2025-09-02 15:20:56] [Rank 0] step:3761/10000 train_time:280427ms step_avg:74.56ms +[2025-09-02 15:20:58] [Rank 0] step:3781/10000 train_time:281991ms step_avg:74.58ms +[2025-09-02 15:20:58] [Rank 0] step:3781/10000 train_time:281991ms step_avg:74.58ms +[2025-09-02 15:20:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:20:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:21:11] [Rank 0] PRINT: step:3800/10000 val_loss:4.0410 svd_entropy: attn_qk:H=0.7518,top10E=0.29,eRank=153.4,q75/q25=61.27 attn_vo:H=0.8290,top10E=0.16,eRank=274.9,q75/q25=48.94 mlp_w1:H=0.8853,top10E=0.16,eRank=362.7,q75/q25=5.26 mlp_w2:H=0.9705,top10E=0.04,eRank=631.4,q75/q25=2.93 vo_prod:H=0.7183,top10E=0.27,eRank=123.1,q75/q25=2303.77 train_time:283632ms step_avg:74.64ms +[2025-09-02 15:21:11] [Rank 0] PRINT: step:3800/10000 val_loss:4.0410 svd_entropy: attn_qk:H=0.7518,top10E=0.29,eRank=153.4,q75/q25=61.27 attn_vo:H=0.8290,top10E=0.16,eRank=274.9,q75/q25=48.94 mlp_w1:H=0.8853,top10E=0.16,eRank=362.7,q75/q25=5.26 mlp_w2:H=0.9705,top10E=0.04,eRank=631.4,q75/q25=2.93 vo_prod:H=0.7183,top10E=0.27,eRank=123.1,q75/q25=2303.77 train_time:283632ms step_avg:74.64ms +[2025-09-02 15:21:11] [Rank 0] step:3801/10000 train_time:283646ms step_avg:74.62ms +[2025-09-02 15:21:11] [Rank 0] step:3801/10000 train_time:283646ms step_avg:74.62ms +[2025-09-02 15:21:13] [Rank 0] step:3821/10000 train_time:285128ms step_avg:74.62ms +[2025-09-02 15:21:13] [Rank 0] step:3821/10000 train_time:285128ms step_avg:74.62ms +[2025-09-02 15:21:14] [Rank 0] step:3841/10000 train_time:286687ms 
step_avg:74.64ms +[2025-09-02 15:21:14] [Rank 0] step:3841/10000 train_time:286687ms step_avg:74.64ms +[2025-09-02 15:21:16] [Rank 0] step:3861/10000 train_time:288250ms step_avg:74.66ms +[2025-09-02 15:21:16] [Rank 0] step:3861/10000 train_time:288250ms step_avg:74.66ms +[2025-09-02 15:21:17] [Rank 0] step:3881/10000 train_time:289809ms step_avg:74.67ms +[2025-09-02 15:21:17] [Rank 0] step:3881/10000 train_time:289809ms step_avg:74.67ms +[2025-09-02 15:21:19] [Rank 0] step:3901/10000 train_time:291370ms step_avg:74.69ms +[2025-09-02 15:21:19] [Rank 0] step:3901/10000 train_time:291370ms step_avg:74.69ms +[2025-09-02 15:21:21] [Rank 0] step:3921/10000 train_time:292966ms step_avg:74.72ms +[2025-09-02 15:21:21] [Rank 0] step:3921/10000 train_time:292966ms step_avg:74.72ms +[2025-09-02 15:21:22] [Rank 0] step:3941/10000 train_time:294529ms step_avg:74.73ms +[2025-09-02 15:21:22] [Rank 0] step:3941/10000 train_time:294529ms step_avg:74.73ms +[2025-09-02 15:21:24] [Rank 0] step:3961/10000 train_time:296089ms step_avg:74.75ms +[2025-09-02 15:21:24] [Rank 0] step:3961/10000 train_time:296089ms step_avg:74.75ms +[2025-09-02 15:21:25] [Rank 0] step:3981/10000 train_time:297651ms step_avg:74.77ms +[2025-09-02 15:21:25] [Rank 0] step:3981/10000 train_time:297651ms step_avg:74.77ms +[2025-09-02 15:21:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:21:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:21:38] [Rank 0] PRINT: step:4000/10000 val_loss:4.0145 svd_entropy: attn_qk:H=0.7539,top10E=0.28,eRank=155.4,q75/q25=60.35 attn_vo:H=0.8311,top10E=0.15,eRank=278.0,q75/q25=47.57 mlp_w1:H=0.8877,top10E=0.16,eRank=368.2,q75/q25=5.17 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.92 vo_prod:H=0.7216,top10E=0.26,eRank=126.1,q75/q25=2098.41 train_time:299293ms step_avg:74.82ms +[2025-09-02 15:21:38] [Rank 0] PRINT: step:4000/10000 val_loss:4.0145 svd_entropy: attn_qk:H=0.7539,top10E=0.28,eRank=155.4,q75/q25=60.35 attn_vo:H=0.8311,top10E=0.15,eRank=278.0,q75/q25=47.57 mlp_w1:H=0.8877,top10E=0.16,eRank=368.2,q75/q25=5.17 mlp_w2:H=0.9706,top10E=0.04,eRank=631.7,q75/q25=2.92 vo_prod:H=0.7216,top10E=0.26,eRank=126.1,q75/q25=2098.41 train_time:299293ms step_avg:74.82ms +[2025-09-02 15:21:39] [Rank 0] step:4001/10000 train_time:299306ms step_avg:74.81ms +[2025-09-02 15:21:39] [Rank 0] step:4001/10000 train_time:299306ms step_avg:74.81ms +[2025-09-02 15:21:40] [Rank 0] step:4021/10000 train_time:300801ms step_avg:74.81ms +[2025-09-02 15:21:40] [Rank 0] step:4021/10000 train_time:300801ms step_avg:74.81ms +[2025-09-02 15:21:42] [Rank 0] step:4041/10000 train_time:302362ms step_avg:74.82ms +[2025-09-02 15:21:42] [Rank 0] step:4041/10000 train_time:302362ms step_avg:74.82ms +[2025-09-02 15:21:43] [Rank 0] step:4061/10000 train_time:303925ms step_avg:74.84ms +[2025-09-02 15:21:43] [Rank 0] step:4061/10000 train_time:303925ms step_avg:74.84ms +[2025-09-02 15:21:45] [Rank 0] step:4081/10000 train_time:305670ms step_avg:74.90ms +[2025-09-02 15:21:45] [Rank 0] step:4081/10000 train_time:305670ms step_avg:74.90ms +[2025-09-02 15:21:47] [Rank 0] step:4101/10000 train_time:307231ms step_avg:74.92ms +[2025-09-02 15:21:47] [Rank 0] step:4101/10000 train_time:307231ms step_avg:74.92ms +[2025-09-02 15:21:48] [Rank 0] step:4121/10000 train_time:308795ms step_avg:74.93ms +[2025-09-02 15:21:48] [Rank 0] step:4121/10000 train_time:308795ms step_avg:74.93ms +[2025-09-02 
15:21:50] [Rank 0] step:4141/10000 train_time:310359ms step_avg:74.95ms +[2025-09-02 15:21:50] [Rank 0] step:4141/10000 train_time:310359ms step_avg:74.95ms +[2025-09-02 15:21:51] [Rank 0] step:4161/10000 train_time:311923ms step_avg:74.96ms +[2025-09-02 15:21:51] [Rank 0] step:4161/10000 train_time:311923ms step_avg:74.96ms +[2025-09-02 15:21:53] [Rank 0] step:4181/10000 train_time:313489ms step_avg:74.98ms +[2025-09-02 15:21:53] [Rank 0] step:4181/10000 train_time:313489ms step_avg:74.98ms +[2025-09-02 15:21:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:21:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:22:06] [Rank 0] PRINT: step:4200/10000 val_loss:3.9947 svd_entropy: attn_qk:H=0.7558,top10E=0.28,eRank=157.1,q75/q25=60.07 attn_vo:H=0.8331,top10E=0.15,eRank=280.9,q75/q25=46.43 mlp_w1:H=0.8898,top10E=0.16,eRank=373.3,q75/q25=5.10 mlp_w2:H=0.9706,top10E=0.04,eRank=631.8,q75/q25=2.92 vo_prod:H=0.7244,top10E=0.26,eRank=128.6,q75/q25=1957.72 train_time:315132ms step_avg:75.03ms +[2025-09-02 15:22:06] [Rank 0] PRINT: step:4200/10000 val_loss:3.9947 svd_entropy: attn_qk:H=0.7558,top10E=0.28,eRank=157.1,q75/q25=60.07 attn_vo:H=0.8331,top10E=0.15,eRank=280.9,q75/q25=46.43 mlp_w1:H=0.8898,top10E=0.16,eRank=373.3,q75/q25=5.10 mlp_w2:H=0.9706,top10E=0.04,eRank=631.8,q75/q25=2.92 vo_prod:H=0.7244,top10E=0.26,eRank=128.6,q75/q25=1957.72 train_time:315132ms step_avg:75.03ms +[2025-09-02 15:22:06] [Rank 0] step:4201/10000 train_time:315145ms step_avg:75.02ms +[2025-09-02 15:22:06] [Rank 0] step:4201/10000 train_time:315145ms step_avg:75.02ms +[2025-09-02 15:22:08] [Rank 0] step:4221/10000 train_time:316634ms step_avg:75.01ms +[2025-09-02 15:22:08] [Rank 0] step:4221/10000 train_time:316634ms step_avg:75.01ms +[2025-09-02 15:22:09] [Rank 0] step:4241/10000 train_time:318193ms 
step_avg:75.03ms +[2025-09-02 15:22:09] [Rank 0] step:4241/10000 train_time:318193ms step_avg:75.03ms +[2025-09-02 15:22:11] [Rank 0] step:4261/10000 train_time:319753ms step_avg:75.04ms +[2025-09-02 15:22:11] [Rank 0] step:4261/10000 train_time:319753ms step_avg:75.04ms +[2025-09-02 15:22:12] [Rank 0] step:4281/10000 train_time:321314ms step_avg:75.06ms +[2025-09-02 15:22:12] [Rank 0] step:4281/10000 train_time:321314ms step_avg:75.06ms +[2025-09-02 15:22:14] [Rank 0] step:4301/10000 train_time:322875ms step_avg:75.07ms +[2025-09-02 15:22:14] [Rank 0] step:4301/10000 train_time:322875ms step_avg:75.07ms +[2025-09-02 15:22:15] [Rank 0] step:4321/10000 train_time:324438ms step_avg:75.08ms +[2025-09-02 15:22:15] [Rank 0] step:4321/10000 train_time:324438ms step_avg:75.08ms +[2025-09-02 15:22:17] [Rank 0] step:4341/10000 train_time:325997ms step_avg:75.10ms +[2025-09-02 15:22:17] [Rank 0] step:4341/10000 train_time:325997ms step_avg:75.10ms +[2025-09-02 15:22:19] [Rank 0] step:4361/10000 train_time:327561ms step_avg:75.11ms +[2025-09-02 15:22:19] [Rank 0] step:4361/10000 train_time:327561ms step_avg:75.11ms +[2025-09-02 15:22:20] [Rank 0] step:4381/10000 train_time:329121ms step_avg:75.12ms +[2025-09-02 15:22:20] [Rank 0] step:4381/10000 train_time:329121ms step_avg:75.12ms +[2025-09-02 15:22:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:22:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:22:33] [Rank 0] PRINT: step:4400/10000 val_loss:3.9705 svd_entropy: attn_qk:H=0.7576,top10E=0.28,eRank=158.8,q75/q25=59.74 attn_vo:H=0.8349,top10E=0.15,eRank=283.6,q75/q25=45.32 mlp_w1:H=0.8919,top10E=0.16,eRank=378.3,q75/q25=5.03 mlp_w2:H=0.9706,top10E=0.04,eRank=632.0,q75/q25=2.92 vo_prod:H=0.7271,top10E=0.25,eRank=131.2,q75/q25=1830.86 train_time:330813ms step_avg:75.18ms +[2025-09-02 15:22:33] [Rank 0] PRINT: step:4400/10000 val_loss:3.9705 svd_entropy: attn_qk:H=0.7576,top10E=0.28,eRank=158.8,q75/q25=59.74 attn_vo:H=0.8349,top10E=0.15,eRank=283.6,q75/q25=45.32 mlp_w1:H=0.8919,top10E=0.16,eRank=378.3,q75/q25=5.03 mlp_w2:H=0.9706,top10E=0.04,eRank=632.0,q75/q25=2.92 vo_prod:H=0.7271,top10E=0.25,eRank=131.2,q75/q25=1830.86 train_time:330813ms step_avg:75.18ms +[2025-09-02 15:22:33] [Rank 0] step:4401/10000 train_time:330827ms step_avg:75.17ms +[2025-09-02 15:22:33] [Rank 0] step:4401/10000 train_time:330827ms step_avg:75.17ms +[2025-09-02 15:22:35] [Rank 0] step:4421/10000 train_time:332314ms step_avg:75.17ms +[2025-09-02 15:22:35] [Rank 0] step:4421/10000 train_time:332314ms step_avg:75.17ms +[2025-09-02 15:22:37] [Rank 0] step:4441/10000 train_time:333871ms step_avg:75.18ms +[2025-09-02 15:22:37] [Rank 0] step:4441/10000 train_time:333871ms step_avg:75.18ms +[2025-09-02 15:22:38] [Rank 0] step:4461/10000 train_time:335434ms step_avg:75.19ms +[2025-09-02 15:22:38] [Rank 0] step:4461/10000 train_time:335434ms step_avg:75.19ms +[2025-09-02 15:22:40] [Rank 0] step:4481/10000 train_time:336998ms step_avg:75.21ms +[2025-09-02 15:22:40] [Rank 0] step:4481/10000 train_time:336998ms step_avg:75.21ms +[2025-09-02 15:22:41] [Rank 0] step:4501/10000 train_time:338561ms step_avg:75.22ms +[2025-09-02 15:22:41] [Rank 0] step:4501/10000 train_time:338561ms step_avg:75.22ms +[2025-09-02 15:22:43] [Rank 0] step:4521/10000 train_time:340124ms step_avg:75.23ms +[2025-09-02 15:22:43] [Rank 0] step:4521/10000 train_time:340124ms step_avg:75.23ms +[2025-09-02 
15:22:44] [Rank 0] step:4541/10000 train_time:341691ms step_avg:75.25ms +[2025-09-02 15:22:44] [Rank 0] step:4541/10000 train_time:341691ms step_avg:75.25ms +[2025-09-02 15:22:46] [Rank 0] step:4561/10000 train_time:343259ms step_avg:75.26ms +[2025-09-02 15:22:46] [Rank 0] step:4561/10000 train_time:343259ms step_avg:75.26ms +[2025-09-02 15:22:48] [Rank 0] step:4581/10000 train_time:344828ms step_avg:75.27ms +[2025-09-02 15:22:48] [Rank 0] step:4581/10000 train_time:344828ms step_avg:75.27ms +[2025-09-02 15:22:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:22:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:23:01] [Rank 0] PRINT: step:4600/10000 val_loss:3.9449 svd_entropy: attn_qk:H=0.7595,top10E=0.28,eRank=160.6,q75/q25=59.54 attn_vo:H=0.8365,top10E=0.15,eRank=286.2,q75/q25=44.39 mlp_w1:H=0.8939,top10E=0.16,eRank=383.3,q75/q25=4.96 mlp_w2:H=0.9707,top10E=0.04,eRank=632.1,q75/q25=2.91 vo_prod:H=0.7296,top10E=0.25,eRank=133.5,q75/q25=1733.75 train_time:346476ms step_avg:75.32ms +[2025-09-02 15:23:01] [Rank 0] PRINT: step:4600/10000 val_loss:3.9449 svd_entropy: attn_qk:H=0.7595,top10E=0.28,eRank=160.6,q75/q25=59.54 attn_vo:H=0.8365,top10E=0.15,eRank=286.2,q75/q25=44.39 mlp_w1:H=0.8939,top10E=0.16,eRank=383.3,q75/q25=4.96 mlp_w2:H=0.9707,top10E=0.04,eRank=632.1,q75/q25=2.91 vo_prod:H=0.7296,top10E=0.25,eRank=133.5,q75/q25=1733.75 train_time:346476ms step_avg:75.32ms +[2025-09-02 15:23:01] [Rank 0] step:4601/10000 train_time:346492ms step_avg:75.31ms +[2025-09-02 15:23:01] [Rank 0] step:4601/10000 train_time:346492ms step_avg:75.31ms +[2025-09-02 15:23:02] [Rank 0] step:4621/10000 train_time:347986ms step_avg:75.31ms +[2025-09-02 15:23:02] [Rank 0] step:4621/10000 train_time:347986ms step_avg:75.31ms +[2025-09-02 15:23:04] [Rank 0] step:4641/10000 train_time:349553ms 
step_avg:75.32ms +[2025-09-02 15:23:04] [Rank 0] step:4641/10000 train_time:349553ms step_avg:75.32ms +[2025-09-02 15:23:06] [Rank 0] step:4661/10000 train_time:351120ms step_avg:75.33ms +[2025-09-02 15:23:06] [Rank 0] step:4661/10000 train_time:351120ms step_avg:75.33ms +[2025-09-02 15:23:07] [Rank 0] step:4681/10000 train_time:352687ms step_avg:75.34ms +[2025-09-02 15:23:07] [Rank 0] step:4681/10000 train_time:352687ms step_avg:75.34ms +[2025-09-02 15:23:09] [Rank 0] step:4701/10000 train_time:354254ms step_avg:75.36ms +[2025-09-02 15:23:09] [Rank 0] step:4701/10000 train_time:354254ms step_avg:75.36ms +[2025-09-02 15:23:10] [Rank 0] step:4721/10000 train_time:355822ms step_avg:75.37ms +[2025-09-02 15:23:10] [Rank 0] step:4721/10000 train_time:355822ms step_avg:75.37ms +[2025-09-02 15:23:12] [Rank 0] step:4741/10000 train_time:357387ms step_avg:75.38ms +[2025-09-02 15:23:12] [Rank 0] step:4741/10000 train_time:357387ms step_avg:75.38ms +[2025-09-02 15:23:13] [Rank 0] step:4761/10000 train_time:358955ms step_avg:75.39ms +[2025-09-02 15:23:13] [Rank 0] step:4761/10000 train_time:358955ms step_avg:75.39ms +[2025-09-02 15:23:15] [Rank 0] step:4781/10000 train_time:360522ms step_avg:75.41ms +[2025-09-02 15:23:15] [Rank 0] step:4781/10000 train_time:360522ms step_avg:75.41ms +[2025-09-02 15:23:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:23:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:23:28] [Rank 0] PRINT: step:4800/10000 val_loss:3.9299 svd_entropy: attn_qk:H=0.7611,top10E=0.28,eRank=162.2,q75/q25=59.35 attn_vo:H=0.8381,top10E=0.15,eRank=288.7,q75/q25=43.50 mlp_w1:H=0.8957,top10E=0.15,eRank=387.7,q75/q25=4.90 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.91 vo_prod:H=0.7321,top10E=0.25,eRank=135.9,q75/q25=1595.34 train_time:362171ms step_avg:75.45ms +[2025-09-02 15:23:28] [Rank 0] PRINT: step:4800/10000 val_loss:3.9299 svd_entropy: attn_qk:H=0.7611,top10E=0.28,eRank=162.2,q75/q25=59.35 attn_vo:H=0.8381,top10E=0.15,eRank=288.7,q75/q25=43.50 mlp_w1:H=0.8957,top10E=0.15,eRank=387.7,q75/q25=4.90 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.91 vo_prod:H=0.7321,top10E=0.25,eRank=135.9,q75/q25=1595.34 train_time:362171ms step_avg:75.45ms +[2025-09-02 15:23:29] [Rank 0] step:4801/10000 train_time:362185ms step_avg:75.44ms +[2025-09-02 15:23:29] [Rank 0] step:4801/10000 train_time:362185ms step_avg:75.44ms +[2025-09-02 15:23:30] [Rank 0] step:4821/10000 train_time:363692ms step_avg:75.44ms +[2025-09-02 15:23:30] [Rank 0] step:4821/10000 train_time:363692ms step_avg:75.44ms +[2025-09-02 15:23:32] [Rank 0] step:4841/10000 train_time:365258ms step_avg:75.45ms +[2025-09-02 15:23:32] [Rank 0] step:4841/10000 train_time:365258ms step_avg:75.45ms +[2025-09-02 15:23:33] [Rank 0] step:4861/10000 train_time:366826ms step_avg:75.46ms +[2025-09-02 15:23:33] [Rank 0] step:4861/10000 train_time:366826ms step_avg:75.46ms +[2025-09-02 15:23:35] [Rank 0] step:4881/10000 train_time:368392ms step_avg:75.47ms +[2025-09-02 15:23:35] [Rank 0] step:4881/10000 train_time:368392ms step_avg:75.47ms +[2025-09-02 15:23:36] [Rank 0] step:4901/10000 train_time:369956ms step_avg:75.49ms +[2025-09-02 15:23:36] [Rank 0] step:4901/10000 train_time:369956ms step_avg:75.49ms +[2025-09-02 15:23:38] [Rank 0] step:4921/10000 train_time:371526ms step_avg:75.50ms +[2025-09-02 15:23:38] [Rank 0] step:4921/10000 train_time:371526ms step_avg:75.50ms +[2025-09-02 
15:23:40] [Rank 0] step:4941/10000 train_time:373097ms step_avg:75.51ms +[2025-09-02 15:23:40] [Rank 0] step:4941/10000 train_time:373097ms step_avg:75.51ms +[2025-09-02 15:23:41] [Rank 0] step:4961/10000 train_time:374664ms step_avg:75.52ms +[2025-09-02 15:23:41] [Rank 0] step:4961/10000 train_time:374664ms step_avg:75.52ms +[2025-09-02 15:23:43] [Rank 0] step:4981/10000 train_time:376233ms step_avg:75.53ms +[2025-09-02 15:23:43] [Rank 0] step:4981/10000 train_time:376233ms step_avg:75.53ms +[2025-09-02 15:23:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:23:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:23:56] [Rank 0] PRINT: step:5000/10000 val_loss:3.9096 svd_entropy: attn_qk:H=0.7627,top10E=0.27,eRank=163.8,q75/q25=58.80 attn_vo:H=0.8395,top10E=0.14,eRank=290.9,q75/q25=42.34 mlp_w1:H=0.8973,top10E=0.15,eRank=391.8,q75/q25=4.85 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.90 vo_prod:H=0.7343,top10E=0.24,eRank=137.9,q75/q25=1493.21 train_time:377885ms step_avg:75.58ms +[2025-09-02 15:23:56] [Rank 0] PRINT: step:5000/10000 val_loss:3.9096 svd_entropy: attn_qk:H=0.7627,top10E=0.27,eRank=163.8,q75/q25=58.80 attn_vo:H=0.8395,top10E=0.14,eRank=290.9,q75/q25=42.34 mlp_w1:H=0.8973,top10E=0.15,eRank=391.8,q75/q25=4.85 mlp_w2:H=0.9707,top10E=0.04,eRank=632.3,q75/q25=2.90 vo_prod:H=0.7343,top10E=0.24,eRank=137.9,q75/q25=1493.21 train_time:377885ms step_avg:75.58ms +[2025-09-02 15:23:56] [Rank 0] step:5001/10000 train_time:377899ms step_avg:75.56ms +[2025-09-02 15:23:56] [Rank 0] step:5001/10000 train_time:377899ms step_avg:75.56ms +[2025-09-02 15:23:58] [Rank 0] step:5021/10000 train_time:379398ms step_avg:75.56ms +[2025-09-02 15:23:58] [Rank 0] step:5021/10000 train_time:379398ms step_avg:75.56ms +[2025-09-02 15:23:59] [Rank 0] step:5041/10000 train_time:380969ms 
step_avg:75.57ms +[2025-09-02 15:23:59] [Rank 0] step:5041/10000 train_time:380969ms step_avg:75.57ms +[2025-09-02 15:24:01] [Rank 0] step:5061/10000 train_time:382536ms step_avg:75.59ms +[2025-09-02 15:24:01] [Rank 0] step:5061/10000 train_time:382536ms step_avg:75.59ms +[2025-09-02 15:24:02] [Rank 0] step:5081/10000 train_time:384103ms step_avg:75.60ms +[2025-09-02 15:24:02] [Rank 0] step:5081/10000 train_time:384103ms step_avg:75.60ms +[2025-09-02 15:24:04] [Rank 0] step:5101/10000 train_time:385675ms step_avg:75.61ms +[2025-09-02 15:24:04] [Rank 0] step:5101/10000 train_time:385675ms step_avg:75.61ms +[2025-09-02 15:24:06] [Rank 0] step:5121/10000 train_time:387244ms step_avg:75.62ms +[2025-09-02 15:24:06] [Rank 0] step:5121/10000 train_time:387244ms step_avg:75.62ms +[2025-09-02 15:24:07] [Rank 0] step:5141/10000 train_time:388817ms step_avg:75.63ms +[2025-09-02 15:24:07] [Rank 0] step:5141/10000 train_time:388817ms step_avg:75.63ms +[2025-09-02 15:24:09] [Rank 0] step:5161/10000 train_time:390387ms step_avg:75.64ms +[2025-09-02 15:24:09] [Rank 0] step:5161/10000 train_time:390387ms step_avg:75.64ms +[2025-09-02 15:24:10] [Rank 0] step:5181/10000 train_time:391969ms step_avg:75.66ms +[2025-09-02 15:24:10] [Rank 0] step:5181/10000 train_time:391969ms step_avg:75.66ms +[2025-09-02 15:24:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:24:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:24:24] [Rank 0] PRINT: step:5200/10000 val_loss:3.8907 svd_entropy: attn_qk:H=0.7642,top10E=0.27,eRank=165.2,q75/q25=58.59 attn_vo:H=0.8408,top10E=0.14,eRank=293.0,q75/q25=41.68 mlp_w1:H=0.8989,top10E=0.15,eRank=395.7,q75/q25=4.79 mlp_w2:H=0.9708,top10E=0.04,eRank=632.4,q75/q25=2.90 vo_prod:H=0.7362,top10E=0.24,eRank=139.8,q75/q25=1421.92 train_time:393646ms step_avg:75.70ms +[2025-09-02 15:24:24] [Rank 0] PRINT: step:5200/10000 val_loss:3.8907 svd_entropy: attn_qk:H=0.7642,top10E=0.27,eRank=165.2,q75/q25=58.59 attn_vo:H=0.8408,top10E=0.14,eRank=293.0,q75/q25=41.68 mlp_w1:H=0.8989,top10E=0.15,eRank=395.7,q75/q25=4.79 mlp_w2:H=0.9708,top10E=0.04,eRank=632.4,q75/q25=2.90 vo_prod:H=0.7362,top10E=0.24,eRank=139.8,q75/q25=1421.92 train_time:393646ms step_avg:75.70ms +[2025-09-02 15:24:24] [Rank 0] step:5201/10000 train_time:393659ms step_avg:75.69ms +[2025-09-02 15:24:24] [Rank 0] step:5201/10000 train_time:393659ms step_avg:75.69ms +[2025-09-02 15:24:25] [Rank 0] step:5221/10000 train_time:395188ms step_avg:75.69ms +[2025-09-02 15:24:25] [Rank 0] step:5221/10000 train_time:395188ms step_avg:75.69ms +[2025-09-02 15:24:27] [Rank 0] step:5241/10000 train_time:396787ms step_avg:75.71ms +[2025-09-02 15:24:27] [Rank 0] step:5241/10000 train_time:396787ms step_avg:75.71ms +[2025-09-02 15:24:29] [Rank 0] step:5261/10000 train_time:398387ms step_avg:75.72ms +[2025-09-02 15:24:29] [Rank 0] step:5261/10000 train_time:398387ms step_avg:75.72ms +[2025-09-02 15:24:30] [Rank 0] step:5281/10000 train_time:400008ms step_avg:75.74ms +[2025-09-02 15:24:30] [Rank 0] step:5281/10000 train_time:400008ms step_avg:75.74ms +[2025-09-02 15:24:32] [Rank 0] step:5301/10000 train_time:401650ms step_avg:75.77ms +[2025-09-02 15:24:32] [Rank 0] step:5301/10000 train_time:401650ms step_avg:75.77ms +[2025-09-02 15:24:34] [Rank 0] step:5321/10000 train_time:403248ms step_avg:75.78ms +[2025-09-02 15:24:34] [Rank 0] step:5321/10000 train_time:403248ms step_avg:75.78ms +[2025-09-02 
15:24:35] [Rank 0] step:5341/10000 train_time:404847ms step_avg:75.80ms +[2025-09-02 15:24:35] [Rank 0] step:5341/10000 train_time:404847ms step_avg:75.80ms +[2025-09-02 15:24:37] [Rank 0] step:5361/10000 train_time:406450ms step_avg:75.82ms +[2025-09-02 15:24:37] [Rank 0] step:5361/10000 train_time:406450ms step_avg:75.82ms +[2025-09-02 15:24:38] [Rank 0] step:5381/10000 train_time:408052ms step_avg:75.83ms +[2025-09-02 15:24:38] [Rank 0] step:5381/10000 train_time:408052ms step_avg:75.83ms +[2025-09-02 15:24:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:24:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:24:52] [Rank 0] PRINT: step:5400/10000 val_loss:3.8726 svd_entropy: attn_qk:H=0.7654,top10E=0.27,eRank=166.4,q75/q25=57.95 attn_vo:H=0.8420,top10E=0.14,eRank=294.8,q75/q25=40.92 mlp_w1:H=0.9003,top10E=0.15,eRank=399.5,q75/q25=4.76 mlp_w2:H=0.9708,top10E=0.04,eRank=632.5,q75/q25=2.90 vo_prod:H=0.7380,top10E=0.24,eRank=141.4,q75/q25=1363.69 train_time:409732ms step_avg:75.88ms +[2025-09-02 15:24:52] [Rank 0] PRINT: step:5400/10000 val_loss:3.8726 svd_entropy: attn_qk:H=0.7654,top10E=0.27,eRank=166.4,q75/q25=57.95 attn_vo:H=0.8420,top10E=0.14,eRank=294.8,q75/q25=40.92 mlp_w1:H=0.9003,top10E=0.15,eRank=399.5,q75/q25=4.76 mlp_w2:H=0.9708,top10E=0.04,eRank=632.5,q75/q25=2.90 vo_prod:H=0.7380,top10E=0.24,eRank=141.4,q75/q25=1363.69 train_time:409732ms step_avg:75.88ms +[2025-09-02 15:24:52] [Rank 0] step:5401/10000 train_time:409746ms step_avg:75.86ms +[2025-09-02 15:24:52] [Rank 0] step:5401/10000 train_time:409746ms step_avg:75.86ms +[2025-09-02 15:24:53] [Rank 0] step:5421/10000 train_time:411261ms step_avg:75.86ms +[2025-09-02 15:24:53] [Rank 0] step:5421/10000 train_time:411261ms step_avg:75.86ms +[2025-09-02 15:24:55] [Rank 0] step:5441/10000 train_time:412854ms 
step_avg:75.88ms +[2025-09-02 15:24:55] [Rank 0] step:5441/10000 train_time:412854ms step_avg:75.88ms +[2025-09-02 15:24:57] [Rank 0] step:5461/10000 train_time:414456ms step_avg:75.89ms +[2025-09-02 15:24:57] [Rank 0] step:5461/10000 train_time:414456ms step_avg:75.89ms +[2025-09-02 15:24:58] [Rank 0] step:5481/10000 train_time:416056ms step_avg:75.91ms +[2025-09-02 15:24:58] [Rank 0] step:5481/10000 train_time:416056ms step_avg:75.91ms +[2025-09-02 15:25:00] [Rank 0] step:5501/10000 train_time:417658ms step_avg:75.92ms +[2025-09-02 15:25:00] [Rank 0] step:5501/10000 train_time:417658ms step_avg:75.92ms +[2025-09-02 15:25:01] [Rank 0] step:5521/10000 train_time:419261ms step_avg:75.94ms +[2025-09-02 15:25:01] [Rank 0] step:5521/10000 train_time:419261ms step_avg:75.94ms +[2025-09-02 15:25:03] [Rank 0] step:5541/10000 train_time:420863ms step_avg:75.95ms +[2025-09-02 15:25:03] [Rank 0] step:5541/10000 train_time:420863ms step_avg:75.95ms +[2025-09-02 15:25:05] [Rank 0] step:5561/10000 train_time:422466ms step_avg:75.97ms +[2025-09-02 15:25:05] [Rank 0] step:5561/10000 train_time:422466ms step_avg:75.97ms +[2025-09-02 15:25:06] [Rank 0] step:5581/10000 train_time:424063ms step_avg:75.98ms +[2025-09-02 15:25:06] [Rank 0] step:5581/10000 train_time:424063ms step_avg:75.98ms +[2025-09-02 15:25:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:25:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:25:20] [Rank 0] PRINT: step:5600/10000 val_loss:3.8597 svd_entropy: attn_qk:H=0.7666,top10E=0.27,eRank=167.7,q75/q25=57.64 attn_vo:H=0.8430,top10E=0.14,eRank=296.6,q75/q25=40.58 mlp_w1:H=0.9016,top10E=0.15,eRank=402.9,q75/q25=4.71 mlp_w2:H=0.9708,top10E=0.04,eRank=632.6,q75/q25=2.89 vo_prod:H=0.7396,top10E=0.24,eRank=143.0,q75/q25=1319.85 train_time:425746ms step_avg:76.03ms +[2025-09-02 15:25:20] [Rank 0] PRINT: step:5600/10000 val_loss:3.8597 svd_entropy: attn_qk:H=0.7666,top10E=0.27,eRank=167.7,q75/q25=57.64 attn_vo:H=0.8430,top10E=0.14,eRank=296.6,q75/q25=40.58 mlp_w1:H=0.9016,top10E=0.15,eRank=402.9,q75/q25=4.71 mlp_w2:H=0.9708,top10E=0.04,eRank=632.6,q75/q25=2.89 vo_prod:H=0.7396,top10E=0.24,eRank=143.0,q75/q25=1319.85 train_time:425746ms step_avg:76.03ms +[2025-09-02 15:25:20] [Rank 0] step:5601/10000 train_time:425760ms step_avg:76.02ms +[2025-09-02 15:25:20] [Rank 0] step:5601/10000 train_time:425760ms step_avg:76.02ms +[2025-09-02 15:25:21] [Rank 0] step:5621/10000 train_time:427294ms step_avg:76.02ms +[2025-09-02 15:25:21] [Rank 0] step:5621/10000 train_time:427294ms step_avg:76.02ms +[2025-09-02 15:25:23] [Rank 0] step:5641/10000 train_time:428891ms step_avg:76.03ms +[2025-09-02 15:25:23] [Rank 0] step:5641/10000 train_time:428891ms step_avg:76.03ms +[2025-09-02 15:25:25] [Rank 0] step:5661/10000 train_time:430485ms step_avg:76.04ms +[2025-09-02 15:25:25] [Rank 0] step:5661/10000 train_time:430485ms step_avg:76.04ms +[2025-09-02 15:25:26] [Rank 0] step:5681/10000 train_time:432083ms step_avg:76.06ms +[2025-09-02 15:25:26] [Rank 0] step:5681/10000 train_time:432083ms step_avg:76.06ms +[2025-09-02 15:25:28] [Rank 0] step:5701/10000 train_time:433681ms step_avg:76.07ms +[2025-09-02 15:25:28] [Rank 0] step:5701/10000 train_time:433681ms step_avg:76.07ms +[2025-09-02 15:25:29] [Rank 0] step:5721/10000 train_time:435283ms step_avg:76.09ms +[2025-09-02 15:25:29] [Rank 0] step:5721/10000 train_time:435283ms step_avg:76.09ms +[2025-09-02 
15:25:31] [Rank 0] step:5741/10000 train_time:436881ms step_avg:76.10ms +[2025-09-02 15:25:31] [Rank 0] step:5741/10000 train_time:436881ms step_avg:76.10ms +[2025-09-02 15:25:33] [Rank 0] step:5761/10000 train_time:438532ms step_avg:76.12ms +[2025-09-02 15:25:33] [Rank 0] step:5761/10000 train_time:438532ms step_avg:76.12ms +[2025-09-02 15:25:34] [Rank 0] step:5781/10000 train_time:440142ms step_avg:76.14ms +[2025-09-02 15:25:34] [Rank 0] step:5781/10000 train_time:440142ms step_avg:76.14ms +[2025-09-02 15:25:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:25:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:25:48] [Rank 0] PRINT: step:5800/10000 val_loss:3.8483 svd_entropy: attn_qk:H=0.7680,top10E=0.27,eRank=169.0,q75/q25=57.28 attn_vo:H=0.8441,top10E=0.14,eRank=298.2,q75/q25=39.83 mlp_w1:H=0.9029,top10E=0.14,eRank=406.1,q75/q25=4.67 mlp_w2:H=0.9708,top10E=0.04,eRank=632.7,q75/q25=2.89 vo_prod:H=0.7411,top10E=0.24,eRank=144.4,q75/q25=1226.92 train_time:441826ms step_avg:76.18ms +[2025-09-02 15:25:48] [Rank 0] PRINT: step:5800/10000 val_loss:3.8483 svd_entropy: attn_qk:H=0.7680,top10E=0.27,eRank=169.0,q75/q25=57.28 attn_vo:H=0.8441,top10E=0.14,eRank=298.2,q75/q25=39.83 mlp_w1:H=0.9029,top10E=0.14,eRank=406.1,q75/q25=4.67 mlp_w2:H=0.9708,top10E=0.04,eRank=632.7,q75/q25=2.89 vo_prod:H=0.7411,top10E=0.24,eRank=144.4,q75/q25=1226.92 train_time:441826ms step_avg:76.18ms +[2025-09-02 15:25:48] [Rank 0] step:5801/10000 train_time:441840ms step_avg:76.17ms +[2025-09-02 15:25:48] [Rank 0] step:5801/10000 train_time:441840ms step_avg:76.17ms +[2025-09-02 15:25:49] [Rank 0] step:5821/10000 train_time:443384ms step_avg:76.17ms +[2025-09-02 15:25:49] [Rank 0] step:5821/10000 train_time:443384ms step_avg:76.17ms +[2025-09-02 15:25:51] [Rank 0] step:5841/10000 train_time:444982ms 
step_avg:76.18ms +[2025-09-02 15:25:51] [Rank 0] step:5841/10000 train_time:444982ms step_avg:76.18ms +[2025-09-02 15:25:53] [Rank 0] step:5861/10000 train_time:446582ms step_avg:76.20ms +[2025-09-02 15:25:53] [Rank 0] step:5861/10000 train_time:446582ms step_avg:76.20ms +[2025-09-02 15:25:54] [Rank 0] step:5881/10000 train_time:448185ms step_avg:76.21ms +[2025-09-02 15:25:54] [Rank 0] step:5881/10000 train_time:448185ms step_avg:76.21ms +[2025-09-02 15:25:56] [Rank 0] step:5901/10000 train_time:449786ms step_avg:76.22ms +[2025-09-02 15:25:56] [Rank 0] step:5901/10000 train_time:449786ms step_avg:76.22ms +[2025-09-02 15:25:57] [Rank 0] step:5921/10000 train_time:451388ms step_avg:76.24ms +[2025-09-02 15:25:57] [Rank 0] step:5921/10000 train_time:451388ms step_avg:76.24ms +[2025-09-02 15:25:59] [Rank 0] step:5941/10000 train_time:452994ms step_avg:76.25ms +[2025-09-02 15:25:59] [Rank 0] step:5941/10000 train_time:452994ms step_avg:76.25ms +[2025-09-02 15:26:01] [Rank 0] step:5961/10000 train_time:454600ms step_avg:76.26ms +[2025-09-02 15:26:01] [Rank 0] step:5961/10000 train_time:454600ms step_avg:76.26ms +[2025-09-02 15:26:02] [Rank 0] step:5981/10000 train_time:456205ms step_avg:76.28ms +[2025-09-02 15:26:02] [Rank 0] step:5981/10000 train_time:456205ms step_avg:76.28ms +[2025-09-02 15:26:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:26:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:26:16] [Rank 0] PRINT: step:6000/10000 val_loss:3.8252 svd_entropy: attn_qk:H=0.7692,top10E=0.27,eRank=170.3,q75/q25=56.93 attn_vo:H=0.8451,top10E=0.14,eRank=299.9,q75/q25=38.98 mlp_w1:H=0.9041,top10E=0.14,eRank=409.3,q75/q25=4.64 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.89 vo_prod:H=0.7425,top10E=0.24,eRank=145.8,q75/q25=1188.57 train_time:457888ms step_avg:76.31ms +[2025-09-02 15:26:16] [Rank 0] PRINT: step:6000/10000 val_loss:3.8252 svd_entropy: attn_qk:H=0.7692,top10E=0.27,eRank=170.3,q75/q25=56.93 attn_vo:H=0.8451,top10E=0.14,eRank=299.9,q75/q25=38.98 mlp_w1:H=0.9041,top10E=0.14,eRank=409.3,q75/q25=4.64 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.89 vo_prod:H=0.7425,top10E=0.24,eRank=145.8,q75/q25=1188.57 train_time:457888ms step_avg:76.31ms +[2025-09-02 15:26:16] [Rank 0] step:6001/10000 train_time:457901ms step_avg:76.30ms +[2025-09-02 15:26:16] [Rank 0] step:6001/10000 train_time:457901ms step_avg:76.30ms +[2025-09-02 15:26:17] [Rank 0] step:6021/10000 train_time:459434ms step_avg:76.31ms +[2025-09-02 15:26:17] [Rank 0] step:6021/10000 train_time:459434ms step_avg:76.31ms +[2025-09-02 15:26:19] [Rank 0] step:6041/10000 train_time:461038ms step_avg:76.32ms +[2025-09-02 15:26:19] [Rank 0] step:6041/10000 train_time:461038ms step_avg:76.32ms +[2025-09-02 15:26:21] [Rank 0] step:6061/10000 train_time:462646ms step_avg:76.33ms +[2025-09-02 15:26:21] [Rank 0] step:6061/10000 train_time:462646ms step_avg:76.33ms +[2025-09-02 15:26:22] [Rank 0] step:6081/10000 train_time:464255ms step_avg:76.35ms +[2025-09-02 15:26:22] [Rank 0] step:6081/10000 train_time:464255ms step_avg:76.35ms +[2025-09-02 15:26:24] [Rank 0] step:6101/10000 train_time:465865ms step_avg:76.36ms +[2025-09-02 15:26:24] [Rank 0] step:6101/10000 train_time:465865ms step_avg:76.36ms +[2025-09-02 15:26:26] [Rank 0] step:6121/10000 train_time:467748ms step_avg:76.42ms +[2025-09-02 15:26:26] [Rank 0] step:6121/10000 train_time:467748ms step_avg:76.42ms +[2025-09-02 
15:26:27] [Rank 0] step:6141/10000 train_time:469368ms step_avg:76.43ms +[2025-09-02 15:26:27] [Rank 0] step:6141/10000 train_time:469368ms step_avg:76.43ms +[2025-09-02 15:26:29] [Rank 0] step:6161/10000 train_time:470977ms step_avg:76.44ms +[2025-09-02 15:26:29] [Rank 0] step:6161/10000 train_time:470977ms step_avg:76.44ms +[2025-09-02 15:26:31] [Rank 0] step:6181/10000 train_time:472583ms step_avg:76.46ms +[2025-09-02 15:26:31] [Rank 0] step:6181/10000 train_time:472583ms step_avg:76.46ms +[2025-09-02 15:26:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:26:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:26:44] [Rank 0] PRINT: step:6200/10000 val_loss:3.8091 svd_entropy: attn_qk:H=0.7702,top10E=0.26,eRank=171.4,q75/q25=56.60 attn_vo:H=0.8460,top10E=0.14,eRank=301.4,q75/q25=38.30 mlp_w1:H=0.9051,top10E=0.14,eRank=412.1,q75/q25=4.61 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.89 vo_prod:H=0.7439,top10E=0.23,eRank=147.1,q75/q25=1115.29 train_time:474274ms step_avg:76.50ms +[2025-09-02 15:26:44] [Rank 0] PRINT: step:6200/10000 val_loss:3.8091 svd_entropy: attn_qk:H=0.7702,top10E=0.26,eRank=171.4,q75/q25=56.60 attn_vo:H=0.8460,top10E=0.14,eRank=301.4,q75/q25=38.30 mlp_w1:H=0.9051,top10E=0.14,eRank=412.1,q75/q25=4.61 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.89 vo_prod:H=0.7439,top10E=0.23,eRank=147.1,q75/q25=1115.29 train_time:474274ms step_avg:76.50ms +[2025-09-02 15:26:44] [Rank 0] step:6201/10000 train_time:474287ms step_avg:76.49ms +[2025-09-02 15:26:44] [Rank 0] step:6201/10000 train_time:474287ms step_avg:76.49ms +[2025-09-02 15:26:46] [Rank 0] step:6221/10000 train_time:475843ms step_avg:76.49ms +[2025-09-02 15:26:46] [Rank 0] step:6221/10000 train_time:475843ms step_avg:76.49ms +[2025-09-02 15:26:47] [Rank 0] step:6241/10000 train_time:477447ms 
step_avg:76.50ms +[2025-09-02 15:26:47] [Rank 0] step:6241/10000 train_time:477447ms step_avg:76.50ms +[2025-09-02 15:26:49] [Rank 0] step:6261/10000 train_time:479057ms step_avg:76.51ms +[2025-09-02 15:26:49] [Rank 0] step:6261/10000 train_time:479057ms step_avg:76.51ms +[2025-09-02 15:26:51] [Rank 0] step:6281/10000 train_time:480669ms step_avg:76.53ms +[2025-09-02 15:26:51] [Rank 0] step:6281/10000 train_time:480669ms step_avg:76.53ms +[2025-09-02 15:26:52] [Rank 0] step:6301/10000 train_time:482280ms step_avg:76.54ms +[2025-09-02 15:26:52] [Rank 0] step:6301/10000 train_time:482280ms step_avg:76.54ms +[2025-09-02 15:26:54] [Rank 0] step:6321/10000 train_time:483891ms step_avg:76.55ms +[2025-09-02 15:26:54] [Rank 0] step:6321/10000 train_time:483891ms step_avg:76.55ms +[2025-09-02 15:26:55] [Rank 0] step:6341/10000 train_time:485502ms step_avg:76.57ms +[2025-09-02 15:26:55] [Rank 0] step:6341/10000 train_time:485502ms step_avg:76.57ms +[2025-09-02 15:26:57] [Rank 0] step:6361/10000 train_time:487120ms step_avg:76.58ms +[2025-09-02 15:26:57] [Rank 0] step:6361/10000 train_time:487120ms step_avg:76.58ms +[2025-09-02 15:26:59] [Rank 0] step:6381/10000 train_time:488736ms step_avg:76.59ms +[2025-09-02 15:26:59] [Rank 0] step:6381/10000 train_time:488736ms step_avg:76.59ms +[2025-09-02 15:27:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:27:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:27:12] [Rank 0] PRINT: step:6400/10000 val_loss:3.7911 svd_entropy: attn_qk:H=0.7712,top10E=0.26,eRank=172.4,q75/q25=56.16 attn_vo:H=0.8468,top10E=0.14,eRank=302.8,q75/q25=37.71 mlp_w1:H=0.9061,top10E=0.14,eRank=414.7,q75/q25=4.57 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.88 vo_prod:H=0.7451,top10E=0.23,eRank=148.2,q75/q25=1061.78 train_time:490428ms step_avg:76.63ms +[2025-09-02 15:27:12] [Rank 0] PRINT: step:6400/10000 val_loss:3.7911 svd_entropy: attn_qk:H=0.7712,top10E=0.26,eRank=172.4,q75/q25=56.16 attn_vo:H=0.8468,top10E=0.14,eRank=302.8,q75/q25=37.71 mlp_w1:H=0.9061,top10E=0.14,eRank=414.7,q75/q25=4.57 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.88 vo_prod:H=0.7451,top10E=0.23,eRank=148.2,q75/q25=1061.78 train_time:490428ms step_avg:76.63ms +[2025-09-02 15:27:12] [Rank 0] step:6401/10000 train_time:490442ms step_avg:76.62ms +[2025-09-02 15:27:12] [Rank 0] step:6401/10000 train_time:490442ms step_avg:76.62ms +[2025-09-02 15:27:14] [Rank 0] step:6421/10000 train_time:491991ms step_avg:76.62ms +[2025-09-02 15:27:14] [Rank 0] step:6421/10000 train_time:491991ms step_avg:76.62ms +[2025-09-02 15:27:15] [Rank 0] step:6441/10000 train_time:493594ms step_avg:76.63ms +[2025-09-02 15:27:15] [Rank 0] step:6441/10000 train_time:493594ms step_avg:76.63ms +[2025-09-02 15:27:17] [Rank 0] step:6461/10000 train_time:495199ms step_avg:76.64ms +[2025-09-02 15:27:17] [Rank 0] step:6461/10000 train_time:495199ms step_avg:76.64ms +[2025-09-02 15:27:19] [Rank 0] step:6481/10000 train_time:496915ms step_avg:76.67ms +[2025-09-02 15:27:19] [Rank 0] step:6481/10000 train_time:496915ms step_avg:76.67ms +[2025-09-02 15:27:20] [Rank 0] step:6501/10000 train_time:498518ms step_avg:76.68ms +[2025-09-02 15:27:20] [Rank 0] step:6501/10000 train_time:498518ms step_avg:76.68ms +[2025-09-02 15:27:22] [Rank 0] step:6521/10000 train_time:500117ms step_avg:76.69ms +[2025-09-02 15:27:22] [Rank 0] step:6521/10000 train_time:500117ms step_avg:76.69ms +[2025-09-02 
15:27:24] [Rank 0] step:6541/10000 train_time:501726ms step_avg:76.70ms +[2025-09-02 15:27:24] [Rank 0] step:6541/10000 train_time:501726ms step_avg:76.70ms +[2025-09-02 15:27:25] [Rank 0] step:6561/10000 train_time:503338ms step_avg:76.72ms +[2025-09-02 15:27:25] [Rank 0] step:6561/10000 train_time:503338ms step_avg:76.72ms +[2025-09-02 15:27:27] [Rank 0] step:6581/10000 train_time:504941ms step_avg:76.73ms +[2025-09-02 15:27:27] [Rank 0] step:6581/10000 train_time:504941ms step_avg:76.73ms +[2025-09-02 15:27:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:27:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:27:40] [Rank 0] PRINT: step:6600/10000 val_loss:3.7766 svd_entropy: attn_qk:H=0.7721,top10E=0.26,eRank=173.4,q75/q25=55.95 attn_vo:H=0.8476,top10E=0.14,eRank=304.1,q75/q25=37.12 mlp_w1:H=0.9069,top10E=0.14,eRank=417.0,q75/q25=4.55 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.88 vo_prod:H=0.7464,top10E=0.23,eRank=149.4,q75/q25=1018.00 train_time:506630ms step_avg:76.76ms +[2025-09-02 15:27:40] [Rank 0] PRINT: step:6600/10000 val_loss:3.7766 svd_entropy: attn_qk:H=0.7721,top10E=0.26,eRank=173.4,q75/q25=55.95 attn_vo:H=0.8476,top10E=0.14,eRank=304.1,q75/q25=37.12 mlp_w1:H=0.9069,top10E=0.14,eRank=417.0,q75/q25=4.55 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.88 vo_prod:H=0.7464,top10E=0.23,eRank=149.4,q75/q25=1018.00 train_time:506630ms step_avg:76.76ms +[2025-09-02 15:27:40] [Rank 0] step:6601/10000 train_time:506643ms step_avg:76.75ms +[2025-09-02 15:27:40] [Rank 0] step:6601/10000 train_time:506643ms step_avg:76.75ms +[2025-09-02 15:27:42] [Rank 0] step:6621/10000 train_time:508168ms step_avg:76.75ms +[2025-09-02 15:27:42] [Rank 0] step:6621/10000 train_time:508168ms step_avg:76.75ms +[2025-09-02 15:27:43] [Rank 0] step:6641/10000 train_time:509778ms 
step_avg:76.76ms +[2025-09-02 15:27:43] [Rank 0] step:6641/10000 train_time:509778ms step_avg:76.76ms +[2025-09-02 15:27:45] [Rank 0] step:6661/10000 train_time:511382ms step_avg:76.77ms +[2025-09-02 15:27:45] [Rank 0] step:6661/10000 train_time:511382ms step_avg:76.77ms +[2025-09-02 15:27:47] [Rank 0] step:6681/10000 train_time:513007ms step_avg:76.79ms +[2025-09-02 15:27:47] [Rank 0] step:6681/10000 train_time:513007ms step_avg:76.79ms +[2025-09-02 15:27:48] [Rank 0] step:6701/10000 train_time:514649ms step_avg:76.80ms +[2025-09-02 15:27:48] [Rank 0] step:6701/10000 train_time:514649ms step_avg:76.80ms +[2025-09-02 15:27:50] [Rank 0] step:6721/10000 train_time:516286ms step_avg:76.82ms +[2025-09-02 15:27:50] [Rank 0] step:6721/10000 train_time:516286ms step_avg:76.82ms +[2025-09-02 15:27:52] [Rank 0] step:6741/10000 train_time:517915ms step_avg:76.83ms +[2025-09-02 15:27:52] [Rank 0] step:6741/10000 train_time:517915ms step_avg:76.83ms +[2025-09-02 15:27:53] [Rank 0] step:6761/10000 train_time:519550ms step_avg:76.85ms +[2025-09-02 15:27:53] [Rank 0] step:6761/10000 train_time:519550ms step_avg:76.85ms +[2025-09-02 15:27:55] [Rank 0] step:6781/10000 train_time:521187ms step_avg:76.86ms +[2025-09-02 15:27:55] [Rank 0] step:6781/10000 train_time:521187ms step_avg:76.86ms +[2025-09-02 15:27:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:27:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:28:08] [Rank 0] PRINT: step:6800/10000 val_loss:3.7613 svd_entropy: attn_qk:H=0.7728,top10E=0.26,eRank=174.1,q75/q25=55.55 attn_vo:H=0.8483,top10E=0.14,eRank=305.3,q75/q25=36.68 mlp_w1:H=0.9077,top10E=0.14,eRank=419.2,q75/q25=4.52 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.87 vo_prod:H=0.7474,top10E=0.23,eRank=150.4,q75/q25=987.26 train_time:522908ms step_avg:76.90ms +[2025-09-02 15:28:08] [Rank 0] PRINT: step:6800/10000 val_loss:3.7613 svd_entropy: attn_qk:H=0.7728,top10E=0.26,eRank=174.1,q75/q25=55.55 attn_vo:H=0.8483,top10E=0.14,eRank=305.3,q75/q25=36.68 mlp_w1:H=0.9077,top10E=0.14,eRank=419.2,q75/q25=4.52 mlp_w2:H=0.9709,top10E=0.04,eRank=632.9,q75/q25=2.87 vo_prod:H=0.7474,top10E=0.23,eRank=150.4,q75/q25=987.26 train_time:522908ms step_avg:76.90ms +[2025-09-02 15:28:08] [Rank 0] step:6801/10000 train_time:522922ms step_avg:76.89ms +[2025-09-02 15:28:08] [Rank 0] step:6801/10000 train_time:522922ms step_avg:76.89ms +[2025-09-02 15:28:10] [Rank 0] step:6821/10000 train_time:524478ms step_avg:76.89ms +[2025-09-02 15:28:10] [Rank 0] step:6821/10000 train_time:524478ms step_avg:76.89ms +[2025-09-02 15:28:11] [Rank 0] step:6841/10000 train_time:526107ms step_avg:76.91ms +[2025-09-02 15:28:11] [Rank 0] step:6841/10000 train_time:526107ms step_avg:76.91ms +[2025-09-02 15:28:13] [Rank 0] step:6861/10000 train_time:527741ms step_avg:76.92ms +[2025-09-02 15:28:13] [Rank 0] step:6861/10000 train_time:527741ms step_avg:76.92ms +[2025-09-02 15:28:15] [Rank 0] step:6881/10000 train_time:529374ms step_avg:76.93ms +[2025-09-02 15:28:15] [Rank 0] step:6881/10000 train_time:529374ms step_avg:76.93ms +[2025-09-02 15:28:16] [Rank 0] step:6901/10000 train_time:531005ms step_avg:76.95ms +[2025-09-02 15:28:16] [Rank 0] step:6901/10000 train_time:531005ms step_avg:76.95ms +[2025-09-02 15:28:18] [Rank 0] step:6921/10000 train_time:532638ms step_avg:76.96ms +[2025-09-02 15:28:18] [Rank 0] step:6921/10000 train_time:532638ms step_avg:76.96ms +[2025-09-02 15:28:20] 
[Rank 0] step:6941/10000 train_time:534274ms step_avg:76.97ms +[2025-09-02 15:28:20] [Rank 0] step:6941/10000 train_time:534274ms step_avg:76.97ms +[2025-09-02 15:28:21] [Rank 0] step:6961/10000 train_time:535922ms step_avg:76.99ms +[2025-09-02 15:28:21] [Rank 0] step:6961/10000 train_time:535922ms step_avg:76.99ms +[2025-09-02 15:28:23] [Rank 0] step:6981/10000 train_time:537562ms step_avg:77.00ms +[2025-09-02 15:28:23] [Rank 0] step:6981/10000 train_time:537562ms step_avg:77.00ms +[2025-09-02 15:28:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:28:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:28:36] [Rank 0] PRINT: step:7000/10000 val_loss:3.7460 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=174.9,q75/q25=55.44 attn_vo:H=0.8490,top10E=0.13,eRank=306.5,q75/q25=36.22 mlp_w1:H=0.9085,top10E=0.14,eRank=421.1,q75/q25=4.50 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7485,top10E=0.23,eRank=151.5,q75/q25=934.77 train_time:539288ms step_avg:77.04ms +[2025-09-02 15:28:36] [Rank 0] PRINT: step:7000/10000 val_loss:3.7460 svd_entropy: attn_qk:H=0.7736,top10E=0.26,eRank=174.9,q75/q25=55.44 attn_vo:H=0.8490,top10E=0.13,eRank=306.5,q75/q25=36.22 mlp_w1:H=0.9085,top10E=0.14,eRank=421.1,q75/q25=4.50 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7485,top10E=0.23,eRank=151.5,q75/q25=934.77 train_time:539288ms step_avg:77.04ms +[2025-09-02 15:28:36] [Rank 0] step:7001/10000 train_time:539301ms step_avg:77.03ms +[2025-09-02 15:28:36] [Rank 0] step:7001/10000 train_time:539301ms step_avg:77.03ms +[2025-09-02 15:28:38] [Rank 0] step:7021/10000 train_time:540865ms step_avg:77.04ms +[2025-09-02 15:28:38] [Rank 0] step:7021/10000 train_time:540865ms step_avg:77.04ms +[2025-09-02 15:28:40] [Rank 0] step:7041/10000 train_time:542499ms step_avg:77.05ms 
+[2025-09-02 15:28:40] [Rank 0] step:7041/10000 train_time:542499ms step_avg:77.05ms +[2025-09-02 15:28:41] [Rank 0] step:7061/10000 train_time:544134ms step_avg:77.06ms +[2025-09-02 15:28:41] [Rank 0] step:7061/10000 train_time:544134ms step_avg:77.06ms +[2025-09-02 15:28:43] [Rank 0] step:7081/10000 train_time:545816ms step_avg:77.08ms +[2025-09-02 15:28:43] [Rank 0] step:7081/10000 train_time:545816ms step_avg:77.08ms +[2025-09-02 15:28:45] [Rank 0] step:7101/10000 train_time:547452ms step_avg:77.10ms +[2025-09-02 15:28:45] [Rank 0] step:7101/10000 train_time:547452ms step_avg:77.10ms +[2025-09-02 15:28:46] [Rank 0] step:7121/10000 train_time:549084ms step_avg:77.11ms +[2025-09-02 15:28:46] [Rank 0] step:7121/10000 train_time:549084ms step_avg:77.11ms +[2025-09-02 15:28:48] [Rank 0] step:7141/10000 train_time:550719ms step_avg:77.12ms +[2025-09-02 15:28:48] [Rank 0] step:7141/10000 train_time:550719ms step_avg:77.12ms +[2025-09-02 15:28:50] [Rank 0] step:7161/10000 train_time:552355ms step_avg:77.13ms +[2025-09-02 15:28:50] [Rank 0] step:7161/10000 train_time:552355ms step_avg:77.13ms +[2025-09-02 15:28:51] [Rank 0] step:7181/10000 train_time:553996ms step_avg:77.15ms +[2025-09-02 15:28:51] [Rank 0] step:7181/10000 train_time:553996ms step_avg:77.15ms +[2025-09-02 15:28:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:28:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:29:04] [Rank 0] PRINT: step:7200/10000 val_loss:3.7347 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.6,q75/q25=55.22 attn_vo:H=0.8497,top10E=0.13,eRank=307.6,q75/q25=35.77 mlp_w1:H=0.9091,top10E=0.14,eRank=422.9,q75/q25=4.48 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7497,top10E=0.23,eRank=152.6,q75/q25=915.60 train_time:555720ms step_avg:77.18ms +[2025-09-02 15:29:04] [Rank 0] PRINT: step:7200/10000 val_loss:3.7347 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.6,q75/q25=55.22 attn_vo:H=0.8497,top10E=0.13,eRank=307.6,q75/q25=35.77 mlp_w1:H=0.9091,top10E=0.14,eRank=422.9,q75/q25=4.48 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7497,top10E=0.23,eRank=152.6,q75/q25=915.60 train_time:555720ms step_avg:77.18ms +[2025-09-02 15:29:05] [Rank 0] step:7201/10000 train_time:555733ms step_avg:77.17ms +[2025-09-02 15:29:05] [Rank 0] step:7201/10000 train_time:555733ms step_avg:77.17ms +[2025-09-02 15:29:06] [Rank 0] step:7221/10000 train_time:557328ms step_avg:77.18ms +[2025-09-02 15:29:06] [Rank 0] step:7221/10000 train_time:557328ms step_avg:77.18ms +[2025-09-02 15:29:08] [Rank 0] step:7241/10000 train_time:558958ms step_avg:77.19ms +[2025-09-02 15:29:08] [Rank 0] step:7241/10000 train_time:558958ms step_avg:77.19ms +[2025-09-02 15:29:10] [Rank 0] step:7261/10000 train_time:560590ms step_avg:77.21ms +[2025-09-02 15:29:10] [Rank 0] step:7261/10000 train_time:560590ms step_avg:77.21ms +[2025-09-02 15:29:11] [Rank 0] step:7281/10000 train_time:562232ms step_avg:77.22ms +[2025-09-02 15:29:11] [Rank 0] step:7281/10000 train_time:562232ms step_avg:77.22ms +[2025-09-02 15:29:13] [Rank 0] step:7301/10000 train_time:563870ms step_avg:77.23ms +[2025-09-02 15:29:13] [Rank 0] step:7301/10000 train_time:563870ms step_avg:77.23ms +[2025-09-02 15:29:14] [Rank 0] step:7321/10000 train_time:565516ms step_avg:77.25ms +[2025-09-02 15:29:14] [Rank 0] step:7321/10000 train_time:565516ms step_avg:77.25ms +[2025-09-02 15:29:16] 
[Rank 0] step:7341/10000 train_time:567154ms step_avg:77.26ms +[2025-09-02 15:29:16] [Rank 0] step:7341/10000 train_time:567154ms step_avg:77.26ms +[2025-09-02 15:29:18] [Rank 0] step:7361/10000 train_time:568796ms step_avg:77.27ms +[2025-09-02 15:29:18] [Rank 0] step:7361/10000 train_time:568796ms step_avg:77.27ms +[2025-09-02 15:29:19] [Rank 0] step:7381/10000 train_time:570438ms step_avg:77.28ms +[2025-09-02 15:29:19] [Rank 0] step:7381/10000 train_time:570438ms step_avg:77.28ms +[2025-09-02 15:29:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:29:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:29:33] [Rank 0] PRINT: step:7400/10000 val_loss:3.7159 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.2,q75/q25=54.95 attn_vo:H=0.8503,top10E=0.13,eRank=308.5,q75/q25=35.45 mlp_w1:H=0.9097,top10E=0.14,eRank=424.5,q75/q25=4.45 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.87 vo_prod:H=0.7506,top10E=0.23,eRank=153.5,q75/q25=907.76 train_time:572138ms step_avg:77.32ms +[2025-09-02 15:29:33] [Rank 0] PRINT: step:7400/10000 val_loss:3.7159 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=176.2,q75/q25=54.95 attn_vo:H=0.8503,top10E=0.13,eRank=308.5,q75/q25=35.45 mlp_w1:H=0.9097,top10E=0.14,eRank=424.5,q75/q25=4.45 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.87 vo_prod:H=0.7506,top10E=0.23,eRank=153.5,q75/q25=907.76 train_time:572138ms step_avg:77.32ms +[2025-09-02 15:29:33] [Rank 0] step:7401/10000 train_time:572152ms step_avg:77.31ms +[2025-09-02 15:29:33] [Rank 0] step:7401/10000 train_time:572152ms step_avg:77.31ms +[2025-09-02 15:29:34] [Rank 0] step:7421/10000 train_time:573727ms step_avg:77.31ms +[2025-09-02 15:29:34] [Rank 0] step:7421/10000 train_time:573727ms step_avg:77.31ms +[2025-09-02 15:29:36] [Rank 0] step:7441/10000 train_time:575356ms step_avg:77.32ms 
+[2025-09-02 15:29:36] [Rank 0] step:7441/10000 train_time:575356ms step_avg:77.32ms +[2025-09-02 15:29:38] [Rank 0] step:7461/10000 train_time:576991ms step_avg:77.33ms +[2025-09-02 15:29:38] [Rank 0] step:7461/10000 train_time:576991ms step_avg:77.33ms +[2025-09-02 15:29:39] [Rank 0] step:7481/10000 train_time:578632ms step_avg:77.35ms +[2025-09-02 15:29:39] [Rank 0] step:7481/10000 train_time:578632ms step_avg:77.35ms +[2025-09-02 15:29:41] [Rank 0] step:7501/10000 train_time:580273ms step_avg:77.36ms +[2025-09-02 15:29:41] [Rank 0] step:7501/10000 train_time:580273ms step_avg:77.36ms +[2025-09-02 15:29:43] [Rank 0] step:7521/10000 train_time:581913ms step_avg:77.37ms +[2025-09-02 15:29:43] [Rank 0] step:7521/10000 train_time:581913ms step_avg:77.37ms +[2025-09-02 15:29:44] [Rank 0] step:7541/10000 train_time:583718ms step_avg:77.41ms +[2025-09-02 15:29:44] [Rank 0] step:7541/10000 train_time:583718ms step_avg:77.41ms +[2025-09-02 15:29:46] [Rank 0] step:7561/10000 train_time:585237ms step_avg:77.40ms +[2025-09-02 15:29:46] [Rank 0] step:7561/10000 train_time:585237ms step_avg:77.40ms +[2025-09-02 15:29:48] [Rank 0] step:7581/10000 train_time:586886ms step_avg:77.42ms +[2025-09-02 15:29:48] [Rank 0] step:7581/10000 train_time:586886ms step_avg:77.42ms +[2025-09-02 15:29:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:29:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:30:01] [Rank 0] PRINT: step:7600/10000 val_loss:3.7095 svd_entropy: attn_qk:H=0.7754,top10E=0.26,eRank=176.8,q75/q25=54.61 attn_vo:H=0.8508,top10E=0.13,eRank=309.4,q75/q25=35.16 mlp_w1:H=0.9102,top10E=0.14,eRank=426.0,q75/q25=4.43 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7514,top10E=0.23,eRank=154.3,q75/q25=874.67 train_time:588619ms step_avg:77.45ms +[2025-09-02 15:30:01] [Rank 0] PRINT: step:7600/10000 val_loss:3.7095 svd_entropy: attn_qk:H=0.7754,top10E=0.26,eRank=176.8,q75/q25=54.61 attn_vo:H=0.8508,top10E=0.13,eRank=309.4,q75/q25=35.16 mlp_w1:H=0.9102,top10E=0.14,eRank=426.0,q75/q25=4.43 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7514,top10E=0.23,eRank=154.3,q75/q25=874.67 train_time:588619ms step_avg:77.45ms +[2025-09-02 15:30:01] [Rank 0] step:7601/10000 train_time:588635ms step_avg:77.44ms +[2025-09-02 15:30:01] [Rank 0] step:7601/10000 train_time:588635ms step_avg:77.44ms +[2025-09-02 15:30:03] [Rank 0] step:7621/10000 train_time:590190ms step_avg:77.44ms +[2025-09-02 15:30:03] [Rank 0] step:7621/10000 train_time:590190ms step_avg:77.44ms +[2025-09-02 15:30:04] [Rank 0] step:7641/10000 train_time:591824ms step_avg:77.45ms +[2025-09-02 15:30:04] [Rank 0] step:7641/10000 train_time:591824ms step_avg:77.45ms +[2025-09-02 15:30:06] [Rank 0] step:7661/10000 train_time:593463ms step_avg:77.47ms +[2025-09-02 15:30:06] [Rank 0] step:7661/10000 train_time:593463ms step_avg:77.47ms +[2025-09-02 15:30:08] [Rank 0] step:7681/10000 train_time:595095ms step_avg:77.48ms +[2025-09-02 15:30:08] [Rank 0] step:7681/10000 train_time:595095ms step_avg:77.48ms +[2025-09-02 15:30:09] [Rank 0] step:7701/10000 train_time:596729ms step_avg:77.49ms +[2025-09-02 15:30:09] [Rank 0] step:7701/10000 train_time:596729ms step_avg:77.49ms +[2025-09-02 15:30:11] [Rank 0] step:7721/10000 train_time:598376ms step_avg:77.50ms +[2025-09-02 15:30:11] [Rank 0] step:7721/10000 train_time:598376ms step_avg:77.50ms +[2025-09-02 15:30:13] 
[Rank 0] step:7741/10000 train_time:600014ms step_avg:77.51ms +[2025-09-02 15:30:13] [Rank 0] step:7741/10000 train_time:600014ms step_avg:77.51ms +[2025-09-02 15:30:14] [Rank 0] step:7761/10000 train_time:601660ms step_avg:77.52ms +[2025-09-02 15:30:14] [Rank 0] step:7761/10000 train_time:601660ms step_avg:77.52ms +[2025-09-02 15:30:16] [Rank 0] step:7781/10000 train_time:603299ms step_avg:77.53ms +[2025-09-02 15:30:16] [Rank 0] step:7781/10000 train_time:603299ms step_avg:77.53ms +[2025-09-02 15:30:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:30:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:30:30] [Rank 0] PRINT: step:7800/10000 val_loss:3.6948 svd_entropy: attn_qk:H=0.7759,top10E=0.26,eRank=177.3,q75/q25=54.35 attn_vo:H=0.8513,top10E=0.13,eRank=310.2,q75/q25=34.87 mlp_w1:H=0.9107,top10E=0.14,eRank=427.3,q75/q25=4.42 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7523,top10E=0.23,eRank=155.2,q75/q25=840.04 train_time:605035ms step_avg:77.57ms +[2025-09-02 15:30:30] [Rank 0] PRINT: step:7800/10000 val_loss:3.6948 svd_entropy: attn_qk:H=0.7759,top10E=0.26,eRank=177.3,q75/q25=54.35 attn_vo:H=0.8513,top10E=0.13,eRank=310.2,q75/q25=34.87 mlp_w1:H=0.9107,top10E=0.14,eRank=427.3,q75/q25=4.42 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.88 vo_prod:H=0.7523,top10E=0.23,eRank=155.2,q75/q25=840.04 train_time:605035ms step_avg:77.57ms +[2025-09-02 15:30:30] [Rank 0] step:7801/10000 train_time:605048ms step_avg:77.56ms +[2025-09-02 15:30:30] [Rank 0] step:7801/10000 train_time:605048ms step_avg:77.56ms +[2025-09-02 15:30:31] [Rank 0] step:7821/10000 train_time:606616ms step_avg:77.56ms +[2025-09-02 15:30:31] [Rank 0] step:7821/10000 train_time:606616ms step_avg:77.56ms +[2025-09-02 15:30:33] [Rank 0] step:7841/10000 train_time:608252ms step_avg:77.57ms 
+[2025-09-02 15:30:33] [Rank 0] step:7841/10000 train_time:608252ms step_avg:77.57ms +[2025-09-02 15:30:35] [Rank 0] step:7861/10000 train_time:609898ms step_avg:77.59ms +[2025-09-02 15:30:35] [Rank 0] step:7861/10000 train_time:609898ms step_avg:77.59ms +[2025-09-02 15:30:36] [Rank 0] step:7881/10000 train_time:611540ms step_avg:77.60ms +[2025-09-02 15:30:36] [Rank 0] step:7881/10000 train_time:611540ms step_avg:77.60ms +[2025-09-02 15:30:38] [Rank 0] step:7901/10000 train_time:613177ms step_avg:77.61ms +[2025-09-02 15:30:38] [Rank 0] step:7901/10000 train_time:613177ms step_avg:77.61ms +[2025-09-02 15:30:40] [Rank 0] step:7921/10000 train_time:614818ms step_avg:77.62ms +[2025-09-02 15:30:40] [Rank 0] step:7921/10000 train_time:614818ms step_avg:77.62ms +[2025-09-02 15:30:41] [Rank 0] step:7941/10000 train_time:616465ms step_avg:77.63ms +[2025-09-02 15:30:41] [Rank 0] step:7941/10000 train_time:616465ms step_avg:77.63ms +[2025-09-02 15:30:43] [Rank 0] step:7961/10000 train_time:618108ms step_avg:77.64ms +[2025-09-02 15:30:43] [Rank 0] step:7961/10000 train_time:618108ms step_avg:77.64ms +[2025-09-02 15:30:45] [Rank 0] step:7981/10000 train_time:619743ms step_avg:77.65ms +[2025-09-02 15:30:45] [Rank 0] step:7981/10000 train_time:619743ms step_avg:77.65ms +[2025-09-02 15:30:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:30:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:30:58] [Rank 0] PRINT: step:8000/10000 val_loss:3.6802 svd_entropy: attn_qk:H=0.7763,top10E=0.26,eRank=177.8,q75/q25=54.10 attn_vo:H=0.8517,top10E=0.13,eRank=311.0,q75/q25=34.52 mlp_w1:H=0.9111,top10E=0.13,eRank=428.5,q75/q25=4.40 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.88 vo_prod:H=0.7531,top10E=0.22,eRank=156.0,q75/q25=818.61 train_time:621470ms step_avg:77.68ms +[2025-09-02 15:30:58] [Rank 0] PRINT: step:8000/10000 val_loss:3.6802 svd_entropy: attn_qk:H=0.7763,top10E=0.26,eRank=177.8,q75/q25=54.10 attn_vo:H=0.8517,top10E=0.13,eRank=311.0,q75/q25=34.52 mlp_w1:H=0.9111,top10E=0.13,eRank=428.5,q75/q25=4.40 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.88 vo_prod:H=0.7531,top10E=0.22,eRank=156.0,q75/q25=818.61 train_time:621470ms step_avg:77.68ms +[2025-09-02 15:30:58] [Rank 0] step:8001/10000 train_time:621483ms step_avg:77.68ms +[2025-09-02 15:30:58] [Rank 0] step:8001/10000 train_time:621483ms step_avg:77.68ms +[2025-09-02 15:31:00] [Rank 0] step:8021/10000 train_time:623053ms step_avg:77.68ms +[2025-09-02 15:31:00] [Rank 0] step:8021/10000 train_time:623053ms step_avg:77.68ms +[2025-09-02 15:31:01] [Rank 0] step:8041/10000 train_time:624695ms step_avg:77.69ms +[2025-09-02 15:31:01] [Rank 0] step:8041/10000 train_time:624695ms step_avg:77.69ms +[2025-09-02 15:31:03] [Rank 0] step:8061/10000 train_time:626336ms step_avg:77.70ms +[2025-09-02 15:31:03] [Rank 0] step:8061/10000 train_time:626336ms step_avg:77.70ms +[2025-09-02 15:31:05] [Rank 0] step:8081/10000 train_time:627969ms step_avg:77.71ms +[2025-09-02 15:31:05] [Rank 0] step:8081/10000 train_time:627969ms step_avg:77.71ms +[2025-09-02 15:31:06] [Rank 0] step:8101/10000 train_time:629619ms step_avg:77.72ms +[2025-09-02 15:31:06] [Rank 0] step:8101/10000 train_time:629619ms step_avg:77.72ms +[2025-09-02 15:31:08] [Rank 0] step:8121/10000 train_time:631257ms step_avg:77.73ms +[2025-09-02 15:31:08] [Rank 0] step:8121/10000 train_time:631257ms step_avg:77.73ms +[2025-09-02 15:31:10] 
[Rank 0] step:8141/10000 train_time:633086ms step_avg:77.77ms +[2025-09-02 15:31:10] [Rank 0] step:8141/10000 train_time:633086ms step_avg:77.77ms +[2025-09-02 15:31:12] [Rank 0] step:8161/10000 train_time:634739ms step_avg:77.78ms +[2025-09-02 15:31:12] [Rank 0] step:8161/10000 train_time:634739ms step_avg:77.78ms +[2025-09-02 15:31:13] [Rank 0] step:8181/10000 train_time:636410ms step_avg:77.79ms +[2025-09-02 15:31:13] [Rank 0] step:8181/10000 train_time:636410ms step_avg:77.79ms +[2025-09-02 15:31:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:31:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:31:27] [Rank 0] PRINT: step:8200/10000 val_loss:3.6705 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.2,q75/q25=53.84 attn_vo:H=0.8521,top10E=0.13,eRank=311.7,q75/q25=34.24 mlp_w1:H=0.9115,top10E=0.13,eRank=429.5,q75/q25=4.38 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7538,top10E=0.22,eRank=156.7,q75/q25=801.81 train_time:638189ms step_avg:77.83ms +[2025-09-02 15:31:27] [Rank 0] PRINT: step:8200/10000 val_loss:3.6705 svd_entropy: attn_qk:H=0.7767,top10E=0.26,eRank=178.2,q75/q25=53.84 attn_vo:H=0.8521,top10E=0.13,eRank=311.7,q75/q25=34.24 mlp_w1:H=0.9115,top10E=0.13,eRank=429.5,q75/q25=4.38 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7538,top10E=0.22,eRank=156.7,q75/q25=801.81 train_time:638189ms step_avg:77.83ms +[2025-09-02 15:31:27] [Rank 0] step:8201/10000 train_time:638202ms step_avg:77.82ms +[2025-09-02 15:31:27] [Rank 0] step:8201/10000 train_time:638202ms step_avg:77.82ms +[2025-09-02 15:31:29] [Rank 0] step:8221/10000 train_time:639818ms step_avg:77.83ms +[2025-09-02 15:31:29] [Rank 0] step:8221/10000 train_time:639818ms step_avg:77.83ms +[2025-09-02 15:31:30] [Rank 0] step:8241/10000 train_time:641495ms step_avg:77.84ms 
+[2025-09-02 15:31:30] [Rank 0] step:8241/10000 train_time:641495ms step_avg:77.84ms +[2025-09-02 15:31:32] [Rank 0] step:8261/10000 train_time:643166ms step_avg:77.86ms +[2025-09-02 15:31:32] [Rank 0] step:8261/10000 train_time:643166ms step_avg:77.86ms +[2025-09-02 15:31:34] [Rank 0] step:8281/10000 train_time:644835ms step_avg:77.87ms +[2025-09-02 15:31:34] [Rank 0] step:8281/10000 train_time:644835ms step_avg:77.87ms +[2025-09-02 15:31:35] [Rank 0] step:8301/10000 train_time:646503ms step_avg:77.88ms +[2025-09-02 15:31:35] [Rank 0] step:8301/10000 train_time:646503ms step_avg:77.88ms +[2025-09-02 15:31:37] [Rank 0] step:8321/10000 train_time:648161ms step_avg:77.89ms +[2025-09-02 15:31:37] [Rank 0] step:8321/10000 train_time:648161ms step_avg:77.89ms +[2025-09-02 15:31:39] [Rank 0] step:8341/10000 train_time:649833ms step_avg:77.91ms +[2025-09-02 15:31:39] [Rank 0] step:8341/10000 train_time:649833ms step_avg:77.91ms +[2025-09-02 15:31:40] [Rank 0] step:8361/10000 train_time:651506ms step_avg:77.92ms +[2025-09-02 15:31:40] [Rank 0] step:8361/10000 train_time:651506ms step_avg:77.92ms +[2025-09-02 15:31:42] [Rank 0] step:8381/10000 train_time:653171ms step_avg:77.93ms +[2025-09-02 15:31:42] [Rank 0] step:8381/10000 train_time:653171ms step_avg:77.93ms +[2025-09-02 15:31:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:31:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:31:56] [Rank 0] PRINT: step:8400/10000 val_loss:3.6591 svd_entropy: attn_qk:H=0.7769,top10E=0.26,eRank=178.5,q75/q25=53.57 attn_vo:H=0.8525,top10E=0.13,eRank=312.3,q75/q25=34.04 mlp_w1:H=0.9119,top10E=0.13,eRank=430.5,q75/q25=4.37 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7545,top10E=0.22,eRank=157.3,q75/q25=785.09 train_time:654924ms step_avg:77.97ms +[2025-09-02 15:31:56] [Rank 0] PRINT: step:8400/10000 val_loss:3.6591 svd_entropy: attn_qk:H=0.7769,top10E=0.26,eRank=178.5,q75/q25=53.57 attn_vo:H=0.8525,top10E=0.13,eRank=312.3,q75/q25=34.04 mlp_w1:H=0.9119,top10E=0.13,eRank=430.5,q75/q25=4.37 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7545,top10E=0.22,eRank=157.3,q75/q25=785.09 train_time:654924ms step_avg:77.97ms +[2025-09-02 15:31:56] [Rank 0] step:8401/10000 train_time:654938ms step_avg:77.96ms +[2025-09-02 15:31:56] [Rank 0] step:8401/10000 train_time:654938ms step_avg:77.96ms +[2025-09-02 15:31:57] [Rank 0] step:8421/10000 train_time:656524ms step_avg:77.96ms +[2025-09-02 15:31:57] [Rank 0] step:8421/10000 train_time:656524ms step_avg:77.96ms +[2025-09-02 15:31:59] [Rank 0] step:8441/10000 train_time:658189ms step_avg:77.98ms +[2025-09-02 15:31:59] [Rank 0] step:8441/10000 train_time:658189ms step_avg:77.98ms +[2025-09-02 15:32:01] [Rank 0] step:8461/10000 train_time:659852ms step_avg:77.99ms +[2025-09-02 15:32:01] [Rank 0] step:8461/10000 train_time:659852ms step_avg:77.99ms +[2025-09-02 15:32:02] [Rank 0] step:8481/10000 train_time:661524ms step_avg:78.00ms +[2025-09-02 15:32:02] [Rank 0] step:8481/10000 train_time:661524ms step_avg:78.00ms +[2025-09-02 15:32:04] [Rank 0] step:8501/10000 train_time:663213ms step_avg:78.02ms +[2025-09-02 15:32:04] [Rank 0] step:8501/10000 train_time:663213ms step_avg:78.02ms +[2025-09-02 15:32:06] [Rank 0] step:8521/10000 train_time:664890ms step_avg:78.03ms +[2025-09-02 15:32:06] [Rank 0] step:8521/10000 train_time:664890ms step_avg:78.03ms +[2025-09-02 15:32:07] 
[Rank 0] step:8541/10000 train_time:666574ms step_avg:78.04ms +[2025-09-02 15:32:07] [Rank 0] step:8541/10000 train_time:666574ms step_avg:78.04ms +[2025-09-02 15:32:09] [Rank 0] step:8561/10000 train_time:668250ms step_avg:78.06ms +[2025-09-02 15:32:09] [Rank 0] step:8561/10000 train_time:668250ms step_avg:78.06ms +[2025-09-02 15:32:11] [Rank 0] step:8581/10000 train_time:669924ms step_avg:78.07ms +[2025-09-02 15:32:11] [Rank 0] step:8581/10000 train_time:669924ms step_avg:78.07ms +[2025-09-02 15:32:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:32:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:32:24] [Rank 0] PRINT: step:8600/10000 val_loss:3.6494 svd_entropy: attn_qk:H=0.7772,top10E=0.26,eRank=178.8,q75/q25=53.47 attn_vo:H=0.8528,top10E=0.13,eRank=312.9,q75/q25=33.68 mlp_w1:H=0.9122,top10E=0.13,eRank=431.4,q75/q25=4.35 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7551,top10E=0.22,eRank=158.0,q75/q25=780.55 train_time:671673ms step_avg:78.10ms +[2025-09-02 15:32:24] [Rank 0] PRINT: step:8600/10000 val_loss:3.6494 svd_entropy: attn_qk:H=0.7772,top10E=0.26,eRank=178.8,q75/q25=53.47 attn_vo:H=0.8528,top10E=0.13,eRank=312.9,q75/q25=33.68 mlp_w1:H=0.9122,top10E=0.13,eRank=431.4,q75/q25=4.35 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7551,top10E=0.22,eRank=158.0,q75/q25=780.55 train_time:671673ms step_avg:78.10ms +[2025-09-02 15:32:24] [Rank 0] step:8601/10000 train_time:671686ms step_avg:78.09ms +[2025-09-02 15:32:24] [Rank 0] step:8601/10000 train_time:671686ms step_avg:78.09ms +[2025-09-02 15:32:26] [Rank 0] step:8621/10000 train_time:673273ms step_avg:78.10ms +[2025-09-02 15:32:26] [Rank 0] step:8621/10000 train_time:673273ms step_avg:78.10ms +[2025-09-02 15:32:28] [Rank 0] step:8641/10000 train_time:674939ms step_avg:78.11ms 
+[2025-09-02 15:32:28] [Rank 0] step:8641/10000 train_time:674939ms step_avg:78.11ms +[2025-09-02 15:32:29] [Rank 0] step:8661/10000 train_time:676608ms step_avg:78.12ms +[2025-09-02 15:32:29] [Rank 0] step:8661/10000 train_time:676608ms step_avg:78.12ms +[2025-09-02 15:32:31] [Rank 0] step:8681/10000 train_time:678276ms step_avg:78.13ms +[2025-09-02 15:32:31] [Rank 0] step:8681/10000 train_time:678276ms step_avg:78.13ms +[2025-09-02 15:32:33] [Rank 0] step:8701/10000 train_time:679935ms step_avg:78.14ms +[2025-09-02 15:32:33] [Rank 0] step:8701/10000 train_time:679935ms step_avg:78.14ms +[2025-09-02 15:32:34] [Rank 0] step:8721/10000 train_time:681607ms step_avg:78.16ms +[2025-09-02 15:32:34] [Rank 0] step:8721/10000 train_time:681607ms step_avg:78.16ms +[2025-09-02 15:32:36] [Rank 0] step:8741/10000 train_time:683265ms step_avg:78.17ms +[2025-09-02 15:32:36] [Rank 0] step:8741/10000 train_time:683265ms step_avg:78.17ms +[2025-09-02 15:32:38] [Rank 0] step:8761/10000 train_time:684932ms step_avg:78.18ms +[2025-09-02 15:32:38] [Rank 0] step:8761/10000 train_time:684932ms step_avg:78.18ms +[2025-09-02 15:32:39] [Rank 0] step:8781/10000 train_time:686602ms step_avg:78.19ms +[2025-09-02 15:32:39] [Rank 0] step:8781/10000 train_time:686602ms step_avg:78.19ms +[2025-09-02 15:32:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:32:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:32:53] [Rank 0] PRINT: step:8800/10000 val_loss:3.6400 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.0,q75/q25=53.34 attn_vo:H=0.8532,top10E=0.13,eRank=313.4,q75/q25=33.62 mlp_w1:H=0.9125,top10E=0.13,eRank=432.2,q75/q25=4.34 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.87 vo_prod:H=0.7557,top10E=0.22,eRank=158.6,q75/q25=758.76 train_time:688361ms step_avg:78.22ms +[2025-09-02 15:32:53] [Rank 0] PRINT: step:8800/10000 val_loss:3.6400 svd_entropy: attn_qk:H=0.7774,top10E=0.26,eRank=179.0,q75/q25=53.34 attn_vo:H=0.8532,top10E=0.13,eRank=313.4,q75/q25=33.62 mlp_w1:H=0.9125,top10E=0.13,eRank=432.2,q75/q25=4.34 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.87 vo_prod:H=0.7557,top10E=0.22,eRank=158.6,q75/q25=758.76 train_time:688361ms step_avg:78.22ms +[2025-09-02 15:32:53] [Rank 0] step:8801/10000 train_time:688374ms step_avg:78.22ms +[2025-09-02 15:32:53] [Rank 0] step:8801/10000 train_time:688374ms step_avg:78.22ms +[2025-09-02 15:32:55] [Rank 0] step:8821/10000 train_time:689992ms step_avg:78.22ms +[2025-09-02 15:32:55] [Rank 0] step:8821/10000 train_time:689992ms step_avg:78.22ms +[2025-09-02 15:32:56] [Rank 0] step:8841/10000 train_time:691678ms step_avg:78.24ms +[2025-09-02 15:32:56] [Rank 0] step:8841/10000 train_time:691678ms step_avg:78.24ms +[2025-09-02 15:32:58] [Rank 0] step:8861/10000 train_time:693368ms step_avg:78.25ms +[2025-09-02 15:32:58] [Rank 0] step:8861/10000 train_time:693368ms step_avg:78.25ms +[2025-09-02 15:33:00] [Rank 0] step:8881/10000 train_time:695034ms step_avg:78.26ms +[2025-09-02 15:33:00] [Rank 0] step:8881/10000 train_time:695034ms step_avg:78.26ms +[2025-09-02 15:33:01] [Rank 0] step:8901/10000 train_time:696702ms step_avg:78.27ms +[2025-09-02 15:33:01] [Rank 0] step:8901/10000 train_time:696702ms step_avg:78.27ms +[2025-09-02 15:33:03] [Rank 0] step:8921/10000 train_time:698386ms step_avg:78.29ms +[2025-09-02 15:33:03] [Rank 0] step:8921/10000 train_time:698386ms step_avg:78.29ms +[2025-09-02 15:33:05] 
[Rank 0] step:8941/10000 train_time:700062ms step_avg:78.30ms +[2025-09-02 15:33:05] [Rank 0] step:8941/10000 train_time:700062ms step_avg:78.30ms +[2025-09-02 15:33:06] [Rank 0] step:8961/10000 train_time:701728ms step_avg:78.31ms +[2025-09-02 15:33:06] [Rank 0] step:8961/10000 train_time:701728ms step_avg:78.31ms +[2025-09-02 15:33:08] [Rank 0] step:8981/10000 train_time:703396ms step_avg:78.32ms +[2025-09-02 15:33:08] [Rank 0] step:8981/10000 train_time:703396ms step_avg:78.32ms +[2025-09-02 15:33:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:33:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:33:21] [Rank 0] PRINT: step:9000/10000 val_loss:3.6304 svd_entropy: attn_qk:H=0.7777,top10E=0.26,eRank=179.3,q75/q25=53.34 attn_vo:H=0.8535,top10E=0.13,eRank=313.9,q75/q25=33.48 mlp_w1:H=0.9127,top10E=0.13,eRank=432.9,q75/q25=4.33 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.88 vo_prod:H=0.7563,top10E=0.22,eRank=159.2,q75/q25=754.74 train_time:705149ms step_avg:78.35ms +[2025-09-02 15:33:21] [Rank 0] PRINT: step:9000/10000 val_loss:3.6304 svd_entropy: attn_qk:H=0.7777,top10E=0.26,eRank=179.3,q75/q25=53.34 attn_vo:H=0.8535,top10E=0.13,eRank=313.9,q75/q25=33.48 mlp_w1:H=0.9127,top10E=0.13,eRank=432.9,q75/q25=4.33 mlp_w2:H=0.9709,top10E=0.04,eRank=633.2,q75/q25=2.88 vo_prod:H=0.7563,top10E=0.22,eRank=159.2,q75/q25=754.74 train_time:705149ms step_avg:78.35ms +[2025-09-02 15:33:21] [Rank 0] step:9001/10000 train_time:705163ms step_avg:78.34ms +[2025-09-02 15:33:21] [Rank 0] step:9001/10000 train_time:705163ms step_avg:78.34ms +[2025-09-02 15:33:23] [Rank 0] step:9021/10000 train_time:706751ms step_avg:78.35ms +[2025-09-02 15:33:23] [Rank 0] step:9021/10000 train_time:706751ms step_avg:78.35ms +[2025-09-02 15:33:25] [Rank 0] step:9041/10000 train_time:708417ms step_avg:78.36ms 
+[2025-09-02 15:33:25] [Rank 0] step:9041/10000 train_time:708417ms step_avg:78.36ms +[2025-09-02 15:33:26] [Rank 0] step:9061/10000 train_time:710100ms step_avg:78.37ms +[2025-09-02 15:33:26] [Rank 0] step:9061/10000 train_time:710100ms step_avg:78.37ms +[2025-09-02 15:33:28] [Rank 0] step:9081/10000 train_time:711775ms step_avg:78.38ms +[2025-09-02 15:33:28] [Rank 0] step:9081/10000 train_time:711775ms step_avg:78.38ms +[2025-09-02 15:33:30] [Rank 0] step:9101/10000 train_time:713461ms step_avg:78.39ms +[2025-09-02 15:33:30] [Rank 0] step:9101/10000 train_time:713461ms step_avg:78.39ms +[2025-09-02 15:33:31] [Rank 0] step:9121/10000 train_time:715131ms step_avg:78.40ms +[2025-09-02 15:33:31] [Rank 0] step:9121/10000 train_time:715131ms step_avg:78.40ms +[2025-09-02 15:33:33] [Rank 0] step:9141/10000 train_time:716790ms step_avg:78.41ms +[2025-09-02 15:33:33] [Rank 0] step:9141/10000 train_time:716790ms step_avg:78.41ms +[2025-09-02 15:33:35] [Rank 0] step:9161/10000 train_time:718453ms step_avg:78.43ms +[2025-09-02 15:33:35] [Rank 0] step:9161/10000 train_time:718453ms step_avg:78.43ms +[2025-09-02 15:33:36] [Rank 0] step:9181/10000 train_time:720156ms step_avg:78.44ms +[2025-09-02 15:33:36] [Rank 0] step:9181/10000 train_time:720156ms step_avg:78.44ms +[2025-09-02 15:33:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:33:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:33:50] [Rank 0] PRINT: step:9200/10000 val_loss:3.6223 svd_entropy: attn_qk:H=0.7779,top10E=0.26,eRank=179.5,q75/q25=53.40 attn_vo:H=0.8537,top10E=0.13,eRank=314.3,q75/q25=33.34 mlp_w1:H=0.9130,top10E=0.13,eRank=433.5,q75/q25=4.32 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7567,top10E=0.22,eRank=159.6,q75/q25=743.47 train_time:721919ms step_avg:78.47ms +[2025-09-02 15:33:50] [Rank 0] PRINT: step:9200/10000 val_loss:3.6223 svd_entropy: attn_qk:H=0.7779,top10E=0.26,eRank=179.5,q75/q25=53.40 attn_vo:H=0.8537,top10E=0.13,eRank=314.3,q75/q25=33.34 mlp_w1:H=0.9130,top10E=0.13,eRank=433.5,q75/q25=4.32 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7567,top10E=0.22,eRank=159.6,q75/q25=743.47 train_time:721919ms step_avg:78.47ms +[2025-09-02 15:33:50] [Rank 0] step:9201/10000 train_time:721933ms step_avg:78.46ms +[2025-09-02 15:33:50] [Rank 0] step:9201/10000 train_time:721933ms step_avg:78.46ms +[2025-09-02 15:33:52] [Rank 0] step:9221/10000 train_time:723551ms step_avg:78.47ms +[2025-09-02 15:33:52] [Rank 0] step:9221/10000 train_time:723551ms step_avg:78.47ms +[2025-09-02 15:33:53] [Rank 0] step:9241/10000 train_time:725228ms step_avg:78.48ms +[2025-09-02 15:33:53] [Rank 0] step:9241/10000 train_time:725228ms step_avg:78.48ms +[2025-09-02 15:33:55] [Rank 0] step:9261/10000 train_time:726908ms step_avg:78.49ms +[2025-09-02 15:33:55] [Rank 0] step:9261/10000 train_time:726908ms step_avg:78.49ms +[2025-09-02 15:33:57] [Rank 0] step:9281/10000 train_time:728572ms step_avg:78.50ms +[2025-09-02 15:33:57] [Rank 0] step:9281/10000 train_time:728572ms step_avg:78.50ms +[2025-09-02 15:33:58] [Rank 0] step:9301/10000 train_time:730310ms step_avg:78.52ms +[2025-09-02 15:33:58] [Rank 0] step:9301/10000 train_time:730310ms step_avg:78.52ms +[2025-09-02 15:34:00] [Rank 0] step:9321/10000 train_time:731985ms step_avg:78.53ms +[2025-09-02 15:34:00] [Rank 0] step:9321/10000 train_time:731985ms step_avg:78.53ms +[2025-09-02 15:34:02] 
[Rank 0] step:9341/10000 train_time:733656ms step_avg:78.54ms +[2025-09-02 15:34:02] [Rank 0] step:9341/10000 train_time:733656ms step_avg:78.54ms +[2025-09-02 15:34:03] [Rank 0] step:9361/10000 train_time:735334ms step_avg:78.55ms +[2025-09-02 15:34:03] [Rank 0] step:9361/10000 train_time:735334ms step_avg:78.55ms +[2025-09-02 15:34:05] [Rank 0] step:9381/10000 train_time:737021ms step_avg:78.57ms +[2025-09-02 15:34:05] [Rank 0] step:9381/10000 train_time:737021ms step_avg:78.57ms +[2025-09-02 15:34:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:34:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:34:19] [Rank 0] PRINT: step:9400/10000 val_loss:3.6145 svd_entropy: attn_qk:H=0.7780,top10E=0.26,eRank=179.6,q75/q25=53.24 attn_vo:H=0.8539,top10E=0.13,eRank=314.6,q75/q25=33.21 mlp_w1:H=0.9131,top10E=0.13,eRank=434.0,q75/q25=4.32 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7571,top10E=0.22,eRank=160.0,q75/q25=737.46 train_time:738786ms step_avg:78.59ms +[2025-09-02 15:34:19] [Rank 0] PRINT: step:9400/10000 val_loss:3.6145 svd_entropy: attn_qk:H=0.7780,top10E=0.26,eRank=179.6,q75/q25=53.24 attn_vo:H=0.8539,top10E=0.13,eRank=314.6,q75/q25=33.21 mlp_w1:H=0.9131,top10E=0.13,eRank=434.0,q75/q25=4.32 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7571,top10E=0.22,eRank=160.0,q75/q25=737.46 train_time:738786ms step_avg:78.59ms +[2025-09-02 15:34:19] [Rank 0] step:9401/10000 train_time:738799ms step_avg:78.59ms +[2025-09-02 15:34:19] [Rank 0] step:9401/10000 train_time:738799ms step_avg:78.59ms +[2025-09-02 15:34:21] [Rank 0] step:9421/10000 train_time:740408ms step_avg:78.59ms +[2025-09-02 15:34:21] [Rank 0] step:9421/10000 train_time:740408ms step_avg:78.59ms +[2025-09-02 15:34:22] [Rank 0] step:9441/10000 train_time:742079ms step_avg:78.60ms 
+[2025-09-02 15:34:22] [Rank 0] step:9441/10000 train_time:742079ms step_avg:78.60ms +[2025-09-02 15:34:24] [Rank 0] step:9461/10000 train_time:743754ms step_avg:78.61ms +[2025-09-02 15:34:24] [Rank 0] step:9461/10000 train_time:743754ms step_avg:78.61ms +[2025-09-02 15:34:26] [Rank 0] step:9481/10000 train_time:745426ms step_avg:78.62ms +[2025-09-02 15:34:26] [Rank 0] step:9481/10000 train_time:745426ms step_avg:78.62ms +[2025-09-02 15:34:27] [Rank 0] step:9501/10000 train_time:747114ms step_avg:78.64ms +[2025-09-02 15:34:27] [Rank 0] step:9501/10000 train_time:747114ms step_avg:78.64ms +[2025-09-02 15:34:29] [Rank 0] step:9521/10000 train_time:748779ms step_avg:78.65ms +[2025-09-02 15:34:29] [Rank 0] step:9521/10000 train_time:748779ms step_avg:78.65ms +[2025-09-02 15:34:31] [Rank 0] step:9541/10000 train_time:750453ms step_avg:78.66ms +[2025-09-02 15:34:31] [Rank 0] step:9541/10000 train_time:750453ms step_avg:78.66ms +[2025-09-02 15:34:32] [Rank 0] step:9561/10000 train_time:752121ms step_avg:78.67ms +[2025-09-02 15:34:32] [Rank 0] step:9561/10000 train_time:752121ms step_avg:78.67ms +[2025-09-02 15:34:34] [Rank 0] step:9581/10000 train_time:753797ms step_avg:78.68ms +[2025-09-02 15:34:34] [Rank 0] step:9581/10000 train_time:753797ms step_avg:78.68ms +[2025-09-02 15:34:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:34:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:34:47] [Rank 0] PRINT: step:9600/10000 val_loss:3.6085 svd_entropy: attn_qk:H=0.7781,top10E=0.26,eRank=179.8,q75/q25=53.06 attn_vo:H=0.8541,top10E=0.13,eRank=314.9,q75/q25=33.07 mlp_w1:H=0.9133,top10E=0.13,eRank=434.5,q75/q25=4.31 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.88 vo_prod:H=0.7575,top10E=0.22,eRank=160.4,q75/q25=726.30 train_time:755570ms step_avg:78.71ms +[2025-09-02 15:34:47] [Rank 0] PRINT: step:9600/10000 val_loss:3.6085 svd_entropy: attn_qk:H=0.7781,top10E=0.26,eRank=179.8,q75/q25=53.06 attn_vo:H=0.8541,top10E=0.13,eRank=314.9,q75/q25=33.07 mlp_w1:H=0.9133,top10E=0.13,eRank=434.5,q75/q25=4.31 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.88 vo_prod:H=0.7575,top10E=0.22,eRank=160.4,q75/q25=726.30 train_time:755570ms step_avg:78.71ms +[2025-09-02 15:34:48] [Rank 0] step:9601/10000 train_time:755583ms step_avg:78.70ms +[2025-09-02 15:34:48] [Rank 0] step:9601/10000 train_time:755583ms step_avg:78.70ms +[2025-09-02 15:34:49] [Rank 0] step:9621/10000 train_time:757182ms step_avg:78.70ms +[2025-09-02 15:34:49] [Rank 0] step:9621/10000 train_time:757182ms step_avg:78.70ms +[2025-09-02 15:34:51] [Rank 0] step:9641/10000 train_time:758862ms step_avg:78.71ms +[2025-09-02 15:34:51] [Rank 0] step:9641/10000 train_time:758862ms step_avg:78.71ms +[2025-09-02 15:34:53] [Rank 0] step:9661/10000 train_time:760568ms step_avg:78.73ms +[2025-09-02 15:34:53] [Rank 0] step:9661/10000 train_time:760568ms step_avg:78.73ms +[2025-09-02 15:34:54] [Rank 0] step:9681/10000 train_time:762265ms step_avg:78.74ms +[2025-09-02 15:34:54] [Rank 0] step:9681/10000 train_time:762265ms step_avg:78.74ms +[2025-09-02 15:34:56] [Rank 0] step:9701/10000 train_time:763978ms step_avg:78.75ms +[2025-09-02 15:34:56] [Rank 0] step:9701/10000 train_time:763978ms step_avg:78.75ms +[2025-09-02 15:34:58] [Rank 0] step:9721/10000 train_time:765671ms step_avg:78.76ms +[2025-09-02 15:34:58] [Rank 0] step:9721/10000 train_time:765671ms step_avg:78.76ms +[2025-09-02 15:34:59] 
[Rank 0] step:9741/10000 train_time:767390ms step_avg:78.78ms +[2025-09-02 15:34:59] [Rank 0] step:9741/10000 train_time:767390ms step_avg:78.78ms +[2025-09-02 15:35:01] [Rank 0] step:9761/10000 train_time:769097ms step_avg:78.79ms +[2025-09-02 15:35:01] [Rank 0] step:9761/10000 train_time:769097ms step_avg:78.79ms +[2025-09-02 15:35:03] [Rank 0] step:9781/10000 train_time:770805ms step_avg:78.81ms +[2025-09-02 15:35:03] [Rank 0] step:9781/10000 train_time:770805ms step_avg:78.81ms +[2025-09-02 15:35:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:35:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:35:17] [Rank 0] PRINT: step:9800/10000 val_loss:3.6015 svd_entropy: attn_qk:H=0.7782,top10E=0.26,eRank=179.8,q75/q25=53.03 attn_vo:H=0.8542,top10E=0.13,eRank=315.2,q75/q25=32.99 mlp_w1:H=0.9134,top10E=0.13,eRank=434.8,q75/q25=4.30 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.88 vo_prod:H=0.7578,top10E=0.22,eRank=160.7,q75/q25=724.80 train_time:772611ms step_avg:78.84ms +[2025-09-02 15:35:17] [Rank 0] PRINT: step:9800/10000 val_loss:3.6015 svd_entropy: attn_qk:H=0.7782,top10E=0.26,eRank=179.8,q75/q25=53.03 attn_vo:H=0.8542,top10E=0.13,eRank=315.2,q75/q25=32.99 mlp_w1:H=0.9134,top10E=0.13,eRank=434.8,q75/q25=4.30 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.88 vo_prod:H=0.7578,top10E=0.22,eRank=160.7,q75/q25=724.80 train_time:772611ms step_avg:78.84ms +[2025-09-02 15:35:17] [Rank 0] step:9801/10000 train_time:772624ms step_avg:78.83ms +[2025-09-02 15:35:17] [Rank 0] step:9801/10000 train_time:772624ms step_avg:78.83ms +[2025-09-02 15:35:18] [Rank 0] step:9821/10000 train_time:774242ms step_avg:78.84ms +[2025-09-02 15:35:18] [Rank 0] step:9821/10000 train_time:774242ms step_avg:78.84ms +[2025-09-02 15:35:20] [Rank 0] step:9841/10000 train_time:775951ms step_avg:78.85ms 
+[2025-09-02 15:35:20] [Rank 0] step:9841/10000 train_time:775951ms step_avg:78.85ms +[2025-09-02 15:35:22] [Rank 0] step:9861/10000 train_time:777638ms step_avg:78.86ms +[2025-09-02 15:35:22] [Rank 0] step:9861/10000 train_time:777638ms step_avg:78.86ms +[2025-09-02 15:35:23] [Rank 0] step:9881/10000 train_time:779324ms step_avg:78.87ms +[2025-09-02 15:35:23] [Rank 0] step:9881/10000 train_time:779324ms step_avg:78.87ms +[2025-09-02 15:35:25] [Rank 0] step:9901/10000 train_time:781021ms step_avg:78.88ms +[2025-09-02 15:35:25] [Rank 0] step:9901/10000 train_time:781021ms step_avg:78.88ms +[2025-09-02 15:35:27] [Rank 0] step:9921/10000 train_time:782718ms step_avg:78.90ms +[2025-09-02 15:35:27] [Rank 0] step:9921/10000 train_time:782718ms step_avg:78.90ms +[2025-09-02 15:35:29] [Rank 0] step:9941/10000 train_time:784419ms step_avg:78.91ms +[2025-09-02 15:35:29] [Rank 0] step:9941/10000 train_time:784419ms step_avg:78.91ms +[2025-09-02 15:35:30] [Rank 0] step:9961/10000 train_time:786121ms step_avg:78.92ms +[2025-09-02 15:35:30] [Rank 0] step:9961/10000 train_time:786121ms step_avg:78.92ms +[2025-09-02 15:35:32] [Rank 0] step:9981/10000 train_time:787817ms step_avg:78.93ms +[2025-09-02 15:35:32] [Rank 0] step:9981/10000 train_time:787817ms step_avg:78.93ms +[2025-09-02 15:35:34] [Rank 0] step:10000/10000 train_time:789437ms step_avg:78.94ms +[2025-09-02 15:35:34] [Rank 0] step:10000/10000 train_time:789437ms step_avg:78.94ms +[2025-09-02 15:35:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:35:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:35:45] [Rank 0] PRINT: step:10000/10000 val_loss:3.5958 svd_entropy: attn_qk:H=0.7783,top10E=0.26,eRank=179.9,q75/q25=52.99 attn_vo:H=0.8543,top10E=0.13,eRank=315.3,q75/q25=32.94 mlp_w1:H=0.9135,top10E=0.13,eRank=435.0,q75/q25=4.30 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7580,top10E=0.22,eRank=160.9,q75/q25=718.41 train_time:789617ms step_avg:78.96ms +[2025-09-02 15:35:45] [Rank 0] PRINT: step:10000/10000 val_loss:3.5958 svd_entropy: attn_qk:H=0.7783,top10E=0.26,eRank=179.9,q75/q25=52.99 attn_vo:H=0.8543,top10E=0.13,eRank=315.3,q75/q25=32.94 mlp_w1:H=0.9135,top10E=0.13,eRank=435.0,q75/q25=4.30 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.87 vo_prod:H=0.7580,top10E=0.22,eRank=160.9,q75/q25=718.41 train_time:789617ms step_avg:78.96ms +[2025-09-02 15:35:45] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 15:35:45 2025 --- +[2025-09-02 15:35:45] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 15:35:45 2025 --- +[2025-09-02 15:35:45] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15116 MiB +[2025-09-02 15:35:45] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15116 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_48/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..d6546fd913cb288eda2f91e016cc0332e178fa96 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_48/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "42158a0f-7b2e-4cd4-b9a3-2bc42bf296cb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_48/training_log_42158a0f-7b2e-4cd4-b9a3-2bc42bf296cb.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_48/training_log_42158a0f-7b2e-4cd4-b9a3-2bc42bf296cb.txt new file mode 100644 index 0000000000000000000000000000000000000000..aec49fbc44489e21973808d41f456d0b8b0805ef --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_48/training_log_42158a0f-7b2e-4cd4-b9a3-2bc42bf296cb.txt @@ -0,0 +1,2984 @@ +[2025-09-02 16:25:25] [Rank 0] PRINT: --- Script Start: Tue Sep 2 16:25:25 2025 --- +[2025-09-02 16:25:25] [Rank 0] PRINT: --- Script Start: Tue Sep 2 16:25:25 2025 --- +[2025-09-02 16:25:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 16:25:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 16:25:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 16:25:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 16:25:25] [Rank 0] PRINT: Using fixed seed: 48 +[2025-09-02 16:25:25] [Rank 0] PRINT: Using fixed seed: 48 +[2025-09-02 16:25:25] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_48 +[2025-09-02 16:25:25] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_48 +[2025-09-02 16:25:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 16:25:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 16:25:25] [Rank 0] PRINT: Constructing model... +[2025-09-02 16:25:25] [Rank 0] PRINT: Constructing model... +[2025-09-02 16:25:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 16:25:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 16:25:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 16:25:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 16:25:27] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 16:25:27] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 16:25:27] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 16:25:27] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 16:25:27] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 16:25:27] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 16:25:27] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 16:25:27] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 16:25:27] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 16:25:27] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 16:25:27] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 16:25:27] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 16:25:27] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 16:25:27] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 16:25:27] [Rank 0] PRINT: Starting warmup... +[2025-09-02 16:25:27] [Rank 0] PRINT: Starting warmup... +[2025-09-02 16:26:10] [Rank 0] PRINT: Warmup complete. +[2025-09-02 16:26:10] [Rank 0] PRINT: Warmup complete. +[2025-09-02 16:26:10] [Rank 0] PRINT: Starting training... +[2025-09-02 16:26:10] [Rank 0] PRINT: Starting training... 
+[2025-09-02 16:26:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:26:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:26:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 16:26:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 16:26:28] [Rank 0] step:21/10000 train_time:1414ms step_avg:67.35ms +[2025-09-02 16:26:28] [Rank 0] step:21/10000 train_time:1414ms step_avg:67.35ms +[2025-09-02 16:26:29] [Rank 0] step:41/10000 train_time:2874ms step_avg:70.10ms +[2025-09-02 16:26:29] [Rank 0] step:41/10000 train_time:2874ms step_avg:70.10ms +[2025-09-02 16:26:31] [Rank 0] step:61/10000 train_time:4335ms step_avg:71.07ms +[2025-09-02 16:26:31] [Rank 0] step:61/10000 train_time:4335ms step_avg:71.07ms +[2025-09-02 16:26:32] [Rank 0] step:81/10000 train_time:5797ms step_avg:71.56ms +[2025-09-02 16:26:32] [Rank 0] step:81/10000 train_time:5797ms step_avg:71.56ms +[2025-09-02 16:26:34] [Rank 0] step:101/10000 train_time:7258ms step_avg:71.86ms +[2025-09-02 16:26:34] [Rank 0] step:101/10000 train_time:7258ms step_avg:71.86ms +[2025-09-02 16:26:35] [Rank 0] step:121/10000 train_time:8719ms step_avg:72.06ms +[2025-09-02 16:26:35] [Rank 0] step:121/10000 
train_time:8719ms step_avg:72.06ms +[2025-09-02 16:26:37] [Rank 0] step:141/10000 train_time:10184ms step_avg:72.22ms +[2025-09-02 16:26:37] [Rank 0] step:141/10000 train_time:10184ms step_avg:72.22ms +[2025-09-02 16:26:38] [Rank 0] step:161/10000 train_time:11646ms step_avg:72.33ms +[2025-09-02 16:26:38] [Rank 0] step:161/10000 train_time:11646ms step_avg:72.33ms +[2025-09-02 16:26:40] [Rank 0] step:181/10000 train_time:13163ms step_avg:72.72ms +[2025-09-02 16:26:40] [Rank 0] step:181/10000 train_time:13163ms step_avg:72.72ms +[2025-09-02 16:26:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:26:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:26:53] [Rank 0] PRINT: step:200/10000 val_loss:6.2420 svd_entropy: attn_qk:H=0.5687,top10E=0.61,eRank=56.4,q75/q25=13.07 attn_vo:H=0.5360,top10E=0.61,eRank=64.2,q75/q25=18.48 mlp_w1:H=0.6549,top10E=0.53,eRank=90.2,q75/q25=2.98 mlp_w2:H=0.7967,top10E=0.18,eRank=202.0,q75/q25=17.19 vo_prod:H=0.3588,top10E=0.88,eRank=15.9,q75/q25=122.99 train_time:14699ms step_avg:73.50ms +[2025-09-02 16:26:53] [Rank 0] PRINT: step:200/10000 val_loss:6.2420 svd_entropy: attn_qk:H=0.5687,top10E=0.61,eRank=56.4,q75/q25=13.07 attn_vo:H=0.5360,top10E=0.61,eRank=64.2,q75/q25=18.48 mlp_w1:H=0.6549,top10E=0.53,eRank=90.2,q75/q25=2.98 mlp_w2:H=0.7967,top10E=0.18,eRank=202.0,q75/q25=17.19 vo_prod:H=0.3588,top10E=0.88,eRank=15.9,q75/q25=122.99 train_time:14699ms step_avg:73.50ms +[2025-09-02 16:26:53] [Rank 0] step:201/10000 train_time:14714ms step_avg:73.21ms +[2025-09-02 16:26:53] [Rank 0] step:201/10000 train_time:14714ms step_avg:73.21ms +[2025-09-02 16:26:54] [Rank 0] step:221/10000 train_time:16130ms step_avg:72.99ms +[2025-09-02 16:26:54] [Rank 0] step:221/10000 train_time:16130ms step_avg:72.99ms +[2025-09-02 16:26:56] [Rank 0] step:241/10000 
train_time:17589ms step_avg:72.98ms +[2025-09-02 16:26:56] [Rank 0] step:241/10000 train_time:17589ms step_avg:72.98ms +[2025-09-02 16:26:57] [Rank 0] step:261/10000 train_time:19048ms step_avg:72.98ms +[2025-09-02 16:26:57] [Rank 0] step:261/10000 train_time:19048ms step_avg:72.98ms +[2025-09-02 16:26:59] [Rank 0] step:281/10000 train_time:20506ms step_avg:72.98ms +[2025-09-02 16:26:59] [Rank 0] step:281/10000 train_time:20506ms step_avg:72.98ms +[2025-09-02 16:27:00] [Rank 0] step:301/10000 train_time:21968ms step_avg:72.98ms +[2025-09-02 16:27:00] [Rank 0] step:301/10000 train_time:21968ms step_avg:72.98ms +[2025-09-02 16:27:02] [Rank 0] step:321/10000 train_time:23427ms step_avg:72.98ms +[2025-09-02 16:27:02] [Rank 0] step:321/10000 train_time:23427ms step_avg:72.98ms +[2025-09-02 16:27:03] [Rank 0] step:341/10000 train_time:24887ms step_avg:72.98ms +[2025-09-02 16:27:03] [Rank 0] step:341/10000 train_time:24887ms step_avg:72.98ms +[2025-09-02 16:27:05] [Rank 0] step:361/10000 train_time:26348ms step_avg:72.99ms +[2025-09-02 16:27:05] [Rank 0] step:361/10000 train_time:26348ms step_avg:72.99ms +[2025-09-02 16:27:06] [Rank 0] step:381/10000 train_time:27808ms step_avg:72.99ms +[2025-09-02 16:27:06] [Rank 0] step:381/10000 train_time:27808ms step_avg:72.99ms +[2025-09-02 16:27:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:27:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:27:19] [Rank 0] PRINT: step:400/10000 val_loss:5.7298 svd_entropy: attn_qk:H=0.6121,top10E=0.50,eRank=71.4,q75/q25=15.91 attn_vo:H=0.6135,top10E=0.45,eRank=90.5,q75/q25=25.36 mlp_w1:H=0.6853,top10E=0.41,eRank=113.0,q75/q25=4.54 mlp_w2:H=0.9255,top10E=0.07,eRank=469.4,q75/q25=6.78 vo_prod:H=0.4627,top10E=0.73,eRank=25.8,q75/q25=223.87 train_time:29343ms step_avg:73.36ms +[2025-09-02 16:27:19] [Rank 0] PRINT: step:400/10000 val_loss:5.7298 svd_entropy: attn_qk:H=0.6121,top10E=0.50,eRank=71.4,q75/q25=15.91 attn_vo:H=0.6135,top10E=0.45,eRank=90.5,q75/q25=25.36 mlp_w1:H=0.6853,top10E=0.41,eRank=113.0,q75/q25=4.54 mlp_w2:H=0.9255,top10E=0.07,eRank=469.4,q75/q25=6.78 vo_prod:H=0.4627,top10E=0.73,eRank=25.8,q75/q25=223.87 train_time:29343ms step_avg:73.36ms +[2025-09-02 16:27:19] [Rank 0] step:401/10000 train_time:29358ms step_avg:73.21ms +[2025-09-02 16:27:19] [Rank 0] step:401/10000 train_time:29358ms step_avg:73.21ms +[2025-09-02 16:27:21] [Rank 0] step:421/10000 train_time:30751ms step_avg:73.04ms +[2025-09-02 16:27:21] [Rank 0] step:421/10000 train_time:30751ms step_avg:73.04ms +[2025-09-02 16:27:22] [Rank 0] step:441/10000 train_time:32210ms step_avg:73.04ms +[2025-09-02 16:27:22] [Rank 0] step:441/10000 train_time:32210ms step_avg:73.04ms +[2025-09-02 16:27:24] [Rank 0] step:461/10000 train_time:33668ms step_avg:73.03ms +[2025-09-02 16:27:24] [Rank 0] step:461/10000 train_time:33668ms step_avg:73.03ms +[2025-09-02 16:27:25] [Rank 0] step:481/10000 train_time:35127ms step_avg:73.03ms +[2025-09-02 16:27:25] [Rank 0] step:481/10000 train_time:35127ms step_avg:73.03ms +[2025-09-02 16:27:27] [Rank 0] step:501/10000 train_time:36586ms step_avg:73.03ms +[2025-09-02 16:27:27] [Rank 0] step:501/10000 train_time:36586ms step_avg:73.03ms +[2025-09-02 16:27:28] [Rank 0] step:521/10000 train_time:38045ms step_avg:73.02ms +[2025-09-02 16:27:28] [Rank 0] step:521/10000 train_time:38045ms step_avg:73.02ms +[2025-09-02 16:27:30] [Rank 0] step:541/10000 
train_time:39504ms step_avg:73.02ms +[2025-09-02 16:27:30] [Rank 0] step:541/10000 train_time:39504ms step_avg:73.02ms +[2025-09-02 16:27:31] [Rank 0] step:561/10000 train_time:40963ms step_avg:73.02ms +[2025-09-02 16:27:31] [Rank 0] step:561/10000 train_time:40963ms step_avg:73.02ms +[2025-09-02 16:27:33] [Rank 0] step:581/10000 train_time:42422ms step_avg:73.02ms +[2025-09-02 16:27:33] [Rank 0] step:581/10000 train_time:42422ms step_avg:73.02ms +[2025-09-02 16:27:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:27:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:27:46] [Rank 0] PRINT: step:600/10000 val_loss:5.4335 svd_entropy: attn_qk:H=0.6441,top10E=0.44,eRank=84.3,q75/q25=20.99 attn_vo:H=0.6623,top10E=0.38,eRank=115.2,q75/q25=34.62 mlp_w1:H=0.7279,top10E=0.35,eRank=143.2,q75/q25=6.33 mlp_w2:H=0.9471,top10E=0.05,eRank=541.2,q75/q25=4.63 vo_prod:H=0.5225,top10E=0.61,eRank=36.0,q75/q25=448.95 train_time:43957ms step_avg:73.26ms +[2025-09-02 16:27:46] [Rank 0] PRINT: step:600/10000 val_loss:5.4335 svd_entropy: attn_qk:H=0.6441,top10E=0.44,eRank=84.3,q75/q25=20.99 attn_vo:H=0.6623,top10E=0.38,eRank=115.2,q75/q25=34.62 mlp_w1:H=0.7279,top10E=0.35,eRank=143.2,q75/q25=6.33 mlp_w2:H=0.9471,top10E=0.05,eRank=541.2,q75/q25=4.63 vo_prod:H=0.5225,top10E=0.61,eRank=36.0,q75/q25=448.95 train_time:43957ms step_avg:73.26ms +[2025-09-02 16:27:46] [Rank 0] step:601/10000 train_time:43971ms step_avg:73.16ms +[2025-09-02 16:27:46] [Rank 0] step:601/10000 train_time:43971ms step_avg:73.16ms +[2025-09-02 16:27:47] [Rank 0] step:621/10000 train_time:45379ms step_avg:73.07ms +[2025-09-02 16:27:47] [Rank 0] step:621/10000 train_time:45379ms step_avg:73.07ms +[2025-09-02 16:27:49] [Rank 0] step:641/10000 train_time:46837ms step_avg:73.07ms +[2025-09-02 16:27:49] [Rank 0] step:641/10000 
train_time:46837ms step_avg:73.07ms +[2025-09-02 16:27:50] [Rank 0] step:661/10000 train_time:48296ms step_avg:73.06ms +[2025-09-02 16:27:50] [Rank 0] step:661/10000 train_time:48296ms step_avg:73.06ms +[2025-09-02 16:27:52] [Rank 0] step:681/10000 train_time:49760ms step_avg:73.07ms +[2025-09-02 16:27:52] [Rank 0] step:681/10000 train_time:49760ms step_avg:73.07ms +[2025-09-02 16:27:53] [Rank 0] step:701/10000 train_time:51218ms step_avg:73.06ms +[2025-09-02 16:27:53] [Rank 0] step:701/10000 train_time:51218ms step_avg:73.06ms +[2025-09-02 16:27:55] [Rank 0] step:721/10000 train_time:52676ms step_avg:73.06ms +[2025-09-02 16:27:55] [Rank 0] step:721/10000 train_time:52676ms step_avg:73.06ms +[2025-09-02 16:27:56] [Rank 0] step:741/10000 train_time:54136ms step_avg:73.06ms +[2025-09-02 16:27:56] [Rank 0] step:741/10000 train_time:54136ms step_avg:73.06ms +[2025-09-02 16:27:58] [Rank 0] step:761/10000 train_time:55607ms step_avg:73.07ms +[2025-09-02 16:27:58] [Rank 0] step:761/10000 train_time:55607ms step_avg:73.07ms +[2025-09-02 16:27:59] [Rank 0] step:781/10000 train_time:57081ms step_avg:73.09ms +[2025-09-02 16:27:59] [Rank 0] step:781/10000 train_time:57081ms step_avg:73.09ms +[2025-09-02 16:28:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:28:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:28:13] [Rank 0] PRINT: step:800/10000 val_loss:5.2047 svd_entropy: attn_qk:H=0.6661,top10E=0.40,eRank=94.7,q75/q25=28.07 attn_vo:H=0.6947,top10E=0.33,eRank=135.8,q75/q25=43.64 mlp_w1:H=0.7597,top10E=0.31,eRank=170.7,q75/q25=7.26 mlp_w2:H=0.9543,top10E=0.05,eRank=567.3,q75/q25=4.04 vo_prod:H=0.5625,top10E=0.53,eRank=45.7,q75/q25=950.27 train_time:58628ms step_avg:73.29ms +[2025-09-02 16:28:13] [Rank 0] PRINT: step:800/10000 val_loss:5.2047 svd_entropy: attn_qk:H=0.6661,top10E=0.40,eRank=94.7,q75/q25=28.07 attn_vo:H=0.6947,top10E=0.33,eRank=135.8,q75/q25=43.64 mlp_w1:H=0.7597,top10E=0.31,eRank=170.7,q75/q25=7.26 mlp_w2:H=0.9543,top10E=0.05,eRank=567.3,q75/q25=4.04 vo_prod:H=0.5625,top10E=0.53,eRank=45.7,q75/q25=950.27 train_time:58628ms step_avg:73.29ms +[2025-09-02 16:28:13] [Rank 0] step:801/10000 train_time:58643ms step_avg:73.21ms +[2025-09-02 16:28:13] [Rank 0] step:801/10000 train_time:58643ms step_avg:73.21ms +[2025-09-02 16:28:14] [Rank 0] step:821/10000 train_time:60039ms step_avg:73.13ms +[2025-09-02 16:28:14] [Rank 0] step:821/10000 train_time:60039ms step_avg:73.13ms +[2025-09-02 16:28:16] [Rank 0] step:841/10000 train_time:61509ms step_avg:73.14ms +[2025-09-02 16:28:16] [Rank 0] step:841/10000 train_time:61509ms step_avg:73.14ms +[2025-09-02 16:28:17] [Rank 0] step:861/10000 train_time:62980ms step_avg:73.15ms +[2025-09-02 16:28:17] [Rank 0] step:861/10000 train_time:62980ms step_avg:73.15ms +[2025-09-02 16:28:18] [Rank 0] step:881/10000 train_time:64451ms step_avg:73.16ms +[2025-09-02 16:28:18] [Rank 0] step:881/10000 train_time:64451ms step_avg:73.16ms +[2025-09-02 16:28:20] [Rank 0] step:901/10000 train_time:65923ms step_avg:73.17ms +[2025-09-02 16:28:20] [Rank 0] step:901/10000 train_time:65923ms step_avg:73.17ms +[2025-09-02 16:28:21] [Rank 0] step:921/10000 train_time:67396ms step_avg:73.18ms +[2025-09-02 16:28:21] [Rank 0] step:921/10000 train_time:67396ms step_avg:73.18ms +[2025-09-02 16:28:23] [Rank 0] step:941/10000 
train_time:68869ms step_avg:73.19ms +[2025-09-02 16:28:23] [Rank 0] step:941/10000 train_time:68869ms step_avg:73.19ms +[2025-09-02 16:28:24] [Rank 0] step:961/10000 train_time:70341ms step_avg:73.20ms +[2025-09-02 16:28:24] [Rank 0] step:961/10000 train_time:70341ms step_avg:73.20ms +[2025-09-02 16:28:26] [Rank 0] step:981/10000 train_time:71813ms step_avg:73.20ms +[2025-09-02 16:28:26] [Rank 0] step:981/10000 train_time:71813ms step_avg:73.20ms +[2025-09-02 16:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:28:39] [Rank 0] PRINT: step:1000/10000 val_loss:5.0331 svd_entropy: attn_qk:H=0.6828,top10E=0.37,eRank=103.5,q75/q25=35.72 attn_vo:H=0.7180,top10E=0.29,eRank=153.6,q75/q25=50.57 mlp_w1:H=0.7835,top10E=0.28,eRank=195.4,q75/q25=7.49 mlp_w2:H=0.9596,top10E=0.05,eRank=587.5,q75/q25=3.65 vo_prod:H=0.5893,top10E=0.47,eRank=54.1,q75/q25=1756.54 train_time:73360ms step_avg:73.36ms +[2025-09-02 16:28:39] [Rank 0] PRINT: step:1000/10000 val_loss:5.0331 svd_entropy: attn_qk:H=0.6828,top10E=0.37,eRank=103.5,q75/q25=35.72 attn_vo:H=0.7180,top10E=0.29,eRank=153.6,q75/q25=50.57 mlp_w1:H=0.7835,top10E=0.28,eRank=195.4,q75/q25=7.49 mlp_w2:H=0.9596,top10E=0.05,eRank=587.5,q75/q25=3.65 vo_prod:H=0.5893,top10E=0.47,eRank=54.1,q75/q25=1756.54 train_time:73360ms step_avg:73.36ms +[2025-09-02 16:28:39] [Rank 0] step:1001/10000 train_time:73374ms step_avg:73.30ms +[2025-09-02 16:28:39] [Rank 0] step:1001/10000 train_time:73374ms step_avg:73.30ms +[2025-09-02 16:28:41] [Rank 0] step:1021/10000 train_time:74790ms step_avg:73.25ms +[2025-09-02 16:28:41] [Rank 0] step:1021/10000 train_time:74790ms step_avg:73.25ms +[2025-09-02 16:28:42] [Rank 0] step:1041/10000 train_time:76259ms step_avg:73.26ms +[2025-09-02 16:28:42] [Rank 0] 
step:1041/10000 train_time:76259ms step_avg:73.26ms +[2025-09-02 16:28:44] [Rank 0] step:1061/10000 train_time:77730ms step_avg:73.26ms +[2025-09-02 16:28:44] [Rank 0] step:1061/10000 train_time:77730ms step_avg:73.26ms +[2025-09-02 16:28:45] [Rank 0] step:1081/10000 train_time:79315ms step_avg:73.37ms +[2025-09-02 16:28:45] [Rank 0] step:1081/10000 train_time:79315ms step_avg:73.37ms +[2025-09-02 16:28:47] [Rank 0] step:1101/10000 train_time:80785ms step_avg:73.37ms +[2025-09-02 16:28:47] [Rank 0] step:1101/10000 train_time:80785ms step_avg:73.37ms +[2025-09-02 16:28:48] [Rank 0] step:1121/10000 train_time:82257ms step_avg:73.38ms +[2025-09-02 16:28:48] [Rank 0] step:1121/10000 train_time:82257ms step_avg:73.38ms +[2025-09-02 16:28:50] [Rank 0] step:1141/10000 train_time:83729ms step_avg:73.38ms +[2025-09-02 16:28:50] [Rank 0] step:1141/10000 train_time:83729ms step_avg:73.38ms +[2025-09-02 16:28:51] [Rank 0] step:1161/10000 train_time:85201ms step_avg:73.39ms +[2025-09-02 16:28:51] [Rank 0] step:1161/10000 train_time:85201ms step_avg:73.39ms +[2025-09-02 16:28:53] [Rank 0] step:1181/10000 train_time:86673ms step_avg:73.39ms +[2025-09-02 16:28:53] [Rank 0] step:1181/10000 train_time:86673ms step_avg:73.39ms +[2025-09-02 16:28:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:28:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:29:06] [Rank 0] PRINT: step:1200/10000 val_loss:4.8669 svd_entropy: attn_qk:H=0.6959,top10E=0.35,eRank=111.4,q75/q25=42.74 attn_vo:H=0.7397,top10E=0.27,eRank=172.6,q75/q25=55.96 mlp_w1:H=0.8019,top10E=0.26,eRank=217.5,q75/q25=7.38 mlp_w2:H=0.9630,top10E=0.04,eRank=600.9,q75/q25=3.41 vo_prod:H=0.6116,top10E=0.43,eRank=62.1,q75/q25=2890.77 train_time:88220ms step_avg:73.52ms +[2025-09-02 16:29:06] [Rank 0] PRINT: step:1200/10000 val_loss:4.8669 svd_entropy: attn_qk:H=0.6959,top10E=0.35,eRank=111.4,q75/q25=42.74 attn_vo:H=0.7397,top10E=0.27,eRank=172.6,q75/q25=55.96 mlp_w1:H=0.8019,top10E=0.26,eRank=217.5,q75/q25=7.38 mlp_w2:H=0.9630,top10E=0.04,eRank=600.9,q75/q25=3.41 vo_prod:H=0.6116,top10E=0.43,eRank=62.1,q75/q25=2890.77 train_time:88220ms step_avg:73.52ms +[2025-09-02 16:29:06] [Rank 0] step:1201/10000 train_time:88234ms step_avg:73.47ms +[2025-09-02 16:29:06] [Rank 0] step:1201/10000 train_time:88234ms step_avg:73.47ms +[2025-09-02 16:29:08] [Rank 0] step:1221/10000 train_time:89648ms step_avg:73.42ms +[2025-09-02 16:29:08] [Rank 0] step:1221/10000 train_time:89648ms step_avg:73.42ms +[2025-09-02 16:29:09] [Rank 0] step:1241/10000 train_time:91117ms step_avg:73.42ms +[2025-09-02 16:29:09] [Rank 0] step:1241/10000 train_time:91117ms step_avg:73.42ms +[2025-09-02 16:29:11] [Rank 0] step:1261/10000 train_time:92586ms step_avg:73.42ms +[2025-09-02 16:29:11] [Rank 0] step:1261/10000 train_time:92586ms step_avg:73.42ms +[2025-09-02 16:29:12] [Rank 0] step:1281/10000 train_time:94057ms step_avg:73.42ms +[2025-09-02 16:29:12] [Rank 0] step:1281/10000 train_time:94057ms step_avg:73.42ms +[2025-09-02 16:29:14] [Rank 0] step:1301/10000 train_time:95529ms step_avg:73.43ms +[2025-09-02 16:29:14] [Rank 0] step:1301/10000 train_time:95529ms step_avg:73.43ms +[2025-09-02 16:29:15] [Rank 0] step:1321/10000 train_time:97000ms step_avg:73.43ms +[2025-09-02 16:29:15] [Rank 0] step:1321/10000 train_time:97000ms step_avg:73.43ms +[2025-09-02 16:29:16] [Rank 0] 
step:1341/10000 train_time:98471ms step_avg:73.43ms +[2025-09-02 16:29:16] [Rank 0] step:1341/10000 train_time:98471ms step_avg:73.43ms +[2025-09-02 16:29:18] [Rank 0] step:1361/10000 train_time:99943ms step_avg:73.43ms +[2025-09-02 16:29:18] [Rank 0] step:1361/10000 train_time:99943ms step_avg:73.43ms +[2025-09-02 16:29:19] [Rank 0] step:1381/10000 train_time:101416ms step_avg:73.44ms +[2025-09-02 16:29:19] [Rank 0] step:1381/10000 train_time:101416ms step_avg:73.44ms +[2025-09-02 16:29:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:29:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:29:33] [Rank 0] PRINT: step:1400/10000 val_loss:4.7415 svd_entropy: attn_qk:H=0.7060,top10E=0.34,eRank=118.1,q75/q25=48.97 attn_vo:H=0.7565,top10E=0.25,eRank=189.2,q75/q25=63.41 mlp_w1:H=0.8168,top10E=0.24,eRank=237.5,q75/q25=7.18 mlp_w2:H=0.9651,top10E=0.04,eRank=609.4,q75/q25=3.27 vo_prod:H=0.6314,top10E=0.40,eRank=70.1,q75/q25=3907.47 train_time:102964ms step_avg:73.55ms +[2025-09-02 16:29:33] [Rank 0] PRINT: step:1400/10000 val_loss:4.7415 svd_entropy: attn_qk:H=0.7060,top10E=0.34,eRank=118.1,q75/q25=48.97 attn_vo:H=0.7565,top10E=0.25,eRank=189.2,q75/q25=63.41 mlp_w1:H=0.8168,top10E=0.24,eRank=237.5,q75/q25=7.18 mlp_w2:H=0.9651,top10E=0.04,eRank=609.4,q75/q25=3.27 vo_prod:H=0.6314,top10E=0.40,eRank=70.1,q75/q25=3907.47 train_time:102964ms step_avg:73.55ms +[2025-09-02 16:29:33] [Rank 0] step:1401/10000 train_time:102978ms step_avg:73.50ms +[2025-09-02 16:29:33] [Rank 0] step:1401/10000 train_time:102978ms step_avg:73.50ms +[2025-09-02 16:29:34] [Rank 0] step:1421/10000 train_time:104381ms step_avg:73.46ms +[2025-09-02 16:29:34] [Rank 0] step:1421/10000 train_time:104381ms step_avg:73.46ms +[2025-09-02 16:29:36] [Rank 0] step:1441/10000 train_time:105850ms step_avg:73.46ms +[2025-09-02 
16:29:36] [Rank 0] step:1441/10000 train_time:105850ms step_avg:73.46ms +[2025-09-02 16:29:37] [Rank 0] step:1461/10000 train_time:107320ms step_avg:73.46ms +[2025-09-02 16:29:37] [Rank 0] step:1461/10000 train_time:107320ms step_avg:73.46ms +[2025-09-02 16:29:39] [Rank 0] step:1481/10000 train_time:108791ms step_avg:73.46ms +[2025-09-02 16:29:39] [Rank 0] step:1481/10000 train_time:108791ms step_avg:73.46ms +[2025-09-02 16:29:40] [Rank 0] step:1501/10000 train_time:110272ms step_avg:73.47ms +[2025-09-02 16:29:40] [Rank 0] step:1501/10000 train_time:110272ms step_avg:73.47ms +[2025-09-02 16:29:42] [Rank 0] step:1521/10000 train_time:111753ms step_avg:73.47ms +[2025-09-02 16:29:42] [Rank 0] step:1521/10000 train_time:111753ms step_avg:73.47ms +[2025-09-02 16:29:43] [Rank 0] step:1541/10000 train_time:113236ms step_avg:73.48ms +[2025-09-02 16:29:43] [Rank 0] step:1541/10000 train_time:113236ms step_avg:73.48ms +[2025-09-02 16:29:44] [Rank 0] step:1561/10000 train_time:114719ms step_avg:73.49ms +[2025-09-02 16:29:44] [Rank 0] step:1561/10000 train_time:114719ms step_avg:73.49ms +[2025-09-02 16:29:46] [Rank 0] step:1581/10000 train_time:116203ms step_avg:73.50ms +[2025-09-02 16:29:46] [Rank 0] step:1581/10000 train_time:116203ms step_avg:73.50ms +[2025-09-02 16:29:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:29:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:29:59] [Rank 0] PRINT: step:1600/10000 val_loss:4.6056 svd_entropy: attn_qk:H=0.7136,top10E=0.33,eRank=123.2,q75/q25=53.67 attn_vo:H=0.7687,top10E=0.23,eRank=201.7,q75/q25=65.34 mlp_w1:H=0.8289,top10E=0.23,eRank=255.5,q75/q25=6.92 mlp_w2:H=0.9665,top10E=0.04,eRank=615.0,q75/q25=3.18 vo_prod:H=0.6446,top10E=0.38,eRank=76.7,q75/q25=4541.32 train_time:117763ms step_avg:73.60ms +[2025-09-02 16:29:59] [Rank 0] PRINT: step:1600/10000 val_loss:4.6056 svd_entropy: attn_qk:H=0.7136,top10E=0.33,eRank=123.2,q75/q25=53.67 attn_vo:H=0.7687,top10E=0.23,eRank=201.7,q75/q25=65.34 mlp_w1:H=0.8289,top10E=0.23,eRank=255.5,q75/q25=6.92 mlp_w2:H=0.9665,top10E=0.04,eRank=615.0,q75/q25=3.18 vo_prod:H=0.6446,top10E=0.38,eRank=76.7,q75/q25=4541.32 train_time:117763ms step_avg:73.60ms +[2025-09-02 16:30:00] [Rank 0] step:1601/10000 train_time:117777ms step_avg:73.56ms +[2025-09-02 16:30:00] [Rank 0] step:1601/10000 train_time:117777ms step_avg:73.56ms +[2025-09-02 16:30:01] [Rank 0] step:1621/10000 train_time:119191ms step_avg:73.53ms +[2025-09-02 16:30:01] [Rank 0] step:1621/10000 train_time:119191ms step_avg:73.53ms +[2025-09-02 16:30:02] [Rank 0] step:1641/10000 train_time:120683ms step_avg:73.54ms +[2025-09-02 16:30:02] [Rank 0] step:1641/10000 train_time:120683ms step_avg:73.54ms +[2025-09-02 16:30:04] [Rank 0] step:1661/10000 train_time:122165ms step_avg:73.55ms +[2025-09-02 16:30:04] [Rank 0] step:1661/10000 train_time:122165ms step_avg:73.55ms +[2025-09-02 16:30:05] [Rank 0] step:1681/10000 train_time:123650ms step_avg:73.56ms +[2025-09-02 16:30:05] [Rank 0] step:1681/10000 train_time:123650ms step_avg:73.56ms +[2025-09-02 16:30:07] [Rank 0] step:1701/10000 train_time:125135ms step_avg:73.57ms +[2025-09-02 16:30:07] [Rank 0] step:1701/10000 train_time:125135ms step_avg:73.57ms +[2025-09-02 16:30:08] [Rank 0] step:1721/10000 train_time:126622ms step_avg:73.57ms +[2025-09-02 16:30:08] [Rank 0] step:1721/10000 train_time:126622ms step_avg:73.57ms +[2025-09-02 16:30:10] 
[Rank 0] step:1741/10000 train_time:128109ms step_avg:73.58ms +[2025-09-02 16:30:10] [Rank 0] step:1741/10000 train_time:128109ms step_avg:73.58ms +[2025-09-02 16:30:11] [Rank 0] step:1761/10000 train_time:129596ms step_avg:73.59ms +[2025-09-02 16:30:11] [Rank 0] step:1761/10000 train_time:129596ms step_avg:73.59ms +[2025-09-02 16:30:13] [Rank 0] step:1781/10000 train_time:131083ms step_avg:73.60ms +[2025-09-02 16:30:13] [Rank 0] step:1781/10000 train_time:131083ms step_avg:73.60ms +[2025-09-02 16:30:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:30:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:30:26] [Rank 0] PRINT: step:1800/10000 val_loss:4.4998 svd_entropy: attn_qk:H=0.7200,top10E=0.32,eRank=127.8,q75/q25=56.74 attn_vo:H=0.7783,top10E=0.22,eRank=212.1,q75/q25=65.96 mlp_w1:H=0.8388,top10E=0.22,eRank=271.3,q75/q25=6.65 mlp_w2:H=0.9674,top10E=0.04,eRank=618.8,q75/q25=3.13 vo_prod:H=0.6562,top10E=0.36,eRank=82.8,q75/q25=4794.60 train_time:132647ms step_avg:73.69ms +[2025-09-02 16:30:26] [Rank 0] PRINT: step:1800/10000 val_loss:4.4998 svd_entropy: attn_qk:H=0.7200,top10E=0.32,eRank=127.8,q75/q25=56.74 attn_vo:H=0.7783,top10E=0.22,eRank=212.1,q75/q25=65.96 mlp_w1:H=0.8388,top10E=0.22,eRank=271.3,q75/q25=6.65 mlp_w2:H=0.9674,top10E=0.04,eRank=618.8,q75/q25=3.13 vo_prod:H=0.6562,top10E=0.36,eRank=82.8,q75/q25=4794.60 train_time:132647ms step_avg:73.69ms +[2025-09-02 16:30:26] [Rank 0] step:1801/10000 train_time:132661ms step_avg:73.66ms +[2025-09-02 16:30:26] [Rank 0] step:1801/10000 train_time:132661ms step_avg:73.66ms +[2025-09-02 16:30:28] [Rank 0] step:1821/10000 train_time:134072ms step_avg:73.63ms +[2025-09-02 16:30:28] [Rank 0] step:1821/10000 train_time:134072ms step_avg:73.63ms +[2025-09-02 16:30:29] [Rank 0] step:1841/10000 train_time:135553ms step_avg:73.63ms 
+[2025-09-02 16:30:29] [Rank 0] step:1841/10000 train_time:135553ms step_avg:73.63ms +[2025-09-02 16:30:31] [Rank 0] step:1861/10000 train_time:137034ms step_avg:73.63ms +[2025-09-02 16:30:31] [Rank 0] step:1861/10000 train_time:137034ms step_avg:73.63ms +[2025-09-02 16:30:32] [Rank 0] step:1881/10000 train_time:138515ms step_avg:73.64ms +[2025-09-02 16:30:32] [Rank 0] step:1881/10000 train_time:138515ms step_avg:73.64ms +[2025-09-02 16:30:34] [Rank 0] step:1901/10000 train_time:139998ms step_avg:73.64ms +[2025-09-02 16:30:34] [Rank 0] step:1901/10000 train_time:139998ms step_avg:73.64ms +[2025-09-02 16:30:35] [Rank 0] step:1921/10000 train_time:141480ms step_avg:73.65ms +[2025-09-02 16:30:35] [Rank 0] step:1921/10000 train_time:141480ms step_avg:73.65ms +[2025-09-02 16:30:37] [Rank 0] step:1941/10000 train_time:142962ms step_avg:73.65ms +[2025-09-02 16:30:37] [Rank 0] step:1941/10000 train_time:142962ms step_avg:73.65ms +[2025-09-02 16:30:38] [Rank 0] step:1961/10000 train_time:144444ms step_avg:73.66ms +[2025-09-02 16:30:38] [Rank 0] step:1961/10000 train_time:144444ms step_avg:73.66ms +[2025-09-02 16:30:40] [Rank 0] step:1981/10000 train_time:145928ms step_avg:73.66ms +[2025-09-02 16:30:40] [Rank 0] step:1981/10000 train_time:145928ms step_avg:73.66ms +[2025-09-02 16:30:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:30:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:30:53] [Rank 0] PRINT: step:2000/10000 val_loss:4.4334 svd_entropy: attn_qk:H=0.7254,top10E=0.31,eRank=131.9,q75/q25=59.76 attn_vo:H=0.7863,top10E=0.21,eRank=221.0,q75/q25=66.22 mlp_w1:H=0.8469,top10E=0.21,eRank=285.0,q75/q25=6.42 mlp_w2:H=0.9681,top10E=0.04,eRank=621.5,q75/q25=3.09 vo_prod:H=0.6662,top10E=0.34,eRank=88.5,q75/q25=4757.86 train_time:147490ms step_avg:73.74ms +[2025-09-02 16:30:53] [Rank 0] PRINT: step:2000/10000 val_loss:4.4334 svd_entropy: attn_qk:H=0.7254,top10E=0.31,eRank=131.9,q75/q25=59.76 attn_vo:H=0.7863,top10E=0.21,eRank=221.0,q75/q25=66.22 mlp_w1:H=0.8469,top10E=0.21,eRank=285.0,q75/q25=6.42 mlp_w2:H=0.9681,top10E=0.04,eRank=621.5,q75/q25=3.09 vo_prod:H=0.6662,top10E=0.34,eRank=88.5,q75/q25=4757.86 train_time:147490ms step_avg:73.74ms +[2025-09-02 16:30:53] [Rank 0] step:2001/10000 train_time:147503ms step_avg:73.71ms +[2025-09-02 16:30:53] [Rank 0] step:2001/10000 train_time:147503ms step_avg:73.71ms +[2025-09-02 16:30:55] [Rank 0] step:2021/10000 train_time:148947ms step_avg:73.70ms +[2025-09-02 16:30:55] [Rank 0] step:2021/10000 train_time:148947ms step_avg:73.70ms +[2025-09-02 16:30:56] [Rank 0] step:2041/10000 train_time:150605ms step_avg:73.79ms +[2025-09-02 16:30:56] [Rank 0] step:2041/10000 train_time:150605ms step_avg:73.79ms +[2025-09-02 16:30:58] [Rank 0] step:2061/10000 train_time:152088ms step_avg:73.79ms +[2025-09-02 16:30:58] [Rank 0] step:2061/10000 train_time:152088ms step_avg:73.79ms +[2025-09-02 16:30:59] [Rank 0] step:2081/10000 train_time:153570ms step_avg:73.80ms +[2025-09-02 16:30:59] [Rank 0] step:2081/10000 train_time:153570ms step_avg:73.80ms +[2025-09-02 16:31:01] [Rank 0] step:2101/10000 train_time:155053ms step_avg:73.80ms +[2025-09-02 16:31:01] [Rank 0] step:2101/10000 train_time:155053ms step_avg:73.80ms +[2025-09-02 16:31:02] [Rank 0] step:2121/10000 train_time:156536ms step_avg:73.80ms +[2025-09-02 16:31:02] [Rank 0] step:2121/10000 train_time:156536ms step_avg:73.80ms +[2025-09-02 16:31:04] 
[Rank 0] step:2141/10000 train_time:158020ms step_avg:73.81ms +[2025-09-02 16:31:04] [Rank 0] step:2141/10000 train_time:158020ms step_avg:73.81ms +[2025-09-02 16:31:05] [Rank 0] step:2161/10000 train_time:159503ms step_avg:73.81ms +[2025-09-02 16:31:05] [Rank 0] step:2161/10000 train_time:159503ms step_avg:73.81ms +[2025-09-02 16:31:07] [Rank 0] step:2181/10000 train_time:160987ms step_avg:73.81ms +[2025-09-02 16:31:07] [Rank 0] step:2181/10000 train_time:160987ms step_avg:73.81ms +[2025-09-02 16:31:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:31:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:31:20] [Rank 0] PRINT: step:2200/10000 val_loss:4.3640 svd_entropy: attn_qk:H=0.7301,top10E=0.31,eRank=135.6,q75/q25=61.39 attn_vo:H=0.7927,top10E=0.20,eRank=228.5,q75/q25=65.69 mlp_w1:H=0.8536,top10E=0.20,eRank=297.0,q75/q25=6.19 mlp_w2:H=0.9686,top10E=0.04,eRank=623.4,q75/q25=3.06 vo_prod:H=0.6743,top10E=0.33,eRank=93.5,q75/q25=4676.69 train_time:162546ms step_avg:73.88ms +[2025-09-02 16:31:20] [Rank 0] PRINT: step:2200/10000 val_loss:4.3640 svd_entropy: attn_qk:H=0.7301,top10E=0.31,eRank=135.6,q75/q25=61.39 attn_vo:H=0.7927,top10E=0.20,eRank=228.5,q75/q25=65.69 mlp_w1:H=0.8536,top10E=0.20,eRank=297.0,q75/q25=6.19 mlp_w2:H=0.9686,top10E=0.04,eRank=623.4,q75/q25=3.06 vo_prod:H=0.6743,top10E=0.33,eRank=93.5,q75/q25=4676.69 train_time:162546ms step_avg:73.88ms +[2025-09-02 16:31:20] [Rank 0] step:2201/10000 train_time:162560ms step_avg:73.86ms +[2025-09-02 16:31:20] [Rank 0] step:2201/10000 train_time:162560ms step_avg:73.86ms +[2025-09-02 16:31:22] [Rank 0] step:2221/10000 train_time:163973ms step_avg:73.83ms +[2025-09-02 16:31:22] [Rank 0] step:2221/10000 train_time:163973ms step_avg:73.83ms +[2025-09-02 16:31:23] [Rank 0] step:2241/10000 train_time:165494ms step_avg:73.85ms 
+[2025-09-02 16:31:23] [Rank 0] step:2241/10000 train_time:165494ms step_avg:73.85ms +[2025-09-02 16:31:25] [Rank 0] step:2261/10000 train_time:167020ms step_avg:73.87ms +[2025-09-02 16:31:25] [Rank 0] step:2261/10000 train_time:167020ms step_avg:73.87ms +[2025-09-02 16:31:26] [Rank 0] step:2281/10000 train_time:168548ms step_avg:73.89ms +[2025-09-02 16:31:26] [Rank 0] step:2281/10000 train_time:168548ms step_avg:73.89ms +[2025-09-02 16:31:28] [Rank 0] step:2301/10000 train_time:170076ms step_avg:73.91ms +[2025-09-02 16:31:28] [Rank 0] step:2301/10000 train_time:170076ms step_avg:73.91ms +[2025-09-02 16:31:29] [Rank 0] step:2321/10000 train_time:171604ms step_avg:73.94ms +[2025-09-02 16:31:29] [Rank 0] step:2321/10000 train_time:171604ms step_avg:73.94ms +[2025-09-02 16:31:31] [Rank 0] step:2341/10000 train_time:173132ms step_avg:73.96ms +[2025-09-02 16:31:31] [Rank 0] step:2341/10000 train_time:173132ms step_avg:73.96ms +[2025-09-02 16:31:32] [Rank 0] step:2361/10000 train_time:174661ms step_avg:73.98ms +[2025-09-02 16:31:32] [Rank 0] step:2361/10000 train_time:174661ms step_avg:73.98ms +[2025-09-02 16:31:34] [Rank 0] step:2381/10000 train_time:176190ms step_avg:74.00ms +[2025-09-02 16:31:34] [Rank 0] step:2381/10000 train_time:176190ms step_avg:74.00ms +[2025-09-02 16:31:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:31:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:31:47] [Rank 0] PRINT: step:2400/10000 val_loss:4.2834 svd_entropy: attn_qk:H=0.7332,top10E=0.31,eRank=137.9,q75/q25=62.36 attn_vo:H=0.7978,top10E=0.19,eRank=234.8,q75/q25=64.82 mlp_w1:H=0.8595,top10E=0.20,eRank=308.2,q75/q25=5.99 mlp_w2:H=0.9689,top10E=0.04,eRank=624.6,q75/q25=3.05 vo_prod:H=0.6809,top10E=0.32,eRank=97.9,q75/q25=4550.76 train_time:177797ms step_avg:74.08ms +[2025-09-02 16:31:47] [Rank 0] PRINT: step:2400/10000 val_loss:4.2834 svd_entropy: attn_qk:H=0.7332,top10E=0.31,eRank=137.9,q75/q25=62.36 attn_vo:H=0.7978,top10E=0.19,eRank=234.8,q75/q25=64.82 mlp_w1:H=0.8595,top10E=0.20,eRank=308.2,q75/q25=5.99 mlp_w2:H=0.9689,top10E=0.04,eRank=624.6,q75/q25=3.05 vo_prod:H=0.6809,top10E=0.32,eRank=97.9,q75/q25=4550.76 train_time:177797ms step_avg:74.08ms +[2025-09-02 16:31:47] [Rank 0] step:2401/10000 train_time:177811ms step_avg:74.06ms +[2025-09-02 16:31:47] [Rank 0] step:2401/10000 train_time:177811ms step_avg:74.06ms +[2025-09-02 16:31:49] [Rank 0] step:2421/10000 train_time:179281ms step_avg:74.05ms +[2025-09-02 16:31:49] [Rank 0] step:2421/10000 train_time:179281ms step_avg:74.05ms +[2025-09-02 16:31:51] [Rank 0] step:2441/10000 train_time:180806ms step_avg:74.07ms +[2025-09-02 16:31:51] [Rank 0] step:2441/10000 train_time:180806ms step_avg:74.07ms +[2025-09-02 16:31:52] [Rank 0] step:2461/10000 train_time:182334ms step_avg:74.09ms +[2025-09-02 16:31:52] [Rank 0] step:2461/10000 train_time:182334ms step_avg:74.09ms +[2025-09-02 16:31:54] [Rank 0] step:2481/10000 train_time:183861ms step_avg:74.11ms +[2025-09-02 16:31:54] [Rank 0] step:2481/10000 train_time:183861ms step_avg:74.11ms +[2025-09-02 16:31:55] [Rank 0] step:2501/10000 train_time:185415ms step_avg:74.14ms +[2025-09-02 16:31:55] [Rank 0] step:2501/10000 train_time:185415ms step_avg:74.14ms +[2025-09-02 16:31:57] [Rank 0] step:2521/10000 train_time:186986ms step_avg:74.17ms +[2025-09-02 16:31:57] [Rank 0] step:2521/10000 train_time:186986ms step_avg:74.17ms +[2025-09-02 16:31:58] 
[Rank 0] step:2541/10000 train_time:188567ms step_avg:74.21ms +[2025-09-02 16:31:58] [Rank 0] step:2541/10000 train_time:188567ms step_avg:74.21ms +[2025-09-02 16:32:00] [Rank 0] step:2561/10000 train_time:190096ms step_avg:74.23ms +[2025-09-02 16:32:00] [Rank 0] step:2561/10000 train_time:190096ms step_avg:74.23ms +[2025-09-02 16:32:01] [Rank 0] step:2581/10000 train_time:191626ms step_avg:74.24ms +[2025-09-02 16:32:01] [Rank 0] step:2581/10000 train_time:191626ms step_avg:74.24ms +[2025-09-02 16:32:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:32:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:32:15] [Rank 0] PRINT: step:2600/10000 val_loss:4.2341 svd_entropy: attn_qk:H=0.7367,top10E=0.30,eRank=140.7,q75/q25=62.93 attn_vo:H=0.8027,top10E=0.19,eRank=240.6,q75/q25=63.97 mlp_w1:H=0.8646,top10E=0.19,eRank=318.2,q75/q25=5.83 mlp_w2:H=0.9691,top10E=0.04,eRank=625.5,q75/q25=3.03 vo_prod:H=0.6877,top10E=0.31,eRank=102.3,q75/q25=4414.85 train_time:193233ms step_avg:74.32ms +[2025-09-02 16:32:15] [Rank 0] PRINT: step:2600/10000 val_loss:4.2341 svd_entropy: attn_qk:H=0.7367,top10E=0.30,eRank=140.7,q75/q25=62.93 attn_vo:H=0.8027,top10E=0.19,eRank=240.6,q75/q25=63.97 mlp_w1:H=0.8646,top10E=0.19,eRank=318.2,q75/q25=5.83 mlp_w2:H=0.9691,top10E=0.04,eRank=625.5,q75/q25=3.03 vo_prod:H=0.6877,top10E=0.31,eRank=102.3,q75/q25=4414.85 train_time:193233ms step_avg:74.32ms +[2025-09-02 16:32:15] [Rank 0] step:2601/10000 train_time:193247ms step_avg:74.30ms +[2025-09-02 16:32:15] [Rank 0] step:2601/10000 train_time:193247ms step_avg:74.30ms +[2025-09-02 16:32:16] [Rank 0] step:2621/10000 train_time:194728ms step_avg:74.30ms +[2025-09-02 16:32:16] [Rank 0] step:2621/10000 train_time:194728ms step_avg:74.30ms +[2025-09-02 16:32:18] [Rank 0] step:2641/10000 train_time:196255ms step_avg:74.31ms 
+[2025-09-02 16:32:18] [Rank 0] step:2641/10000 train_time:196255ms step_avg:74.31ms +[2025-09-02 16:32:19] [Rank 0] step:2661/10000 train_time:197782ms step_avg:74.33ms +[2025-09-02 16:32:19] [Rank 0] step:2661/10000 train_time:197782ms step_avg:74.33ms +[2025-09-02 16:32:21] [Rank 0] step:2681/10000 train_time:199310ms step_avg:74.34ms +[2025-09-02 16:32:21] [Rank 0] step:2681/10000 train_time:199310ms step_avg:74.34ms +[2025-09-02 16:32:22] [Rank 0] step:2701/10000 train_time:200837ms step_avg:74.36ms +[2025-09-02 16:32:22] [Rank 0] step:2701/10000 train_time:200837ms step_avg:74.36ms +[2025-09-02 16:32:24] [Rank 0] step:2721/10000 train_time:202365ms step_avg:74.37ms +[2025-09-02 16:32:24] [Rank 0] step:2721/10000 train_time:202365ms step_avg:74.37ms +[2025-09-02 16:32:26] [Rank 0] step:2741/10000 train_time:203894ms step_avg:74.39ms +[2025-09-02 16:32:26] [Rank 0] step:2741/10000 train_time:203894ms step_avg:74.39ms +[2025-09-02 16:32:27] [Rank 0] step:2761/10000 train_time:205426ms step_avg:74.40ms +[2025-09-02 16:32:27] [Rank 0] step:2761/10000 train_time:205426ms step_avg:74.40ms +[2025-09-02 16:32:29] [Rank 0] step:2781/10000 train_time:206954ms step_avg:74.42ms +[2025-09-02 16:32:29] [Rank 0] step:2781/10000 train_time:206954ms step_avg:74.42ms +[2025-09-02 16:32:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:32:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:32:42] [Rank 0] PRINT: step:2800/10000 val_loss:4.1945 svd_entropy: attn_qk:H=0.7400,top10E=0.30,eRank=143.4,q75/q25=64.07 attn_vo:H=0.8069,top10E=0.18,eRank=245.9,q75/q25=63.22 mlp_w1:H=0.8693,top10E=0.19,eRank=327.7,q75/q25=5.67 mlp_w2:H=0.9693,top10E=0.04,eRank=626.2,q75/q25=3.02 vo_prod:H=0.6936,top10E=0.30,eRank=106.6,q75/q25=4269.51 train_time:208563ms step_avg:74.49ms +[2025-09-02 16:32:42] [Rank 0] PRINT: step:2800/10000 val_loss:4.1945 svd_entropy: attn_qk:H=0.7400,top10E=0.30,eRank=143.4,q75/q25=64.07 attn_vo:H=0.8069,top10E=0.18,eRank=245.9,q75/q25=63.22 mlp_w1:H=0.8693,top10E=0.19,eRank=327.7,q75/q25=5.67 mlp_w2:H=0.9693,top10E=0.04,eRank=626.2,q75/q25=3.02 vo_prod:H=0.6936,top10E=0.30,eRank=106.6,q75/q25=4269.51 train_time:208563ms step_avg:74.49ms +[2025-09-02 16:32:42] [Rank 0] step:2801/10000 train_time:208577ms step_avg:74.47ms +[2025-09-02 16:32:42] [Rank 0] step:2801/10000 train_time:208577ms step_avg:74.47ms +[2025-09-02 16:32:44] [Rank 0] step:2821/10000 train_time:210053ms step_avg:74.46ms +[2025-09-02 16:32:44] [Rank 0] step:2821/10000 train_time:210053ms step_avg:74.46ms +[2025-09-02 16:32:45] [Rank 0] step:2841/10000 train_time:211580ms step_avg:74.47ms +[2025-09-02 16:32:45] [Rank 0] step:2841/10000 train_time:211580ms step_avg:74.47ms +[2025-09-02 16:32:47] [Rank 0] step:2861/10000 train_time:213108ms step_avg:74.49ms +[2025-09-02 16:32:47] [Rank 0] step:2861/10000 train_time:213108ms step_avg:74.49ms +[2025-09-02 16:32:48] [Rank 0] step:2881/10000 train_time:214634ms step_avg:74.50ms +[2025-09-02 16:32:48] [Rank 0] step:2881/10000 train_time:214634ms step_avg:74.50ms +[2025-09-02 16:32:50] [Rank 0] step:2901/10000 train_time:216160ms step_avg:74.51ms +[2025-09-02 16:32:50] [Rank 0] step:2901/10000 train_time:216160ms step_avg:74.51ms +[2025-09-02 16:32:51] [Rank 0] step:2921/10000 train_time:217687ms step_avg:74.52ms +[2025-09-02 16:32:51] [Rank 0] step:2921/10000 train_time:217687ms step_avg:74.52ms +[2025-09-02 
16:32:53] [Rank 0] step:2941/10000 train_time:219214ms step_avg:74.54ms +[2025-09-02 16:32:53] [Rank 0] step:2941/10000 train_time:219214ms step_avg:74.54ms +[2025-09-02 16:32:54] [Rank 0] step:2961/10000 train_time:220742ms step_avg:74.55ms +[2025-09-02 16:32:54] [Rank 0] step:2961/10000 train_time:220742ms step_avg:74.55ms +[2025-09-02 16:32:56] [Rank 0] step:2981/10000 train_time:222277ms step_avg:74.56ms +[2025-09-02 16:32:56] [Rank 0] step:2981/10000 train_time:222277ms step_avg:74.56ms +[2025-09-02 16:32:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:32:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:33:09] [Rank 0] PRINT: step:3000/10000 val_loss:4.1480 svd_entropy: attn_qk:H=0.7429,top10E=0.29,eRank=145.8,q75/q25=64.20 attn_vo:H=0.8105,top10E=0.18,eRank=250.5,q75/q25=62.29 mlp_w1:H=0.8734,top10E=0.18,eRank=336.1,q75/q25=5.54 mlp_w2:H=0.9694,top10E=0.04,eRank=626.8,q75/q25=3.01 vo_prod:H=0.6986,top10E=0.29,eRank=110.1,q75/q25=4102.13 train_time:223893ms step_avg:74.63ms +[2025-09-02 16:33:09] [Rank 0] PRINT: step:3000/10000 val_loss:4.1480 svd_entropy: attn_qk:H=0.7429,top10E=0.29,eRank=145.8,q75/q25=64.20 attn_vo:H=0.8105,top10E=0.18,eRank=250.5,q75/q25=62.29 mlp_w1:H=0.8734,top10E=0.18,eRank=336.1,q75/q25=5.54 mlp_w2:H=0.9694,top10E=0.04,eRank=626.8,q75/q25=3.01 vo_prod:H=0.6986,top10E=0.29,eRank=110.1,q75/q25=4102.13 train_time:223893ms step_avg:74.63ms +[2025-09-02 16:33:09] [Rank 0] step:3001/10000 train_time:223907ms step_avg:74.61ms +[2025-09-02 16:33:09] [Rank 0] step:3001/10000 train_time:223907ms step_avg:74.61ms +[2025-09-02 16:33:11] [Rank 0] step:3021/10000 train_time:225371ms step_avg:74.60ms +[2025-09-02 16:33:11] [Rank 0] step:3021/10000 train_time:225371ms step_avg:74.60ms +[2025-09-02 16:33:12] [Rank 0] step:3041/10000 train_time:226905ms 
step_avg:74.62ms +[2025-09-02 16:33:12] [Rank 0] step:3041/10000 train_time:226905ms step_avg:74.62ms +[2025-09-02 16:33:14] [Rank 0] step:3061/10000 train_time:228440ms step_avg:74.63ms +[2025-09-02 16:33:14] [Rank 0] step:3061/10000 train_time:228440ms step_avg:74.63ms +[2025-09-02 16:33:16] [Rank 0] step:3081/10000 train_time:229976ms step_avg:74.64ms +[2025-09-02 16:33:16] [Rank 0] step:3081/10000 train_time:229976ms step_avg:74.64ms +[2025-09-02 16:33:17] [Rank 0] step:3101/10000 train_time:231512ms step_avg:74.66ms +[2025-09-02 16:33:17] [Rank 0] step:3101/10000 train_time:231512ms step_avg:74.66ms +[2025-09-02 16:33:19] [Rank 0] step:3121/10000 train_time:233047ms step_avg:74.67ms +[2025-09-02 16:33:19] [Rank 0] step:3121/10000 train_time:233047ms step_avg:74.67ms +[2025-09-02 16:33:20] [Rank 0] step:3141/10000 train_time:234582ms step_avg:74.68ms +[2025-09-02 16:33:20] [Rank 0] step:3141/10000 train_time:234582ms step_avg:74.68ms +[2025-09-02 16:33:22] [Rank 0] step:3161/10000 train_time:236118ms step_avg:74.70ms +[2025-09-02 16:33:22] [Rank 0] step:3161/10000 train_time:236118ms step_avg:74.70ms +[2025-09-02 16:33:23] [Rank 0] step:3181/10000 train_time:237654ms step_avg:74.71ms +[2025-09-02 16:33:23] [Rank 0] step:3181/10000 train_time:237654ms step_avg:74.71ms +[2025-09-02 16:33:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:33:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:33:37] [Rank 0] PRINT: step:3200/10000 val_loss:4.1162 svd_entropy: attn_qk:H=0.7454,top10E=0.29,eRank=148.0,q75/q25=64.32 attn_vo:H=0.8137,top10E=0.17,eRank=254.7,q75/q25=61.26 mlp_w1:H=0.8773,top10E=0.18,eRank=344.5,q75/q25=5.44 mlp_w2:H=0.9695,top10E=0.04,eRank=627.3,q75/q25=3.00 vo_prod:H=0.7034,top10E=0.29,eRank=113.7,q75/q25=3988.82 train_time:239271ms step_avg:74.77ms +[2025-09-02 16:33:37] [Rank 0] PRINT: step:3200/10000 val_loss:4.1162 svd_entropy: attn_qk:H=0.7454,top10E=0.29,eRank=148.0,q75/q25=64.32 attn_vo:H=0.8137,top10E=0.17,eRank=254.7,q75/q25=61.26 mlp_w1:H=0.8773,top10E=0.18,eRank=344.5,q75/q25=5.44 mlp_w2:H=0.9695,top10E=0.04,eRank=627.3,q75/q25=3.00 vo_prod:H=0.7034,top10E=0.29,eRank=113.7,q75/q25=3988.82 train_time:239271ms step_avg:74.77ms +[2025-09-02 16:33:37] [Rank 0] step:3201/10000 train_time:239284ms step_avg:74.75ms +[2025-09-02 16:33:37] [Rank 0] step:3201/10000 train_time:239284ms step_avg:74.75ms +[2025-09-02 16:33:38] [Rank 0] step:3221/10000 train_time:240759ms step_avg:74.75ms +[2025-09-02 16:33:38] [Rank 0] step:3221/10000 train_time:240759ms step_avg:74.75ms +[2025-09-02 16:33:40] [Rank 0] step:3241/10000 train_time:242292ms step_avg:74.76ms +[2025-09-02 16:33:40] [Rank 0] step:3241/10000 train_time:242292ms step_avg:74.76ms +[2025-09-02 16:33:41] [Rank 0] step:3261/10000 train_time:243827ms step_avg:74.77ms +[2025-09-02 16:33:41] [Rank 0] step:3261/10000 train_time:243827ms step_avg:74.77ms +[2025-09-02 16:33:43] [Rank 0] step:3281/10000 train_time:245363ms step_avg:74.78ms +[2025-09-02 16:33:43] [Rank 0] step:3281/10000 train_time:245363ms step_avg:74.78ms +[2025-09-02 16:33:44] [Rank 0] step:3301/10000 train_time:246899ms step_avg:74.80ms +[2025-09-02 16:33:44] [Rank 0] step:3301/10000 train_time:246899ms step_avg:74.80ms +[2025-09-02 16:33:46] [Rank 0] step:3321/10000 train_time:248437ms step_avg:74.81ms +[2025-09-02 16:33:46] [Rank 0] step:3321/10000 train_time:248437ms step_avg:74.81ms +[2025-09-02 
16:33:48] [Rank 0] step:3341/10000 train_time:249974ms step_avg:74.82ms +[2025-09-02 16:33:48] [Rank 0] step:3341/10000 train_time:249974ms step_avg:74.82ms +[2025-09-02 16:33:49] [Rank 0] step:3361/10000 train_time:251511ms step_avg:74.83ms +[2025-09-02 16:33:49] [Rank 0] step:3361/10000 train_time:251511ms step_avg:74.83ms +[2025-09-02 16:33:51] [Rank 0] step:3381/10000 train_time:253047ms step_avg:74.84ms +[2025-09-02 16:33:51] [Rank 0] step:3381/10000 train_time:253047ms step_avg:74.84ms +[2025-09-02 16:33:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:33:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:34:04] [Rank 0] PRINT: step:3400/10000 val_loss:4.0741 svd_entropy: attn_qk:H=0.7479,top10E=0.29,eRank=150.2,q75/q25=64.38 attn_vo:H=0.8165,top10E=0.17,eRank=258.5,q75/q25=60.33 mlp_w1:H=0.8808,top10E=0.17,eRank=352.2,q75/q25=5.32 mlp_w2:H=0.9696,top10E=0.04,eRank=627.7,q75/q25=2.99 vo_prod:H=0.7073,top10E=0.28,eRank=116.8,q75/q25=3849.27 train_time:254663ms step_avg:74.90ms +[2025-09-02 16:34:04] [Rank 0] PRINT: step:3400/10000 val_loss:4.0741 svd_entropy: attn_qk:H=0.7479,top10E=0.29,eRank=150.2,q75/q25=64.38 attn_vo:H=0.8165,top10E=0.17,eRank=258.5,q75/q25=60.33 mlp_w1:H=0.8808,top10E=0.17,eRank=352.2,q75/q25=5.32 mlp_w2:H=0.9696,top10E=0.04,eRank=627.7,q75/q25=2.99 vo_prod:H=0.7073,top10E=0.28,eRank=116.8,q75/q25=3849.27 train_time:254663ms step_avg:74.90ms +[2025-09-02 16:34:04] [Rank 0] step:3401/10000 train_time:254677ms step_avg:74.88ms +[2025-09-02 16:34:04] [Rank 0] step:3401/10000 train_time:254677ms step_avg:74.88ms +[2025-09-02 16:34:06] [Rank 0] step:3421/10000 train_time:256140ms step_avg:74.87ms +[2025-09-02 16:34:06] [Rank 0] step:3421/10000 train_time:256140ms step_avg:74.87ms +[2025-09-02 16:34:07] [Rank 0] step:3441/10000 train_time:257676ms 
step_avg:74.88ms +[2025-09-02 16:34:07] [Rank 0] step:3441/10000 train_time:257676ms step_avg:74.88ms +[2025-09-02 16:34:09] [Rank 0] step:3461/10000 train_time:259211ms step_avg:74.89ms +[2025-09-02 16:34:09] [Rank 0] step:3461/10000 train_time:259211ms step_avg:74.89ms +[2025-09-02 16:34:10] [Rank 0] step:3481/10000 train_time:260747ms step_avg:74.91ms +[2025-09-02 16:34:10] [Rank 0] step:3481/10000 train_time:260747ms step_avg:74.91ms +[2025-09-02 16:34:12] [Rank 0] step:3501/10000 train_time:262286ms step_avg:74.92ms +[2025-09-02 16:34:12] [Rank 0] step:3501/10000 train_time:262286ms step_avg:74.92ms +[2025-09-02 16:34:13] [Rank 0] step:3521/10000 train_time:263825ms step_avg:74.93ms +[2025-09-02 16:34:13] [Rank 0] step:3521/10000 train_time:263825ms step_avg:74.93ms +[2025-09-02 16:34:15] [Rank 0] step:3541/10000 train_time:265361ms step_avg:74.94ms +[2025-09-02 16:34:15] [Rank 0] step:3541/10000 train_time:265361ms step_avg:74.94ms +[2025-09-02 16:34:16] [Rank 0] step:3561/10000 train_time:266898ms step_avg:74.95ms +[2025-09-02 16:34:16] [Rank 0] step:3561/10000 train_time:266898ms step_avg:74.95ms +[2025-09-02 16:34:18] [Rank 0] step:3581/10000 train_time:268436ms step_avg:74.96ms +[2025-09-02 16:34:18] [Rank 0] step:3581/10000 train_time:268436ms step_avg:74.96ms +[2025-09-02 16:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:34:31] [Rank 0] PRINT: step:3600/10000 val_loss:4.0617 svd_entropy: attn_qk:H=0.7504,top10E=0.28,eRank=152.5,q75/q25=64.61 attn_vo:H=0.8191,top10E=0.17,eRank=262.0,q75/q25=59.49 mlp_w1:H=0.8837,top10E=0.17,eRank=358.9,q75/q25=5.24 mlp_w2:H=0.9697,top10E=0.04,eRank=628.2,q75/q25=2.99 vo_prod:H=0.7108,top10E=0.28,eRank=119.4,q75/q25=3696.39 train_time:270053ms step_avg:75.01ms +[2025-09-02 16:34:31] [Rank 0] PRINT: step:3600/10000 val_loss:4.0617 svd_entropy: attn_qk:H=0.7504,top10E=0.28,eRank=152.5,q75/q25=64.61 attn_vo:H=0.8191,top10E=0.17,eRank=262.0,q75/q25=59.49 mlp_w1:H=0.8837,top10E=0.17,eRank=358.9,q75/q25=5.24 mlp_w2:H=0.9697,top10E=0.04,eRank=628.2,q75/q25=2.99 vo_prod:H=0.7108,top10E=0.28,eRank=119.4,q75/q25=3696.39 train_time:270053ms step_avg:75.01ms +[2025-09-02 16:34:32] [Rank 0] step:3601/10000 train_time:270067ms step_avg:75.00ms +[2025-09-02 16:34:32] [Rank 0] step:3601/10000 train_time:270067ms step_avg:75.00ms +[2025-09-02 16:34:33] [Rank 0] step:3621/10000 train_time:271539ms step_avg:74.99ms +[2025-09-02 16:34:33] [Rank 0] step:3621/10000 train_time:271539ms step_avg:74.99ms +[2025-09-02 16:34:35] [Rank 0] step:3641/10000 train_time:273076ms step_avg:75.00ms +[2025-09-02 16:34:35] [Rank 0] step:3641/10000 train_time:273076ms step_avg:75.00ms +[2025-09-02 16:34:36] [Rank 0] step:3661/10000 train_time:274611ms step_avg:75.01ms +[2025-09-02 16:34:36] [Rank 0] step:3661/10000 train_time:274611ms step_avg:75.01ms +[2025-09-02 16:34:38] [Rank 0] step:3681/10000 train_time:276147ms step_avg:75.02ms +[2025-09-02 16:34:38] [Rank 0] step:3681/10000 train_time:276147ms step_avg:75.02ms +[2025-09-02 16:34:39] [Rank 0] step:3701/10000 train_time:277685ms step_avg:75.03ms +[2025-09-02 16:34:39] [Rank 0] step:3701/10000 train_time:277685ms step_avg:75.03ms +[2025-09-02 16:34:41] [Rank 0] step:3721/10000 train_time:279249ms step_avg:75.05ms +[2025-09-02 16:34:41] [Rank 0] step:3721/10000 train_time:279249ms step_avg:75.05ms +[2025-09-02 
16:34:42] [Rank 0] step:3741/10000 train_time:280823ms step_avg:75.07ms +[2025-09-02 16:34:42] [Rank 0] step:3741/10000 train_time:280823ms step_avg:75.07ms +[2025-09-02 16:34:44] [Rank 0] step:3761/10000 train_time:282399ms step_avg:75.09ms +[2025-09-02 16:34:44] [Rank 0] step:3761/10000 train_time:282399ms step_avg:75.09ms +[2025-09-02 16:34:46] [Rank 0] step:3781/10000 train_time:283973ms step_avg:75.11ms +[2025-09-02 16:34:46] [Rank 0] step:3781/10000 train_time:283973ms step_avg:75.11ms +[2025-09-02 16:34:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:34:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:34:59] [Rank 0] PRINT: step:3800/10000 val_loss:4.0138 svd_entropy: attn_qk:H=0.7522,top10E=0.28,eRank=154.1,q75/q25=64.35 attn_vo:H=0.8213,top10E=0.16,eRank=265.0,q75/q25=58.46 mlp_w1:H=0.8865,top10E=0.17,eRank=365.2,q75/q25=5.16 mlp_w2:H=0.9698,top10E=0.04,eRank=628.5,q75/q25=2.98 vo_prod:H=0.7140,top10E=0.27,eRank=121.8,q75/q25=3602.55 train_time:285630ms step_avg:75.17ms +[2025-09-02 16:34:59] [Rank 0] PRINT: step:3800/10000 val_loss:4.0138 svd_entropy: attn_qk:H=0.7522,top10E=0.28,eRank=154.1,q75/q25=64.35 attn_vo:H=0.8213,top10E=0.16,eRank=265.0,q75/q25=58.46 mlp_w1:H=0.8865,top10E=0.17,eRank=365.2,q75/q25=5.16 mlp_w2:H=0.9698,top10E=0.04,eRank=628.5,q75/q25=2.98 vo_prod:H=0.7140,top10E=0.27,eRank=121.8,q75/q25=3602.55 train_time:285630ms step_avg:75.17ms +[2025-09-02 16:34:59] [Rank 0] step:3801/10000 train_time:285643ms step_avg:75.15ms +[2025-09-02 16:34:59] [Rank 0] step:3801/10000 train_time:285643ms step_avg:75.15ms +[2025-09-02 16:35:01] [Rank 0] step:3821/10000 train_time:287162ms step_avg:75.15ms +[2025-09-02 16:35:01] [Rank 0] step:3821/10000 train_time:287162ms step_avg:75.15ms +[2025-09-02 16:35:02] [Rank 0] step:3841/10000 train_time:288737ms 
step_avg:75.17ms +[2025-09-02 16:35:02] [Rank 0] step:3841/10000 train_time:288737ms step_avg:75.17ms +[2025-09-02 16:35:04] [Rank 0] step:3861/10000 train_time:290310ms step_avg:75.19ms +[2025-09-02 16:35:04] [Rank 0] step:3861/10000 train_time:290310ms step_avg:75.19ms +[2025-09-02 16:35:05] [Rank 0] step:3881/10000 train_time:291879ms step_avg:75.21ms +[2025-09-02 16:35:05] [Rank 0] step:3881/10000 train_time:291879ms step_avg:75.21ms +[2025-09-02 16:35:07] [Rank 0] step:3901/10000 train_time:293528ms step_avg:75.24ms +[2025-09-02 16:35:07] [Rank 0] step:3901/10000 train_time:293528ms step_avg:75.24ms +[2025-09-02 16:35:09] [Rank 0] step:3921/10000 train_time:295139ms step_avg:75.27ms +[2025-09-02 16:35:09] [Rank 0] step:3921/10000 train_time:295139ms step_avg:75.27ms +[2025-09-02 16:35:10] [Rank 0] step:3941/10000 train_time:296710ms step_avg:75.29ms +[2025-09-02 16:35:10] [Rank 0] step:3941/10000 train_time:296710ms step_avg:75.29ms +[2025-09-02 16:35:12] [Rank 0] step:3961/10000 train_time:298281ms step_avg:75.30ms +[2025-09-02 16:35:12] [Rank 0] step:3961/10000 train_time:298281ms step_avg:75.30ms +[2025-09-02 16:35:13] [Rank 0] step:3981/10000 train_time:299852ms step_avg:75.32ms +[2025-09-02 16:35:13] [Rank 0] step:3981/10000 train_time:299852ms step_avg:75.32ms +[2025-09-02 16:35:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:35:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:35:27] [Rank 0] PRINT: step:4000/10000 val_loss:3.9858 svd_entropy: attn_qk:H=0.7541,top10E=0.28,eRank=155.8,q75/q25=64.16 attn_vo:H=0.8233,top10E=0.16,eRank=267.9,q75/q25=57.61 mlp_w1:H=0.8889,top10E=0.16,eRank=370.9,q75/q25=5.08 mlp_w2:H=0.9699,top10E=0.04,eRank=628.9,q75/q25=2.97 vo_prod:H=0.7168,top10E=0.27,eRank=124.1,q75/q25=3511.17 train_time:301503ms step_avg:75.38ms +[2025-09-02 16:35:27] [Rank 0] PRINT: step:4000/10000 val_loss:3.9858 svd_entropy: attn_qk:H=0.7541,top10E=0.28,eRank=155.8,q75/q25=64.16 attn_vo:H=0.8233,top10E=0.16,eRank=267.9,q75/q25=57.61 mlp_w1:H=0.8889,top10E=0.16,eRank=370.9,q75/q25=5.08 mlp_w2:H=0.9699,top10E=0.04,eRank=628.9,q75/q25=2.97 vo_prod:H=0.7168,top10E=0.27,eRank=124.1,q75/q25=3511.17 train_time:301503ms step_avg:75.38ms +[2025-09-02 16:35:27] [Rank 0] step:4001/10000 train_time:301517ms step_avg:75.36ms +[2025-09-02 16:35:27] [Rank 0] step:4001/10000 train_time:301517ms step_avg:75.36ms +[2025-09-02 16:35:28] [Rank 0] step:4021/10000 train_time:303018ms step_avg:75.36ms +[2025-09-02 16:35:28] [Rank 0] step:4021/10000 train_time:303018ms step_avg:75.36ms +[2025-09-02 16:35:30] [Rank 0] step:4041/10000 train_time:304587ms step_avg:75.37ms +[2025-09-02 16:35:30] [Rank 0] step:4041/10000 train_time:304587ms step_avg:75.37ms +[2025-09-02 16:35:31] [Rank 0] step:4061/10000 train_time:306158ms step_avg:75.39ms +[2025-09-02 16:35:31] [Rank 0] step:4061/10000 train_time:306158ms step_avg:75.39ms +[2025-09-02 16:35:33] [Rank 0] step:4081/10000 train_time:307908ms step_avg:75.45ms +[2025-09-02 16:35:33] [Rank 0] step:4081/10000 train_time:307908ms step_avg:75.45ms +[2025-09-02 16:35:35] [Rank 0] step:4101/10000 train_time:309478ms step_avg:75.46ms +[2025-09-02 16:35:35] [Rank 0] step:4101/10000 train_time:309478ms step_avg:75.46ms +[2025-09-02 16:35:36] [Rank 0] step:4121/10000 train_time:311049ms step_avg:75.48ms +[2025-09-02 16:35:36] [Rank 0] step:4121/10000 train_time:311049ms step_avg:75.48ms +[2025-09-02 
16:35:38] [Rank 0] step:4141/10000 train_time:312620ms step_avg:75.49ms +[2025-09-02 16:35:38] [Rank 0] step:4141/10000 train_time:312620ms step_avg:75.49ms +[2025-09-02 16:35:39] [Rank 0] step:4161/10000 train_time:314190ms step_avg:75.51ms +[2025-09-02 16:35:39] [Rank 0] step:4161/10000 train_time:314190ms step_avg:75.51ms +[2025-09-02 16:35:41] [Rank 0] step:4181/10000 train_time:315764ms step_avg:75.52ms +[2025-09-02 16:35:41] [Rank 0] step:4181/10000 train_time:315764ms step_avg:75.52ms +[2025-09-02 16:35:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:35:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:35:54] [Rank 0] PRINT: step:4200/10000 val_loss:3.9682 svd_entropy: attn_qk:H=0.7559,top10E=0.28,eRank=157.5,q75/q25=63.84 attn_vo:H=0.8253,top10E=0.16,eRank=270.7,q75/q25=56.57 mlp_w1:H=0.8911,top10E=0.16,eRank=376.1,q75/q25=5.02 mlp_w2:H=0.9700,top10E=0.04,eRank=629.2,q75/q25=2.97 vo_prod:H=0.7196,top10E=0.27,eRank=126.3,q75/q25=3391.46 train_time:317415ms step_avg:75.57ms +[2025-09-02 16:35:54] [Rank 0] PRINT: step:4200/10000 val_loss:3.9682 svd_entropy: attn_qk:H=0.7559,top10E=0.28,eRank=157.5,q75/q25=63.84 attn_vo:H=0.8253,top10E=0.16,eRank=270.7,q75/q25=56.57 mlp_w1:H=0.8911,top10E=0.16,eRank=376.1,q75/q25=5.02 mlp_w2:H=0.9700,top10E=0.04,eRank=629.2,q75/q25=2.97 vo_prod:H=0.7196,top10E=0.27,eRank=126.3,q75/q25=3391.46 train_time:317415ms step_avg:75.57ms +[2025-09-02 16:35:54] [Rank 0] step:4201/10000 train_time:317428ms step_avg:75.56ms +[2025-09-02 16:35:54] [Rank 0] step:4201/10000 train_time:317428ms step_avg:75.56ms +[2025-09-02 16:35:56] [Rank 0] step:4221/10000 train_time:318919ms step_avg:75.56ms +[2025-09-02 16:35:56] [Rank 0] step:4221/10000 train_time:318919ms step_avg:75.56ms +[2025-09-02 16:35:58] [Rank 0] step:4241/10000 train_time:320490ms 
step_avg:75.57ms +[2025-09-02 16:35:58] [Rank 0] step:4241/10000 train_time:320490ms step_avg:75.57ms +[2025-09-02 16:35:59] [Rank 0] step:4261/10000 train_time:322061ms step_avg:75.58ms +[2025-09-02 16:35:59] [Rank 0] step:4261/10000 train_time:322061ms step_avg:75.58ms +[2025-09-02 16:36:01] [Rank 0] step:4281/10000 train_time:323634ms step_avg:75.60ms +[2025-09-02 16:36:01] [Rank 0] step:4281/10000 train_time:323634ms step_avg:75.60ms +[2025-09-02 16:36:02] [Rank 0] step:4301/10000 train_time:325205ms step_avg:75.61ms +[2025-09-02 16:36:02] [Rank 0] step:4301/10000 train_time:325205ms step_avg:75.61ms +[2025-09-02 16:36:04] [Rank 0] step:4321/10000 train_time:326780ms step_avg:75.63ms +[2025-09-02 16:36:04] [Rank 0] step:4321/10000 train_time:326780ms step_avg:75.63ms +[2025-09-02 16:36:05] [Rank 0] step:4341/10000 train_time:328350ms step_avg:75.64ms +[2025-09-02 16:36:05] [Rank 0] step:4341/10000 train_time:328350ms step_avg:75.64ms +[2025-09-02 16:36:07] [Rank 0] step:4361/10000 train_time:329924ms step_avg:75.65ms +[2025-09-02 16:36:07] [Rank 0] step:4361/10000 train_time:329924ms step_avg:75.65ms +[2025-09-02 16:36:09] [Rank 0] step:4381/10000 train_time:331631ms step_avg:75.70ms +[2025-09-02 16:36:09] [Rank 0] step:4381/10000 train_time:331631ms step_avg:75.70ms +[2025-09-02 16:36:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:36:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:36:22] [Rank 0] PRINT: step:4400/10000 val_loss:3.9429 svd_entropy: attn_qk:H=0.7576,top10E=0.28,eRank=159.1,q75/q25=63.62 attn_vo:H=0.8270,top10E=0.16,eRank=273.1,q75/q25=55.91 mlp_w1:H=0.8931,top10E=0.16,eRank=380.9,q75/q25=4.96 mlp_w2:H=0.9700,top10E=0.04,eRank=629.4,q75/q25=2.96 vo_prod:H=0.7217,top10E=0.27,eRank=128.1,q75/q25=3344.84 train_time:333244ms step_avg:75.74ms +[2025-09-02 16:36:22] [Rank 0] PRINT: step:4400/10000 val_loss:3.9429 svd_entropy: attn_qk:H=0.7576,top10E=0.28,eRank=159.1,q75/q25=63.62 attn_vo:H=0.8270,top10E=0.16,eRank=273.1,q75/q25=55.91 mlp_w1:H=0.8931,top10E=0.16,eRank=380.9,q75/q25=4.96 mlp_w2:H=0.9700,top10E=0.04,eRank=629.4,q75/q25=2.96 vo_prod:H=0.7217,top10E=0.27,eRank=128.1,q75/q25=3344.84 train_time:333244ms step_avg:75.74ms +[2025-09-02 16:36:22] [Rank 0] step:4401/10000 train_time:333257ms step_avg:75.72ms +[2025-09-02 16:36:22] [Rank 0] step:4401/10000 train_time:333257ms step_avg:75.72ms +[2025-09-02 16:36:23] [Rank 0] step:4421/10000 train_time:334751ms step_avg:75.72ms +[2025-09-02 16:36:23] [Rank 0] step:4421/10000 train_time:334751ms step_avg:75.72ms +[2025-09-02 16:36:25] [Rank 0] step:4441/10000 train_time:336321ms step_avg:75.73ms +[2025-09-02 16:36:25] [Rank 0] step:4441/10000 train_time:336321ms step_avg:75.73ms +[2025-09-02 16:36:27] [Rank 0] step:4461/10000 train_time:337899ms step_avg:75.75ms +[2025-09-02 16:36:27] [Rank 0] step:4461/10000 train_time:337899ms step_avg:75.75ms +[2025-09-02 16:36:28] [Rank 0] step:4481/10000 train_time:339478ms step_avg:75.76ms +[2025-09-02 16:36:28] [Rank 0] step:4481/10000 train_time:339478ms step_avg:75.76ms +[2025-09-02 16:36:30] [Rank 0] step:4501/10000 train_time:341054ms step_avg:75.77ms +[2025-09-02 16:36:30] [Rank 0] step:4501/10000 train_time:341054ms step_avg:75.77ms +[2025-09-02 16:36:31] [Rank 0] step:4521/10000 train_time:342633ms step_avg:75.79ms +[2025-09-02 16:36:31] [Rank 0] step:4521/10000 train_time:342633ms step_avg:75.79ms +[2025-09-02 
16:36:33] [Rank 0] step:4541/10000 train_time:344214ms step_avg:75.80ms +[2025-09-02 16:36:33] [Rank 0] step:4541/10000 train_time:344214ms step_avg:75.80ms +[2025-09-02 16:36:34] [Rank 0] step:4561/10000 train_time:345794ms step_avg:75.82ms +[2025-09-02 16:36:34] [Rank 0] step:4561/10000 train_time:345794ms step_avg:75.82ms +[2025-09-02 16:36:36] [Rank 0] step:4581/10000 train_time:347376ms step_avg:75.83ms +[2025-09-02 16:36:36] [Rank 0] step:4581/10000 train_time:347376ms step_avg:75.83ms +[2025-09-02 16:36:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:36:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:36:49] [Rank 0] PRINT: step:4600/10000 val_loss:3.9169 svd_entropy: attn_qk:H=0.7594,top10E=0.27,eRank=160.8,q75/q25=63.44 attn_vo:H=0.8287,top10E=0.15,eRank=275.6,q75/q25=54.98 mlp_w1:H=0.8950,top10E=0.15,eRank=385.7,q75/q25=4.91 mlp_w2:H=0.9701,top10E=0.04,eRank=629.7,q75/q25=2.95 vo_prod:H=0.7240,top10E=0.26,eRank=129.9,q75/q25=3260.86 train_time:349041ms step_avg:75.88ms +[2025-09-02 16:36:49] [Rank 0] PRINT: step:4600/10000 val_loss:3.9169 svd_entropy: attn_qk:H=0.7594,top10E=0.27,eRank=160.8,q75/q25=63.44 attn_vo:H=0.8287,top10E=0.15,eRank=275.6,q75/q25=54.98 mlp_w1:H=0.8950,top10E=0.15,eRank=385.7,q75/q25=4.91 mlp_w2:H=0.9701,top10E=0.04,eRank=629.7,q75/q25=2.95 vo_prod:H=0.7240,top10E=0.26,eRank=129.9,q75/q25=3260.86 train_time:349041ms step_avg:75.88ms +[2025-09-02 16:36:49] [Rank 0] step:4601/10000 train_time:349054ms step_avg:75.86ms +[2025-09-02 16:36:49] [Rank 0] step:4601/10000 train_time:349054ms step_avg:75.86ms +[2025-09-02 16:36:51] [Rank 0] step:4621/10000 train_time:350558ms step_avg:75.86ms +[2025-09-02 16:36:51] [Rank 0] step:4621/10000 train_time:350558ms step_avg:75.86ms +[2025-09-02 16:36:53] [Rank 0] step:4641/10000 train_time:352137ms 
step_avg:75.88ms +[2025-09-02 16:36:53] [Rank 0] step:4641/10000 train_time:352137ms step_avg:75.88ms +[2025-09-02 16:36:54] [Rank 0] step:4661/10000 train_time:353719ms step_avg:75.89ms +[2025-09-02 16:36:54] [Rank 0] step:4661/10000 train_time:353719ms step_avg:75.89ms +[2025-09-02 16:36:56] [Rank 0] step:4681/10000 train_time:355297ms step_avg:75.90ms +[2025-09-02 16:36:56] [Rank 0] step:4681/10000 train_time:355297ms step_avg:75.90ms +[2025-09-02 16:36:57] [Rank 0] step:4701/10000 train_time:356878ms step_avg:75.92ms +[2025-09-02 16:36:57] [Rank 0] step:4701/10000 train_time:356878ms step_avg:75.92ms +[2025-09-02 16:36:59] [Rank 0] step:4721/10000 train_time:358467ms step_avg:75.93ms +[2025-09-02 16:36:59] [Rank 0] step:4721/10000 train_time:358467ms step_avg:75.93ms +[2025-09-02 16:37:00] [Rank 0] step:4741/10000 train_time:360049ms step_avg:75.94ms +[2025-09-02 16:37:00] [Rank 0] step:4741/10000 train_time:360049ms step_avg:75.94ms +[2025-09-02 16:37:02] [Rank 0] step:4761/10000 train_time:361633ms step_avg:75.96ms +[2025-09-02 16:37:02] [Rank 0] step:4761/10000 train_time:361633ms step_avg:75.96ms +[2025-09-02 16:37:04] [Rank 0] step:4781/10000 train_time:363219ms step_avg:75.97ms +[2025-09-02 16:37:04] [Rank 0] step:4781/10000 train_time:363219ms step_avg:75.97ms +[2025-09-02 16:37:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:37:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:37:17] [Rank 0] PRINT: step:4800/10000 val_loss:3.9042 svd_entropy: attn_qk:H=0.7610,top10E=0.27,eRank=162.4,q75/q25=63.25 attn_vo:H=0.8302,top10E=0.15,eRank=277.8,q75/q25=54.26 mlp_w1:H=0.8967,top10E=0.15,eRank=389.9,q75/q25=4.85 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.95 vo_prod:H=0.7261,top10E=0.26,eRank=131.6,q75/q25=3181.52 train_time:364883ms step_avg:76.02ms +[2025-09-02 16:37:17] [Rank 0] PRINT: step:4800/10000 val_loss:3.9042 svd_entropy: attn_qk:H=0.7610,top10E=0.27,eRank=162.4,q75/q25=63.25 attn_vo:H=0.8302,top10E=0.15,eRank=277.8,q75/q25=54.26 mlp_w1:H=0.8967,top10E=0.15,eRank=389.9,q75/q25=4.85 mlp_w2:H=0.9701,top10E=0.04,eRank=629.9,q75/q25=2.95 vo_prod:H=0.7261,top10E=0.26,eRank=131.6,q75/q25=3181.52 train_time:364883ms step_avg:76.02ms +[2025-09-02 16:37:17] [Rank 0] step:4801/10000 train_time:364899ms step_avg:76.00ms +[2025-09-02 16:37:17] [Rank 0] step:4801/10000 train_time:364899ms step_avg:76.00ms +[2025-09-02 16:37:19] [Rank 0] step:4821/10000 train_time:366404ms step_avg:76.00ms +[2025-09-02 16:37:19] [Rank 0] step:4821/10000 train_time:366404ms step_avg:76.00ms +[2025-09-02 16:37:20] [Rank 0] step:4841/10000 train_time:367981ms step_avg:76.01ms +[2025-09-02 16:37:20] [Rank 0] step:4841/10000 train_time:367981ms step_avg:76.01ms +[2025-09-02 16:37:22] [Rank 0] step:4861/10000 train_time:369563ms step_avg:76.03ms +[2025-09-02 16:37:22] [Rank 0] step:4861/10000 train_time:369563ms step_avg:76.03ms +[2025-09-02 16:37:23] [Rank 0] step:4881/10000 train_time:371140ms step_avg:76.04ms +[2025-09-02 16:37:23] [Rank 0] step:4881/10000 train_time:371140ms step_avg:76.04ms +[2025-09-02 16:37:25] [Rank 0] step:4901/10000 train_time:372715ms step_avg:76.05ms +[2025-09-02 16:37:25] [Rank 0] step:4901/10000 train_time:372715ms step_avg:76.05ms +[2025-09-02 16:37:27] [Rank 0] step:4921/10000 train_time:374295ms step_avg:76.06ms +[2025-09-02 16:37:27] [Rank 0] step:4921/10000 train_time:374295ms step_avg:76.06ms +[2025-09-02 
16:37:28] [Rank 0] step:4941/10000 train_time:375876ms step_avg:76.07ms +[2025-09-02 16:37:28] [Rank 0] step:4941/10000 train_time:375876ms step_avg:76.07ms +[2025-09-02 16:37:30] [Rank 0] step:4961/10000 train_time:377455ms step_avg:76.08ms +[2025-09-02 16:37:30] [Rank 0] step:4961/10000 train_time:377455ms step_avg:76.08ms +[2025-09-02 16:37:31] [Rank 0] step:4981/10000 train_time:379034ms step_avg:76.10ms +[2025-09-02 16:37:31] [Rank 0] step:4981/10000 train_time:379034ms step_avg:76.10ms +[2025-09-02 16:37:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:37:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:37:45] [Rank 0] PRINT: step:5000/10000 val_loss:3.8841 svd_entropy: attn_qk:H=0.7625,top10E=0.27,eRank=163.9,q75/q25=63.07 attn_vo:H=0.8316,top10E=0.15,eRank=279.9,q75/q25=53.63 mlp_w1:H=0.8983,top10E=0.15,eRank=393.9,q75/q25=4.80 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.95 vo_prod:H=0.7278,top10E=0.26,eRank=133.1,q75/q25=3123.40 train_time:380695ms step_avg:76.14ms +[2025-09-02 16:37:45] [Rank 0] PRINT: step:5000/10000 val_loss:3.8841 svd_entropy: attn_qk:H=0.7625,top10E=0.27,eRank=163.9,q75/q25=63.07 attn_vo:H=0.8316,top10E=0.15,eRank=279.9,q75/q25=53.63 mlp_w1:H=0.8983,top10E=0.15,eRank=393.9,q75/q25=4.80 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.95 vo_prod:H=0.7278,top10E=0.26,eRank=133.1,q75/q25=3123.40 train_time:380695ms step_avg:76.14ms +[2025-09-02 16:37:45] [Rank 0] step:5001/10000 train_time:380709ms step_avg:76.13ms +[2025-09-02 16:37:45] [Rank 0] step:5001/10000 train_time:380709ms step_avg:76.13ms +[2025-09-02 16:37:46] [Rank 0] step:5021/10000 train_time:382211ms step_avg:76.12ms +[2025-09-02 16:37:46] [Rank 0] step:5021/10000 train_time:382211ms step_avg:76.12ms +[2025-09-02 16:37:48] [Rank 0] step:5041/10000 train_time:383788ms 
step_avg:76.13ms +[2025-09-02 16:37:48] [Rank 0] step:5041/10000 train_time:383788ms step_avg:76.13ms +[2025-09-02 16:37:49] [Rank 0] step:5061/10000 train_time:385363ms step_avg:76.14ms +[2025-09-02 16:37:49] [Rank 0] step:5061/10000 train_time:385363ms step_avg:76.14ms +[2025-09-02 16:37:51] [Rank 0] step:5081/10000 train_time:386941ms step_avg:76.15ms +[2025-09-02 16:37:51] [Rank 0] step:5081/10000 train_time:386941ms step_avg:76.15ms +[2025-09-02 16:37:53] [Rank 0] step:5101/10000 train_time:388516ms step_avg:76.16ms +[2025-09-02 16:37:53] [Rank 0] step:5101/10000 train_time:388516ms step_avg:76.16ms +[2025-09-02 16:37:54] [Rank 0] step:5121/10000 train_time:390094ms step_avg:76.18ms +[2025-09-02 16:37:54] [Rank 0] step:5121/10000 train_time:390094ms step_avg:76.18ms +[2025-09-02 16:37:56] [Rank 0] step:5141/10000 train_time:391676ms step_avg:76.19ms +[2025-09-02 16:37:56] [Rank 0] step:5141/10000 train_time:391676ms step_avg:76.19ms +[2025-09-02 16:37:57] [Rank 0] step:5161/10000 train_time:393255ms step_avg:76.20ms +[2025-09-02 16:37:57] [Rank 0] step:5161/10000 train_time:393255ms step_avg:76.20ms +[2025-09-02 16:37:59] [Rank 0] step:5181/10000 train_time:394835ms step_avg:76.21ms +[2025-09-02 16:37:59] [Rank 0] step:5181/10000 train_time:394835ms step_avg:76.21ms +[2025-09-02 16:38:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:38:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:38:12] [Rank 0] PRINT: step:5200/10000 val_loss:3.8669 svd_entropy: attn_qk:H=0.7639,top10E=0.27,eRank=165.2,q75/q25=62.50 attn_vo:H=0.8329,top10E=0.15,eRank=281.9,q75/q25=52.84 mlp_w1:H=0.8997,top10E=0.15,eRank=397.5,q75/q25=4.77 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.94 vo_prod:H=0.7296,top10E=0.26,eRank=134.6,q75/q25=3054.98 train_time:396524ms step_avg:76.25ms +[2025-09-02 16:38:12] [Rank 0] PRINT: step:5200/10000 val_loss:3.8669 svd_entropy: attn_qk:H=0.7639,top10E=0.27,eRank=165.2,q75/q25=62.50 attn_vo:H=0.8329,top10E=0.15,eRank=281.9,q75/q25=52.84 mlp_w1:H=0.8997,top10E=0.15,eRank=397.5,q75/q25=4.77 mlp_w2:H=0.9702,top10E=0.04,eRank=630.1,q75/q25=2.94 vo_prod:H=0.7296,top10E=0.26,eRank=134.6,q75/q25=3054.98 train_time:396524ms step_avg:76.25ms +[2025-09-02 16:38:12] [Rank 0] step:5201/10000 train_time:396539ms step_avg:76.24ms +[2025-09-02 16:38:12] [Rank 0] step:5201/10000 train_time:396539ms step_avg:76.24ms +[2025-09-02 16:38:14] [Rank 0] step:5221/10000 train_time:398085ms step_avg:76.25ms +[2025-09-02 16:38:14] [Rank 0] step:5221/10000 train_time:398085ms step_avg:76.25ms +[2025-09-02 16:38:16] [Rank 0] step:5241/10000 train_time:399744ms step_avg:76.27ms +[2025-09-02 16:38:16] [Rank 0] step:5241/10000 train_time:399744ms step_avg:76.27ms +[2025-09-02 16:38:17] [Rank 0] step:5261/10000 train_time:401351ms step_avg:76.29ms +[2025-09-02 16:38:17] [Rank 0] step:5261/10000 train_time:401351ms step_avg:76.29ms +[2025-09-02 16:38:19] [Rank 0] step:5281/10000 train_time:403017ms step_avg:76.31ms +[2025-09-02 16:38:19] [Rank 0] step:5281/10000 train_time:403017ms step_avg:76.31ms +[2025-09-02 16:38:21] [Rank 0] step:5301/10000 train_time:404634ms step_avg:76.33ms +[2025-09-02 16:38:21] [Rank 0] step:5301/10000 train_time:404634ms step_avg:76.33ms +[2025-09-02 16:38:22] [Rank 0] step:5321/10000 train_time:406245ms step_avg:76.35ms +[2025-09-02 16:38:22] [Rank 0] step:5321/10000 train_time:406245ms step_avg:76.35ms +[2025-09-02 
16:38:24] [Rank 0] step:5341/10000 train_time:407854ms step_avg:76.36ms +[2025-09-02 16:38:24] [Rank 0] step:5341/10000 train_time:407854ms step_avg:76.36ms +[2025-09-02 16:38:25] [Rank 0] step:5361/10000 train_time:409471ms step_avg:76.38ms +[2025-09-02 16:38:25] [Rank 0] step:5361/10000 train_time:409471ms step_avg:76.38ms +[2025-09-02 16:38:27] [Rank 0] step:5381/10000 train_time:411083ms step_avg:76.40ms +[2025-09-02 16:38:27] [Rank 0] step:5381/10000 train_time:411083ms step_avg:76.40ms +[2025-09-02 16:38:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:38:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:38:40] [Rank 0] PRINT: step:5400/10000 val_loss:3.8495 svd_entropy: attn_qk:H=0.7650,top10E=0.27,eRank=166.4,q75/q25=62.24 attn_vo:H=0.8341,top10E=0.15,eRank=283.7,q75/q25=52.23 mlp_w1:H=0.9011,top10E=0.15,eRank=401.0,q75/q25=4.72 mlp_w2:H=0.9702,top10E=0.04,eRank=630.2,q75/q25=2.94 vo_prod:H=0.7311,top10E=0.26,eRank=135.9,q75/q25=3050.85 train_time:412775ms step_avg:76.44ms +[2025-09-02 16:38:40] [Rank 0] PRINT: step:5400/10000 val_loss:3.8495 svd_entropy: attn_qk:H=0.7650,top10E=0.27,eRank=166.4,q75/q25=62.24 attn_vo:H=0.8341,top10E=0.15,eRank=283.7,q75/q25=52.23 mlp_w1:H=0.9011,top10E=0.15,eRank=401.0,q75/q25=4.72 mlp_w2:H=0.9702,top10E=0.04,eRank=630.2,q75/q25=2.94 vo_prod:H=0.7311,top10E=0.26,eRank=135.9,q75/q25=3050.85 train_time:412775ms step_avg:76.44ms +[2025-09-02 16:38:40] [Rank 0] step:5401/10000 train_time:412789ms step_avg:76.43ms +[2025-09-02 16:38:40] [Rank 0] step:5401/10000 train_time:412789ms step_avg:76.43ms +[2025-09-02 16:38:42] [Rank 0] step:5421/10000 train_time:414334ms step_avg:76.43ms +[2025-09-02 16:38:42] [Rank 0] step:5421/10000 train_time:414334ms step_avg:76.43ms +[2025-09-02 16:38:44] [Rank 0] step:5441/10000 train_time:415941ms 
step_avg:76.45ms +[2025-09-02 16:38:44] [Rank 0] step:5441/10000 train_time:415941ms step_avg:76.45ms +[2025-09-02 16:38:45] [Rank 0] step:5461/10000 train_time:417557ms step_avg:76.46ms +[2025-09-02 16:38:45] [Rank 0] step:5461/10000 train_time:417557ms step_avg:76.46ms +[2025-09-02 16:38:47] [Rank 0] step:5481/10000 train_time:419169ms step_avg:76.48ms +[2025-09-02 16:38:47] [Rank 0] step:5481/10000 train_time:419169ms step_avg:76.48ms +[2025-09-02 16:38:49] [Rank 0] step:5501/10000 train_time:420787ms step_avg:76.49ms +[2025-09-02 16:38:49] [Rank 0] step:5501/10000 train_time:420787ms step_avg:76.49ms +[2025-09-02 16:38:50] [Rank 0] step:5521/10000 train_time:422403ms step_avg:76.51ms +[2025-09-02 16:38:50] [Rank 0] step:5521/10000 train_time:422403ms step_avg:76.51ms +[2025-09-02 16:38:52] [Rank 0] step:5541/10000 train_time:424016ms step_avg:76.52ms +[2025-09-02 16:38:52] [Rank 0] step:5541/10000 train_time:424016ms step_avg:76.52ms +[2025-09-02 16:38:53] [Rank 0] step:5561/10000 train_time:425632ms step_avg:76.54ms +[2025-09-02 16:38:53] [Rank 0] step:5561/10000 train_time:425632ms step_avg:76.54ms +[2025-09-02 16:38:55] [Rank 0] step:5581/10000 train_time:427243ms step_avg:76.55ms +[2025-09-02 16:38:55] [Rank 0] step:5581/10000 train_time:427243ms step_avg:76.55ms +[2025-09-02 16:38:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:38:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:39:08] [Rank 0] PRINT: step:5600/10000 val_loss:3.8372 svd_entropy: attn_qk:H=0.7664,top10E=0.27,eRank=167.7,q75/q25=61.83 attn_vo:H=0.8352,top10E=0.15,eRank=285.4,q75/q25=51.38 mlp_w1:H=0.9023,top10E=0.15,eRank=404.1,q75/q25=4.69 mlp_w2:H=0.9702,top10E=0.04,eRank=630.2,q75/q25=2.94 vo_prod:H=0.7326,top10E=0.25,eRank=137.2,q75/q25=2987.15 train_time:428944ms step_avg:76.60ms +[2025-09-02 16:39:08] [Rank 0] PRINT: step:5600/10000 val_loss:3.8372 svd_entropy: attn_qk:H=0.7664,top10E=0.27,eRank=167.7,q75/q25=61.83 attn_vo:H=0.8352,top10E=0.15,eRank=285.4,q75/q25=51.38 mlp_w1:H=0.9023,top10E=0.15,eRank=404.1,q75/q25=4.69 mlp_w2:H=0.9702,top10E=0.04,eRank=630.2,q75/q25=2.94 vo_prod:H=0.7326,top10E=0.25,eRank=137.2,q75/q25=2987.15 train_time:428944ms step_avg:76.60ms +[2025-09-02 16:39:09] [Rank 0] step:5601/10000 train_time:428958ms step_avg:76.59ms +[2025-09-02 16:39:09] [Rank 0] step:5601/10000 train_time:428958ms step_avg:76.59ms +[2025-09-02 16:39:10] [Rank 0] step:5621/10000 train_time:430487ms step_avg:76.59ms +[2025-09-02 16:39:10] [Rank 0] step:5621/10000 train_time:430487ms step_avg:76.59ms +[2025-09-02 16:39:12] [Rank 0] step:5641/10000 train_time:432097ms step_avg:76.60ms +[2025-09-02 16:39:12] [Rank 0] step:5641/10000 train_time:432097ms step_avg:76.60ms +[2025-09-02 16:39:13] [Rank 0] step:5661/10000 train_time:433709ms step_avg:76.61ms +[2025-09-02 16:39:13] [Rank 0] step:5661/10000 train_time:433709ms step_avg:76.61ms +[2025-09-02 16:39:15] [Rank 0] step:5681/10000 train_time:435324ms step_avg:76.63ms +[2025-09-02 16:39:15] [Rank 0] step:5681/10000 train_time:435324ms step_avg:76.63ms +[2025-09-02 16:39:17] [Rank 0] step:5701/10000 train_time:436936ms step_avg:76.64ms +[2025-09-02 16:39:17] [Rank 0] step:5701/10000 train_time:436936ms step_avg:76.64ms +[2025-09-02 16:39:18] [Rank 0] step:5721/10000 train_time:438697ms step_avg:76.68ms +[2025-09-02 16:39:18] [Rank 0] step:5721/10000 train_time:438697ms step_avg:76.68ms +[2025-09-02 
16:39:20] [Rank 0] step:5741/10000 train_time:440263ms step_avg:76.69ms +[2025-09-02 16:39:20] [Rank 0] step:5741/10000 train_time:440263ms step_avg:76.69ms +[2025-09-02 16:39:22] [Rank 0] step:5761/10000 train_time:441877ms step_avg:76.70ms +[2025-09-02 16:39:22] [Rank 0] step:5761/10000 train_time:441877ms step_avg:76.70ms +[2025-09-02 16:39:23] [Rank 0] step:5781/10000 train_time:443494ms step_avg:76.72ms +[2025-09-02 16:39:23] [Rank 0] step:5781/10000 train_time:443494ms step_avg:76.72ms +[2025-09-02 16:39:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:39:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:39:36] [Rank 0] PRINT: step:5800/10000 val_loss:3.8298 svd_entropy: attn_qk:H=0.7675,top10E=0.26,eRank=168.9,q75/q25=61.66 attn_vo:H=0.8363,top10E=0.15,eRank=287.2,q75/q25=50.72 mlp_w1:H=0.9034,top10E=0.14,eRank=407.1,q75/q25=4.66 mlp_w2:H=0.9702,top10E=0.04,eRank=630.3,q75/q25=2.93 vo_prod:H=0.7340,top10E=0.25,eRank=138.3,q75/q25=2953.48 train_time:445193ms step_avg:76.76ms +[2025-09-02 16:39:36] [Rank 0] PRINT: step:5800/10000 val_loss:3.8298 svd_entropy: attn_qk:H=0.7675,top10E=0.26,eRank=168.9,q75/q25=61.66 attn_vo:H=0.8363,top10E=0.15,eRank=287.2,q75/q25=50.72 mlp_w1:H=0.9034,top10E=0.14,eRank=407.1,q75/q25=4.66 mlp_w2:H=0.9702,top10E=0.04,eRank=630.3,q75/q25=2.93 vo_prod:H=0.7340,top10E=0.25,eRank=138.3,q75/q25=2953.48 train_time:445193ms step_avg:76.76ms +[2025-09-02 16:39:36] [Rank 0] step:5801/10000 train_time:445208ms step_avg:76.75ms +[2025-09-02 16:39:36] [Rank 0] step:5801/10000 train_time:445208ms step_avg:76.75ms +[2025-09-02 16:39:38] [Rank 0] step:5821/10000 train_time:446753ms step_avg:76.75ms +[2025-09-02 16:39:38] [Rank 0] step:5821/10000 train_time:446753ms step_avg:76.75ms +[2025-09-02 16:39:40] [Rank 0] step:5841/10000 train_time:448362ms 
step_avg:76.76ms +[2025-09-02 16:39:40] [Rank 0] step:5841/10000 train_time:448362ms step_avg:76.76ms +[2025-09-02 16:39:41] [Rank 0] step:5861/10000 train_time:449974ms step_avg:76.77ms +[2025-09-02 16:39:41] [Rank 0] step:5861/10000 train_time:449974ms step_avg:76.77ms +[2025-09-02 16:39:43] [Rank 0] step:5881/10000 train_time:451584ms step_avg:76.79ms +[2025-09-02 16:39:43] [Rank 0] step:5881/10000 train_time:451584ms step_avg:76.79ms +[2025-09-02 16:39:45] [Rank 0] step:5901/10000 train_time:453197ms step_avg:76.80ms +[2025-09-02 16:39:45] [Rank 0] step:5901/10000 train_time:453197ms step_avg:76.80ms +[2025-09-02 16:39:46] [Rank 0] step:5921/10000 train_time:454807ms step_avg:76.81ms +[2025-09-02 16:39:46] [Rank 0] step:5921/10000 train_time:454807ms step_avg:76.81ms +[2025-09-02 16:39:48] [Rank 0] step:5941/10000 train_time:456425ms step_avg:76.83ms +[2025-09-02 16:39:48] [Rank 0] step:5941/10000 train_time:456425ms step_avg:76.83ms +[2025-09-02 16:39:49] [Rank 0] step:5961/10000 train_time:458040ms step_avg:76.84ms +[2025-09-02 16:39:49] [Rank 0] step:5961/10000 train_time:458040ms step_avg:76.84ms +[2025-09-02 16:39:51] [Rank 0] step:5981/10000 train_time:459655ms step_avg:76.85ms +[2025-09-02 16:39:51] [Rank 0] step:5981/10000 train_time:459655ms step_avg:76.85ms +[2025-09-02 16:39:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:39:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:40:04] [Rank 0] PRINT: step:6000/10000 val_loss:3.8047 svd_entropy: attn_qk:H=0.7688,top10E=0.26,eRank=170.2,q75/q25=61.22 attn_vo:H=0.8374,top10E=0.14,eRank=288.9,q75/q25=50.15 mlp_w1:H=0.9045,top10E=0.14,eRank=410.0,q75/q25=4.63 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.93 vo_prod:H=0.7354,top10E=0.25,eRank=139.6,q75/q25=2935.50 train_time:461346ms step_avg:76.89ms +[2025-09-02 16:40:04] [Rank 0] PRINT: step:6000/10000 val_loss:3.8047 svd_entropy: attn_qk:H=0.7688,top10E=0.26,eRank=170.2,q75/q25=61.22 attn_vo:H=0.8374,top10E=0.14,eRank=288.9,q75/q25=50.15 mlp_w1:H=0.9045,top10E=0.14,eRank=410.0,q75/q25=4.63 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.93 vo_prod:H=0.7354,top10E=0.25,eRank=139.6,q75/q25=2935.50 train_time:461346ms step_avg:76.89ms +[2025-09-02 16:40:04] [Rank 0] step:6001/10000 train_time:461360ms step_avg:76.88ms +[2025-09-02 16:40:04] [Rank 0] step:6001/10000 train_time:461360ms step_avg:76.88ms +[2025-09-02 16:40:06] [Rank 0] step:6021/10000 train_time:462917ms step_avg:76.88ms +[2025-09-02 16:40:06] [Rank 0] step:6021/10000 train_time:462917ms step_avg:76.88ms +[2025-09-02 16:40:08] [Rank 0] step:6041/10000 train_time:464529ms step_avg:76.90ms +[2025-09-02 16:40:08] [Rank 0] step:6041/10000 train_time:464529ms step_avg:76.90ms +[2025-09-02 16:40:09] [Rank 0] step:6061/10000 train_time:466147ms step_avg:76.91ms +[2025-09-02 16:40:09] [Rank 0] step:6061/10000 train_time:466147ms step_avg:76.91ms +[2025-09-02 16:40:11] [Rank 0] step:6081/10000 train_time:467757ms step_avg:76.92ms +[2025-09-02 16:40:11] [Rank 0] step:6081/10000 train_time:467757ms step_avg:76.92ms +[2025-09-02 16:40:13] [Rank 0] step:6101/10000 train_time:469376ms step_avg:76.93ms +[2025-09-02 16:40:13] [Rank 0] step:6101/10000 train_time:469376ms step_avg:76.93ms +[2025-09-02 16:40:14] [Rank 0] step:6121/10000 train_time:471256ms step_avg:76.99ms +[2025-09-02 16:40:14] [Rank 0] step:6121/10000 train_time:471256ms step_avg:76.99ms +[2025-09-02 
16:40:16] [Rank 0] step:6141/10000 train_time:472882ms step_avg:77.00ms +[2025-09-02 16:40:16] [Rank 0] step:6141/10000 train_time:472882ms step_avg:77.00ms +[2025-09-02 16:40:18] [Rank 0] step:6161/10000 train_time:474499ms step_avg:77.02ms +[2025-09-02 16:40:18] [Rank 0] step:6161/10000 train_time:474499ms step_avg:77.02ms +[2025-09-02 16:40:19] [Rank 0] step:6181/10000 train_time:476111ms step_avg:77.03ms +[2025-09-02 16:40:19] [Rank 0] step:6181/10000 train_time:476111ms step_avg:77.03ms +[2025-09-02 16:40:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:40:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:40:33] [Rank 0] PRINT: step:6200/10000 val_loss:3.7911 svd_entropy: attn_qk:H=0.7698,top10E=0.26,eRank=171.3,q75/q25=60.41 attn_vo:H=0.8383,top10E=0.14,eRank=290.4,q75/q25=49.50 mlp_w1:H=0.9055,top10E=0.14,eRank=412.7,q75/q25=4.60 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7365,top10E=0.25,eRank=140.7,q75/q25=2919.53 train_time:477810ms step_avg:77.07ms +[2025-09-02 16:40:33] [Rank 0] PRINT: step:6200/10000 val_loss:3.7911 svd_entropy: attn_qk:H=0.7698,top10E=0.26,eRank=171.3,q75/q25=60.41 attn_vo:H=0.8383,top10E=0.14,eRank=290.4,q75/q25=49.50 mlp_w1:H=0.9055,top10E=0.14,eRank=412.7,q75/q25=4.60 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7365,top10E=0.25,eRank=140.7,q75/q25=2919.53 train_time:477810ms step_avg:77.07ms +[2025-09-02 16:40:33] [Rank 0] step:6201/10000 train_time:477824ms step_avg:77.06ms +[2025-09-02 16:40:33] [Rank 0] step:6201/10000 train_time:477824ms step_avg:77.06ms +[2025-09-02 16:40:34] [Rank 0] step:6221/10000 train_time:479377ms step_avg:77.06ms +[2025-09-02 16:40:34] [Rank 0] step:6221/10000 train_time:479377ms step_avg:77.06ms +[2025-09-02 16:40:36] [Rank 0] step:6241/10000 train_time:480991ms 
step_avg:77.07ms +[2025-09-02 16:40:36] [Rank 0] step:6241/10000 train_time:480991ms step_avg:77.07ms +[2025-09-02 16:40:38] [Rank 0] step:6261/10000 train_time:482609ms step_avg:77.08ms +[2025-09-02 16:40:38] [Rank 0] step:6261/10000 train_time:482609ms step_avg:77.08ms +[2025-09-02 16:40:39] [Rank 0] step:6281/10000 train_time:484226ms step_avg:77.09ms +[2025-09-02 16:40:39] [Rank 0] step:6281/10000 train_time:484226ms step_avg:77.09ms +[2025-09-02 16:40:41] [Rank 0] step:6301/10000 train_time:485839ms step_avg:77.11ms +[2025-09-02 16:40:41] [Rank 0] step:6301/10000 train_time:485839ms step_avg:77.11ms +[2025-09-02 16:40:42] [Rank 0] step:6321/10000 train_time:487455ms step_avg:77.12ms +[2025-09-02 16:40:42] [Rank 0] step:6321/10000 train_time:487455ms step_avg:77.12ms +[2025-09-02 16:40:44] [Rank 0] step:6341/10000 train_time:489076ms step_avg:77.13ms +[2025-09-02 16:40:44] [Rank 0] step:6341/10000 train_time:489076ms step_avg:77.13ms +[2025-09-02 16:40:46] [Rank 0] step:6361/10000 train_time:490699ms step_avg:77.14ms +[2025-09-02 16:40:46] [Rank 0] step:6361/10000 train_time:490699ms step_avg:77.14ms +[2025-09-02 16:40:47] [Rank 0] step:6381/10000 train_time:492321ms step_avg:77.15ms +[2025-09-02 16:40:47] [Rank 0] step:6381/10000 train_time:492321ms step_avg:77.15ms +[2025-09-02 16:40:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:40:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:41:01] [Rank 0] PRINT: step:6400/10000 val_loss:3.7765 svd_entropy: attn_qk:H=0.7708,top10E=0.26,eRank=172.3,q75/q25=60.34 attn_vo:H=0.8392,top10E=0.14,eRank=291.7,q75/q25=49.09 mlp_w1:H=0.9064,top10E=0.14,eRank=415.1,q75/q25=4.57 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7376,top10E=0.25,eRank=141.6,q75/q25=2863.69 train_time:494020ms step_avg:77.19ms +[2025-09-02 16:41:01] [Rank 0] PRINT: step:6400/10000 val_loss:3.7765 svd_entropy: attn_qk:H=0.7708,top10E=0.26,eRank=172.3,q75/q25=60.34 attn_vo:H=0.8392,top10E=0.14,eRank=291.7,q75/q25=49.09 mlp_w1:H=0.9064,top10E=0.14,eRank=415.1,q75/q25=4.57 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7376,top10E=0.25,eRank=141.6,q75/q25=2863.69 train_time:494020ms step_avg:77.19ms +[2025-09-02 16:41:01] [Rank 0] step:6401/10000 train_time:494035ms step_avg:77.18ms +[2025-09-02 16:41:01] [Rank 0] step:6401/10000 train_time:494035ms step_avg:77.18ms +[2025-09-02 16:41:02] [Rank 0] step:6421/10000 train_time:495585ms step_avg:77.18ms +[2025-09-02 16:41:02] [Rank 0] step:6421/10000 train_time:495585ms step_avg:77.18ms +[2025-09-02 16:41:04] [Rank 0] step:6441/10000 train_time:497199ms step_avg:77.19ms +[2025-09-02 16:41:04] [Rank 0] step:6441/10000 train_time:497199ms step_avg:77.19ms +[2025-09-02 16:41:06] [Rank 0] step:6461/10000 train_time:498817ms step_avg:77.20ms +[2025-09-02 16:41:06] [Rank 0] step:6461/10000 train_time:498817ms step_avg:77.20ms +[2025-09-02 16:41:07] [Rank 0] step:6481/10000 train_time:500440ms step_avg:77.22ms +[2025-09-02 16:41:07] [Rank 0] step:6481/10000 train_time:500440ms step_avg:77.22ms +[2025-09-02 16:41:09] [Rank 0] step:6501/10000 train_time:502056ms step_avg:77.23ms +[2025-09-02 16:41:09] [Rank 0] step:6501/10000 train_time:502056ms step_avg:77.23ms +[2025-09-02 16:41:11] [Rank 0] step:6521/10000 train_time:503668ms step_avg:77.24ms +[2025-09-02 16:41:11] [Rank 0] step:6521/10000 train_time:503668ms step_avg:77.24ms +[2025-09-02 
16:41:12] [Rank 0] step:6541/10000 train_time:505288ms step_avg:77.25ms +[2025-09-02 16:41:12] [Rank 0] step:6541/10000 train_time:505288ms step_avg:77.25ms +[2025-09-02 16:41:14] [Rank 0] step:6561/10000 train_time:506912ms step_avg:77.26ms +[2025-09-02 16:41:14] [Rank 0] step:6561/10000 train_time:506912ms step_avg:77.26ms +[2025-09-02 16:41:15] [Rank 0] step:6581/10000 train_time:508533ms step_avg:77.27ms +[2025-09-02 16:41:15] [Rank 0] step:6581/10000 train_time:508533ms step_avg:77.27ms +[2025-09-02 16:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:41:29] [Rank 0] PRINT: step:6600/10000 val_loss:3.7631 svd_entropy: attn_qk:H=0.7717,top10E=0.26,eRank=173.2,q75/q25=60.15 attn_vo:H=0.8400,top10E=0.14,eRank=293.1,q75/q25=48.43 mlp_w1:H=0.9072,top10E=0.14,eRank=417.2,q75/q25=4.54 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7388,top10E=0.25,eRank=142.6,q75/q25=2871.31 train_time:510238ms step_avg:77.31ms +[2025-09-02 16:41:29] [Rank 0] PRINT: step:6600/10000 val_loss:3.7631 svd_entropy: attn_qk:H=0.7717,top10E=0.26,eRank=173.2,q75/q25=60.15 attn_vo:H=0.8400,top10E=0.14,eRank=293.1,q75/q25=48.43 mlp_w1:H=0.9072,top10E=0.14,eRank=417.2,q75/q25=4.54 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7388,top10E=0.25,eRank=142.6,q75/q25=2871.31 train_time:510238ms step_avg:77.31ms +[2025-09-02 16:41:29] [Rank 0] step:6601/10000 train_time:510252ms step_avg:77.30ms +[2025-09-02 16:41:29] [Rank 0] step:6601/10000 train_time:510252ms step_avg:77.30ms +[2025-09-02 16:41:30] [Rank 0] step:6621/10000 train_time:511803ms step_avg:77.30ms +[2025-09-02 16:41:30] [Rank 0] step:6621/10000 train_time:511803ms step_avg:77.30ms +[2025-09-02 16:41:32] [Rank 0] step:6641/10000 train_time:513430ms 
step_avg:77.31ms +[2025-09-02 16:41:32] [Rank 0] step:6641/10000 train_time:513430ms step_avg:77.31ms +[2025-09-02 16:41:34] [Rank 0] step:6661/10000 train_time:515053ms step_avg:77.32ms +[2025-09-02 16:41:34] [Rank 0] step:6661/10000 train_time:515053ms step_avg:77.32ms +[2025-09-02 16:41:35] [Rank 0] step:6681/10000 train_time:516687ms step_avg:77.34ms +[2025-09-02 16:41:35] [Rank 0] step:6681/10000 train_time:516687ms step_avg:77.34ms +[2025-09-02 16:41:37] [Rank 0] step:6701/10000 train_time:518343ms step_avg:77.35ms +[2025-09-02 16:41:37] [Rank 0] step:6701/10000 train_time:518343ms step_avg:77.35ms +[2025-09-02 16:41:39] [Rank 0] step:6721/10000 train_time:519990ms step_avg:77.37ms +[2025-09-02 16:41:39] [Rank 0] step:6721/10000 train_time:519990ms step_avg:77.37ms +[2025-09-02 16:41:40] [Rank 0] step:6741/10000 train_time:521638ms step_avg:77.38ms +[2025-09-02 16:41:40] [Rank 0] step:6741/10000 train_time:521638ms step_avg:77.38ms +[2025-09-02 16:41:42] [Rank 0] step:6761/10000 train_time:523284ms step_avg:77.40ms +[2025-09-02 16:41:42] [Rank 0] step:6761/10000 train_time:523284ms step_avg:77.40ms +[2025-09-02 16:41:44] [Rank 0] step:6781/10000 train_time:524936ms step_avg:77.41ms +[2025-09-02 16:41:44] [Rank 0] step:6781/10000 train_time:524936ms step_avg:77.41ms +[2025-09-02 16:41:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:41:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:41:57] [Rank 0] PRINT: step:6800/10000 val_loss:3.7482 svd_entropy: attn_qk:H=0.7723,top10E=0.26,eRank=173.9,q75/q25=59.79 attn_vo:H=0.8408,top10E=0.14,eRank=294.2,q75/q25=48.18 mlp_w1:H=0.9079,top10E=0.14,eRank=419.2,q75/q25=4.51 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7397,top10E=0.25,eRank=143.4,q75/q25=2890.63 train_time:526675ms step_avg:77.45ms +[2025-09-02 16:41:57] [Rank 0] PRINT: step:6800/10000 val_loss:3.7482 svd_entropy: attn_qk:H=0.7723,top10E=0.26,eRank=173.9,q75/q25=59.79 attn_vo:H=0.8408,top10E=0.14,eRank=294.2,q75/q25=48.18 mlp_w1:H=0.9079,top10E=0.14,eRank=419.2,q75/q25=4.51 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7397,top10E=0.25,eRank=143.4,q75/q25=2890.63 train_time:526675ms step_avg:77.45ms +[2025-09-02 16:41:57] [Rank 0] step:6801/10000 train_time:526690ms step_avg:77.44ms +[2025-09-02 16:41:57] [Rank 0] step:6801/10000 train_time:526690ms step_avg:77.44ms +[2025-09-02 16:41:59] [Rank 0] step:6821/10000 train_time:528258ms step_avg:77.45ms +[2025-09-02 16:41:59] [Rank 0] step:6821/10000 train_time:528258ms step_avg:77.45ms +[2025-09-02 16:42:00] [Rank 0] step:6841/10000 train_time:529900ms step_avg:77.46ms +[2025-09-02 16:42:00] [Rank 0] step:6841/10000 train_time:529900ms step_avg:77.46ms +[2025-09-02 16:42:02] [Rank 0] step:6861/10000 train_time:531549ms step_avg:77.47ms +[2025-09-02 16:42:02] [Rank 0] step:6861/10000 train_time:531549ms step_avg:77.47ms +[2025-09-02 16:42:04] [Rank 0] step:6881/10000 train_time:533196ms step_avg:77.49ms +[2025-09-02 16:42:04] [Rank 0] step:6881/10000 train_time:533196ms step_avg:77.49ms +[2025-09-02 16:42:05] [Rank 0] step:6901/10000 train_time:534844ms step_avg:77.50ms +[2025-09-02 16:42:05] [Rank 0] step:6901/10000 train_time:534844ms step_avg:77.50ms +[2025-09-02 16:42:07] [Rank 0] step:6921/10000 train_time:536493ms step_avg:77.52ms +[2025-09-02 16:42:07] [Rank 0] step:6921/10000 train_time:536493ms step_avg:77.52ms +[2025-09-02 
16:42:09] [Rank 0] step:6941/10000 train_time:538145ms step_avg:77.53ms +[2025-09-02 16:42:09] [Rank 0] step:6941/10000 train_time:538145ms step_avg:77.53ms +[2025-09-02 16:42:10] [Rank 0] step:6961/10000 train_time:539809ms step_avg:77.55ms +[2025-09-02 16:42:10] [Rank 0] step:6961/10000 train_time:539809ms step_avg:77.55ms +[2025-09-02 16:42:12] [Rank 0] step:6981/10000 train_time:541462ms step_avg:77.56ms +[2025-09-02 16:42:12] [Rank 0] step:6981/10000 train_time:541462ms step_avg:77.56ms +[2025-09-02 16:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:42:25] [Rank 0] PRINT: step:7000/10000 val_loss:3.7324 svd_entropy: attn_qk:H=0.7731,top10E=0.26,eRank=174.7,q75/q25=59.48 attn_vo:H=0.8415,top10E=0.14,eRank=295.4,q75/q25=47.82 mlp_w1:H=0.9086,top10E=0.14,eRank=421.1,q75/q25=4.49 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7408,top10E=0.25,eRank=144.4,q75/q25=2797.64 train_time:543201ms step_avg:77.60ms +[2025-09-02 16:42:25] [Rank 0] PRINT: step:7000/10000 val_loss:3.7324 svd_entropy: attn_qk:H=0.7731,top10E=0.26,eRank=174.7,q75/q25=59.48 attn_vo:H=0.8415,top10E=0.14,eRank=295.4,q75/q25=47.82 mlp_w1:H=0.9086,top10E=0.14,eRank=421.1,q75/q25=4.49 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7408,top10E=0.25,eRank=144.4,q75/q25=2797.64 train_time:543201ms step_avg:77.60ms +[2025-09-02 16:42:25] [Rank 0] step:7001/10000 train_time:543215ms step_avg:77.59ms +[2025-09-02 16:42:25] [Rank 0] step:7001/10000 train_time:543215ms step_avg:77.59ms +[2025-09-02 16:42:27] [Rank 0] step:7021/10000 train_time:544775ms step_avg:77.59ms +[2025-09-02 16:42:27] [Rank 0] step:7021/10000 train_time:544775ms step_avg:77.59ms +[2025-09-02 16:42:28] [Rank 0] step:7041/10000 train_time:546426ms 
step_avg:77.61ms +[2025-09-02 16:42:28] [Rank 0] step:7041/10000 train_time:546426ms step_avg:77.61ms +[2025-09-02 16:42:30] [Rank 0] step:7061/10000 train_time:548116ms step_avg:77.63ms +[2025-09-02 16:42:30] [Rank 0] step:7061/10000 train_time:548116ms step_avg:77.63ms +[2025-09-02 16:42:32] [Rank 0] step:7081/10000 train_time:549819ms step_avg:77.65ms +[2025-09-02 16:42:32] [Rank 0] step:7081/10000 train_time:549819ms step_avg:77.65ms +[2025-09-02 16:42:33] [Rank 0] step:7101/10000 train_time:551468ms step_avg:77.66ms +[2025-09-02 16:42:33] [Rank 0] step:7101/10000 train_time:551468ms step_avg:77.66ms +[2025-09-02 16:42:35] [Rank 0] step:7121/10000 train_time:553113ms step_avg:77.67ms +[2025-09-02 16:42:35] [Rank 0] step:7121/10000 train_time:553113ms step_avg:77.67ms +[2025-09-02 16:42:37] [Rank 0] step:7141/10000 train_time:554760ms step_avg:77.69ms +[2025-09-02 16:42:37] [Rank 0] step:7141/10000 train_time:554760ms step_avg:77.69ms +[2025-09-02 16:42:38] [Rank 0] step:7161/10000 train_time:556408ms step_avg:77.70ms +[2025-09-02 16:42:38] [Rank 0] step:7161/10000 train_time:556408ms step_avg:77.70ms +[2025-09-02 16:42:40] [Rank 0] step:7181/10000 train_time:558058ms step_avg:77.71ms +[2025-09-02 16:42:40] [Rank 0] step:7181/10000 train_time:558058ms step_avg:77.71ms +[2025-09-02 16:42:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:42:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:42:53] [Rank 0] PRINT: step:7200/10000 val_loss:3.7232 svd_entropy: attn_qk:H=0.7738,top10E=0.26,eRank=175.4,q75/q25=59.28 attn_vo:H=0.8422,top10E=0.14,eRank=296.5,q75/q25=47.14 mlp_w1:H=0.9092,top10E=0.14,eRank=422.7,q75/q25=4.47 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7417,top10E=0.24,eRank=145.4,q75/q25=2809.93 train_time:559795ms step_avg:77.75ms +[2025-09-02 16:42:53] [Rank 0] PRINT: step:7200/10000 val_loss:3.7232 svd_entropy: attn_qk:H=0.7738,top10E=0.26,eRank=175.4,q75/q25=59.28 attn_vo:H=0.8422,top10E=0.14,eRank=296.5,q75/q25=47.14 mlp_w1:H=0.9092,top10E=0.14,eRank=422.7,q75/q25=4.47 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7417,top10E=0.24,eRank=145.4,q75/q25=2809.93 train_time:559795ms step_avg:77.75ms +[2025-09-02 16:42:54] [Rank 0] step:7201/10000 train_time:559809ms step_avg:77.74ms +[2025-09-02 16:42:54] [Rank 0] step:7201/10000 train_time:559809ms step_avg:77.74ms +[2025-09-02 16:42:55] [Rank 0] step:7221/10000 train_time:561385ms step_avg:77.74ms +[2025-09-02 16:42:55] [Rank 0] step:7221/10000 train_time:561385ms step_avg:77.74ms +[2025-09-02 16:42:57] [Rank 0] step:7241/10000 train_time:563025ms step_avg:77.76ms +[2025-09-02 16:42:57] [Rank 0] step:7241/10000 train_time:563025ms step_avg:77.76ms +[2025-09-02 16:42:58] [Rank 0] step:7261/10000 train_time:564664ms step_avg:77.77ms +[2025-09-02 16:42:58] [Rank 0] step:7261/10000 train_time:564664ms step_avg:77.77ms +[2025-09-02 16:43:00] [Rank 0] step:7281/10000 train_time:566317ms step_avg:77.78ms +[2025-09-02 16:43:00] [Rank 0] step:7281/10000 train_time:566317ms step_avg:77.78ms +[2025-09-02 16:43:02] [Rank 0] step:7301/10000 train_time:567964ms step_avg:77.79ms +[2025-09-02 16:43:02] [Rank 0] step:7301/10000 train_time:567964ms step_avg:77.79ms +[2025-09-02 16:43:03] [Rank 0] step:7321/10000 train_time:569621ms step_avg:77.81ms +[2025-09-02 16:43:03] [Rank 0] step:7321/10000 train_time:569621ms step_avg:77.81ms +[2025-09-02 
16:43:05] [Rank 0] step:7341/10000 train_time:571270ms step_avg:77.82ms +[2025-09-02 16:43:05] [Rank 0] step:7341/10000 train_time:571270ms step_avg:77.82ms +[2025-09-02 16:43:07] [Rank 0] step:7361/10000 train_time:572926ms step_avg:77.83ms +[2025-09-02 16:43:07] [Rank 0] step:7361/10000 train_time:572926ms step_avg:77.83ms +[2025-09-02 16:43:08] [Rank 0] step:7381/10000 train_time:574579ms step_avg:77.85ms +[2025-09-02 16:43:08] [Rank 0] step:7381/10000 train_time:574579ms step_avg:77.85ms +[2025-09-02 16:43:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:43:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:43:22] [Rank 0] PRINT: step:7400/10000 val_loss:3.7045 svd_entropy: attn_qk:H=0.7743,top10E=0.26,eRank=175.9,q75/q25=58.73 attn_vo:H=0.8427,top10E=0.14,eRank=297.4,q75/q25=46.97 mlp_w1:H=0.9098,top10E=0.14,eRank=424.2,q75/q25=4.45 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.91 vo_prod:H=0.7426,top10E=0.24,eRank=146.2,q75/q25=2798.32 train_time:576299ms step_avg:77.88ms +[2025-09-02 16:43:22] [Rank 0] PRINT: step:7400/10000 val_loss:3.7045 svd_entropy: attn_qk:H=0.7743,top10E=0.26,eRank=175.9,q75/q25=58.73 attn_vo:H=0.8427,top10E=0.14,eRank=297.4,q75/q25=46.97 mlp_w1:H=0.9098,top10E=0.14,eRank=424.2,q75/q25=4.45 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.91 vo_prod:H=0.7426,top10E=0.24,eRank=146.2,q75/q25=2798.32 train_time:576299ms step_avg:77.88ms +[2025-09-02 16:43:22] [Rank 0] step:7401/10000 train_time:576313ms step_avg:77.87ms +[2025-09-02 16:43:22] [Rank 0] step:7401/10000 train_time:576313ms step_avg:77.87ms +[2025-09-02 16:43:23] [Rank 0] step:7421/10000 train_time:577880ms step_avg:77.87ms +[2025-09-02 16:43:23] [Rank 0] step:7421/10000 train_time:577880ms step_avg:77.87ms +[2025-09-02 16:43:25] [Rank 0] step:7441/10000 train_time:579525ms 
step_avg:77.88ms +[2025-09-02 16:43:25] [Rank 0] step:7441/10000 train_time:579525ms step_avg:77.88ms +[2025-09-02 16:43:27] [Rank 0] step:7461/10000 train_time:581172ms step_avg:77.89ms +[2025-09-02 16:43:27] [Rank 0] step:7461/10000 train_time:581172ms step_avg:77.89ms +[2025-09-02 16:43:28] [Rank 0] step:7481/10000 train_time:582828ms step_avg:77.91ms +[2025-09-02 16:43:28] [Rank 0] step:7481/10000 train_time:582828ms step_avg:77.91ms +[2025-09-02 16:43:30] [Rank 0] step:7501/10000 train_time:584480ms step_avg:77.92ms +[2025-09-02 16:43:30] [Rank 0] step:7501/10000 train_time:584480ms step_avg:77.92ms +[2025-09-02 16:43:32] [Rank 0] step:7521/10000 train_time:586171ms step_avg:77.94ms +[2025-09-02 16:43:32] [Rank 0] step:7521/10000 train_time:586171ms step_avg:77.94ms +[2025-09-02 16:43:33] [Rank 0] step:7541/10000 train_time:587832ms step_avg:77.95ms +[2025-09-02 16:43:33] [Rank 0] step:7541/10000 train_time:587832ms step_avg:77.95ms +[2025-09-02 16:43:35] [Rank 0] step:7561/10000 train_time:589473ms step_avg:77.96ms +[2025-09-02 16:43:35] [Rank 0] step:7561/10000 train_time:589473ms step_avg:77.96ms +[2025-09-02 16:43:37] [Rank 0] step:7581/10000 train_time:591135ms step_avg:77.98ms +[2025-09-02 16:43:37] [Rank 0] step:7581/10000 train_time:591135ms step_avg:77.98ms +[2025-09-02 16:43:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:43:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:43:50] [Rank 0] PRINT: step:7600/10000 val_loss:3.6977 svd_entropy: attn_qk:H=0.7749,top10E=0.26,eRank=176.6,q75/q25=58.45 attn_vo:H=0.8433,top10E=0.14,eRank=298.3,q75/q25=46.44 mlp_w1:H=0.9103,top10E=0.14,eRank=425.5,q75/q25=4.42 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7435,top10E=0.24,eRank=147.0,q75/q25=2796.16 train_time:592888ms step_avg:78.01ms +[2025-09-02 16:43:50] [Rank 0] PRINT: step:7600/10000 val_loss:3.6977 svd_entropy: attn_qk:H=0.7749,top10E=0.26,eRank=176.6,q75/q25=58.45 attn_vo:H=0.8433,top10E=0.14,eRank=298.3,q75/q25=46.44 mlp_w1:H=0.9103,top10E=0.14,eRank=425.5,q75/q25=4.42 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7435,top10E=0.24,eRank=147.0,q75/q25=2796.16 train_time:592888ms step_avg:78.01ms +[2025-09-02 16:43:50] [Rank 0] step:7601/10000 train_time:592903ms step_avg:78.00ms +[2025-09-02 16:43:50] [Rank 0] step:7601/10000 train_time:592903ms step_avg:78.00ms +[2025-09-02 16:43:52] [Rank 0] step:7621/10000 train_time:594467ms step_avg:78.00ms +[2025-09-02 16:43:52] [Rank 0] step:7621/10000 train_time:594467ms step_avg:78.00ms +[2025-09-02 16:43:54] [Rank 0] step:7641/10000 train_time:596120ms step_avg:78.02ms +[2025-09-02 16:43:54] [Rank 0] step:7641/10000 train_time:596120ms step_avg:78.02ms +[2025-09-02 16:43:55] [Rank 0] step:7661/10000 train_time:597774ms step_avg:78.03ms +[2025-09-02 16:43:55] [Rank 0] step:7661/10000 train_time:597774ms step_avg:78.03ms +[2025-09-02 16:43:57] [Rank 0] step:7681/10000 train_time:599420ms step_avg:78.04ms +[2025-09-02 16:43:57] [Rank 0] step:7681/10000 train_time:599420ms step_avg:78.04ms +[2025-09-02 16:43:59] [Rank 0] step:7701/10000 train_time:601067ms step_avg:78.05ms +[2025-09-02 16:43:59] [Rank 0] step:7701/10000 train_time:601067ms step_avg:78.05ms +[2025-09-02 16:44:00] [Rank 0] step:7721/10000 train_time:602727ms step_avg:78.06ms +[2025-09-02 16:44:00] [Rank 0] step:7721/10000 train_time:602727ms step_avg:78.06ms +[2025-09-02 
16:44:02] [Rank 0] step:7741/10000 train_time:604379ms step_avg:78.08ms +[2025-09-02 16:44:02] [Rank 0] step:7741/10000 train_time:604379ms step_avg:78.08ms +[2025-09-02 16:44:03] [Rank 0] step:7761/10000 train_time:606042ms step_avg:78.09ms +[2025-09-02 16:44:03] [Rank 0] step:7761/10000 train_time:606042ms step_avg:78.09ms +[2025-09-02 16:44:05] [Rank 0] step:7781/10000 train_time:607701ms step_avg:78.10ms +[2025-09-02 16:44:05] [Rank 0] step:7781/10000 train_time:607701ms step_avg:78.10ms +[2025-09-02 16:44:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:44:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:44:18] [Rank 0] PRINT: step:7800/10000 val_loss:3.6857 svd_entropy: attn_qk:H=0.7753,top10E=0.26,eRank=177.1,q75/q25=58.32 attn_vo:H=0.8438,top10E=0.14,eRank=299.1,q75/q25=46.11 mlp_w1:H=0.9107,top10E=0.14,eRank=426.8,q75/q25=4.40 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.92 vo_prod:H=0.7443,top10E=0.24,eRank=147.8,q75/q25=2760.36 train_time:609452ms step_avg:78.13ms +[2025-09-02 16:44:18] [Rank 0] PRINT: step:7800/10000 val_loss:3.6857 svd_entropy: attn_qk:H=0.7753,top10E=0.26,eRank=177.1,q75/q25=58.32 attn_vo:H=0.8438,top10E=0.14,eRank=299.1,q75/q25=46.11 mlp_w1:H=0.9107,top10E=0.14,eRank=426.8,q75/q25=4.40 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.92 vo_prod:H=0.7443,top10E=0.24,eRank=147.8,q75/q25=2760.36 train_time:609452ms step_avg:78.13ms +[2025-09-02 16:44:19] [Rank 0] step:7801/10000 train_time:609466ms step_avg:78.13ms +[2025-09-02 16:44:19] [Rank 0] step:7801/10000 train_time:609466ms step_avg:78.13ms +[2025-09-02 16:44:20] [Rank 0] step:7821/10000 train_time:611033ms step_avg:78.13ms +[2025-09-02 16:44:20] [Rank 0] step:7821/10000 train_time:611033ms step_avg:78.13ms +[2025-09-02 16:44:22] [Rank 0] step:7841/10000 train_time:612679ms 
step_avg:78.14ms +[2025-09-02 16:44:22] [Rank 0] step:7841/10000 train_time:612679ms step_avg:78.14ms +[2025-09-02 16:44:24] [Rank 0] step:7861/10000 train_time:614341ms step_avg:78.15ms +[2025-09-02 16:44:24] [Rank 0] step:7861/10000 train_time:614341ms step_avg:78.15ms +[2025-09-02 16:44:25] [Rank 0] step:7881/10000 train_time:616002ms step_avg:78.16ms +[2025-09-02 16:44:25] [Rank 0] step:7881/10000 train_time:616002ms step_avg:78.16ms +[2025-09-02 16:44:27] [Rank 0] step:7901/10000 train_time:617655ms step_avg:78.17ms +[2025-09-02 16:44:27] [Rank 0] step:7901/10000 train_time:617655ms step_avg:78.17ms +[2025-09-02 16:44:28] [Rank 0] step:7921/10000 train_time:619313ms step_avg:78.19ms +[2025-09-02 16:44:28] [Rank 0] step:7921/10000 train_time:619313ms step_avg:78.19ms +[2025-09-02 16:44:30] [Rank 0] step:7941/10000 train_time:620978ms step_avg:78.20ms +[2025-09-02 16:44:30] [Rank 0] step:7941/10000 train_time:620978ms step_avg:78.20ms +[2025-09-02 16:44:32] [Rank 0] step:7961/10000 train_time:622639ms step_avg:78.21ms +[2025-09-02 16:44:32] [Rank 0] step:7961/10000 train_time:622639ms step_avg:78.21ms +[2025-09-02 16:44:33] [Rank 0] step:7981/10000 train_time:624288ms step_avg:78.22ms +[2025-09-02 16:44:33] [Rank 0] step:7981/10000 train_time:624288ms step_avg:78.22ms +[2025-09-02 16:44:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:44:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:44:47] [Rank 0] PRINT: step:8000/10000 val_loss:3.6703 svd_entropy: attn_qk:H=0.7758,top10E=0.25,eRank=177.5,q75/q25=57.97 attn_vo:H=0.8443,top10E=0.14,eRank=299.9,q75/q25=45.77 mlp_w1:H=0.9112,top10E=0.14,eRank=428.0,q75/q25=4.39 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.92 vo_prod:H=0.7451,top10E=0.24,eRank=148.6,q75/q25=2771.34 train_time:626115ms step_avg:78.26ms +[2025-09-02 16:44:47] [Rank 0] PRINT: step:8000/10000 val_loss:3.6703 svd_entropy: attn_qk:H=0.7758,top10E=0.25,eRank=177.5,q75/q25=57.97 attn_vo:H=0.8443,top10E=0.14,eRank=299.9,q75/q25=45.77 mlp_w1:H=0.9112,top10E=0.14,eRank=428.0,q75/q25=4.39 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.92 vo_prod:H=0.7451,top10E=0.24,eRank=148.6,q75/q25=2771.34 train_time:626115ms step_avg:78.26ms +[2025-09-02 16:44:47] [Rank 0] step:8001/10000 train_time:626129ms step_avg:78.26ms +[2025-09-02 16:44:47] [Rank 0] step:8001/10000 train_time:626129ms step_avg:78.26ms +[2025-09-02 16:44:49] [Rank 0] step:8021/10000 train_time:627717ms step_avg:78.26ms +[2025-09-02 16:44:49] [Rank 0] step:8021/10000 train_time:627717ms step_avg:78.26ms +[2025-09-02 16:44:50] [Rank 0] step:8041/10000 train_time:629376ms step_avg:78.27ms +[2025-09-02 16:44:50] [Rank 0] step:8041/10000 train_time:629376ms step_avg:78.27ms +[2025-09-02 16:44:52] [Rank 0] step:8061/10000 train_time:631029ms step_avg:78.28ms +[2025-09-02 16:44:52] [Rank 0] step:8061/10000 train_time:631029ms step_avg:78.28ms +[2025-09-02 16:44:54] [Rank 0] step:8081/10000 train_time:632676ms step_avg:78.29ms +[2025-09-02 16:44:54] [Rank 0] step:8081/10000 train_time:632676ms step_avg:78.29ms +[2025-09-02 16:44:55] [Rank 0] step:8101/10000 train_time:634335ms step_avg:78.30ms +[2025-09-02 16:44:55] [Rank 0] step:8101/10000 train_time:634335ms step_avg:78.30ms +[2025-09-02 16:44:57] [Rank 0] step:8121/10000 train_time:635987ms step_avg:78.31ms +[2025-09-02 16:44:57] [Rank 0] step:8121/10000 train_time:635987ms step_avg:78.31ms +[2025-09-02 
16:44:59] [Rank 0] step:8141/10000 train_time:637820ms step_avg:78.35ms +[2025-09-02 16:44:59] [Rank 0] step:8141/10000 train_time:637820ms step_avg:78.35ms +[2025-09-02 16:45:01] [Rank 0] step:8161/10000 train_time:639487ms step_avg:78.36ms +[2025-09-02 16:45:01] [Rank 0] step:8161/10000 train_time:639487ms step_avg:78.36ms +[2025-09-02 16:45:02] [Rank 0] step:8181/10000 train_time:641167ms step_avg:78.37ms +[2025-09-02 16:45:02] [Rank 0] step:8181/10000 train_time:641167ms step_avg:78.37ms +[2025-09-02 16:45:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:45:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:45:16] [Rank 0] PRINT: step:8200/10000 val_loss:3.6602 svd_entropy: attn_qk:H=0.7761,top10E=0.25,eRank=177.9,q75/q25=57.85 attn_vo:H=0.8447,top10E=0.14,eRank=300.6,q75/q25=45.39 mlp_w1:H=0.9115,top10E=0.14,eRank=429.0,q75/q25=4.37 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.92 vo_prod:H=0.7458,top10E=0.24,eRank=149.3,q75/q25=2757.42 train_time:642959ms step_avg:78.41ms +[2025-09-02 16:45:16] [Rank 0] PRINT: step:8200/10000 val_loss:3.6602 svd_entropy: attn_qk:H=0.7761,top10E=0.25,eRank=177.9,q75/q25=57.85 attn_vo:H=0.8447,top10E=0.14,eRank=300.6,q75/q25=45.39 mlp_w1:H=0.9115,top10E=0.14,eRank=429.0,q75/q25=4.37 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.92 vo_prod:H=0.7458,top10E=0.24,eRank=149.3,q75/q25=2757.42 train_time:642959ms step_avg:78.41ms +[2025-09-02 16:45:16] [Rank 0] step:8201/10000 train_time:642973ms step_avg:78.40ms +[2025-09-02 16:45:16] [Rank 0] step:8201/10000 train_time:642973ms step_avg:78.40ms +[2025-09-02 16:45:17] [Rank 0] step:8221/10000 train_time:644603ms step_avg:78.41ms +[2025-09-02 16:45:17] [Rank 0] step:8221/10000 train_time:644603ms step_avg:78.41ms +[2025-09-02 16:45:19] [Rank 0] step:8241/10000 train_time:646289ms 
step_avg:78.42ms +[2025-09-02 16:45:19] [Rank 0] step:8241/10000 train_time:646289ms step_avg:78.42ms +[2025-09-02 16:45:21] [Rank 0] step:8261/10000 train_time:647973ms step_avg:78.44ms +[2025-09-02 16:45:21] [Rank 0] step:8261/10000 train_time:647973ms step_avg:78.44ms +[2025-09-02 16:45:22] [Rank 0] step:8281/10000 train_time:649651ms step_avg:78.45ms +[2025-09-02 16:45:22] [Rank 0] step:8281/10000 train_time:649651ms step_avg:78.45ms +[2025-09-02 16:45:24] [Rank 0] step:8301/10000 train_time:651331ms step_avg:78.46ms +[2025-09-02 16:45:24] [Rank 0] step:8301/10000 train_time:651331ms step_avg:78.46ms +[2025-09-02 16:45:26] [Rank 0] step:8321/10000 train_time:653002ms step_avg:78.48ms +[2025-09-02 16:45:26] [Rank 0] step:8321/10000 train_time:653002ms step_avg:78.48ms +[2025-09-02 16:45:27] [Rank 0] step:8341/10000 train_time:654685ms step_avg:78.49ms +[2025-09-02 16:45:27] [Rank 0] step:8341/10000 train_time:654685ms step_avg:78.49ms +[2025-09-02 16:45:29] [Rank 0] step:8361/10000 train_time:656369ms step_avg:78.50ms +[2025-09-02 16:45:29] [Rank 0] step:8361/10000 train_time:656369ms step_avg:78.50ms +[2025-09-02 16:45:31] [Rank 0] step:8381/10000 train_time:658047ms step_avg:78.52ms +[2025-09-02 16:45:31] [Rank 0] step:8381/10000 train_time:658047ms step_avg:78.52ms +[2025-09-02 16:45:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:45:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:45:44] [Rank 0] PRINT: step:8400/10000 val_loss:3.6493 svd_entropy: attn_qk:H=0.7764,top10E=0.25,eRank=178.2,q75/q25=57.64 attn_vo:H=0.8451,top10E=0.14,eRank=301.3,q75/q25=45.18 mlp_w1:H=0.9119,top10E=0.13,eRank=430.0,q75/q25=4.35 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.91 vo_prod:H=0.7464,top10E=0.24,eRank=149.9,q75/q25=2729.57 train_time:659811ms step_avg:78.55ms +[2025-09-02 16:45:44] [Rank 0] PRINT: step:8400/10000 val_loss:3.6493 svd_entropy: attn_qk:H=0.7764,top10E=0.25,eRank=178.2,q75/q25=57.64 attn_vo:H=0.8451,top10E=0.14,eRank=301.3,q75/q25=45.18 mlp_w1:H=0.9119,top10E=0.13,eRank=430.0,q75/q25=4.35 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.91 vo_prod:H=0.7464,top10E=0.24,eRank=149.9,q75/q25=2729.57 train_time:659811ms step_avg:78.55ms +[2025-09-02 16:45:44] [Rank 0] step:8401/10000 train_time:659826ms step_avg:78.54ms +[2025-09-02 16:45:44] [Rank 0] step:8401/10000 train_time:659826ms step_avg:78.54ms +[2025-09-02 16:45:46] [Rank 0] step:8421/10000 train_time:661443ms step_avg:78.55ms +[2025-09-02 16:45:46] [Rank 0] step:8421/10000 train_time:661443ms step_avg:78.55ms +[2025-09-02 16:45:48] [Rank 0] step:8441/10000 train_time:663121ms step_avg:78.56ms +[2025-09-02 16:45:48] [Rank 0] step:8441/10000 train_time:663121ms step_avg:78.56ms +[2025-09-02 16:45:49] [Rank 0] step:8461/10000 train_time:664798ms step_avg:78.57ms +[2025-09-02 16:45:49] [Rank 0] step:8461/10000 train_time:664798ms step_avg:78.57ms +[2025-09-02 16:45:51] [Rank 0] step:8481/10000 train_time:666481ms step_avg:78.59ms +[2025-09-02 16:45:51] [Rank 0] step:8481/10000 train_time:666481ms step_avg:78.59ms +[2025-09-02 16:45:53] [Rank 0] step:8501/10000 train_time:668184ms step_avg:78.60ms +[2025-09-02 16:45:53] [Rank 0] step:8501/10000 train_time:668184ms step_avg:78.60ms +[2025-09-02 16:45:55] [Rank 0] step:8521/10000 train_time:669868ms step_avg:78.61ms +[2025-09-02 16:45:55] [Rank 0] step:8521/10000 train_time:669868ms step_avg:78.61ms +[2025-09-02 
16:45:56] [Rank 0] step:8541/10000 train_time:671561ms step_avg:78.63ms +[2025-09-02 16:45:56] [Rank 0] step:8541/10000 train_time:671561ms step_avg:78.63ms +[2025-09-02 16:45:58] [Rank 0] step:8561/10000 train_time:673246ms step_avg:78.64ms +[2025-09-02 16:45:58] [Rank 0] step:8561/10000 train_time:673246ms step_avg:78.64ms +[2025-09-02 16:46:00] [Rank 0] step:8581/10000 train_time:674930ms step_avg:78.65ms +[2025-09-02 16:46:00] [Rank 0] step:8581/10000 train_time:674930ms step_avg:78.65ms +[2025-09-02 16:46:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:46:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:46:13] [Rank 0] PRINT: step:8600/10000 val_loss:3.6403 svd_entropy: attn_qk:H=0.7767,top10E=0.25,eRank=178.5,q75/q25=57.45 attn_vo:H=0.8455,top10E=0.14,eRank=301.8,q75/q25=45.09 mlp_w1:H=0.9122,top10E=0.13,eRank=430.8,q75/q25=4.35 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.91 vo_prod:H=0.7470,top10E=0.24,eRank=150.5,q75/q25=2733.04 train_time:676690ms step_avg:78.68ms +[2025-09-02 16:46:13] [Rank 0] PRINT: step:8600/10000 val_loss:3.6403 svd_entropy: attn_qk:H=0.7767,top10E=0.25,eRank=178.5,q75/q25=57.45 attn_vo:H=0.8455,top10E=0.14,eRank=301.8,q75/q25=45.09 mlp_w1:H=0.9122,top10E=0.13,eRank=430.8,q75/q25=4.35 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.91 vo_prod:H=0.7470,top10E=0.24,eRank=150.5,q75/q25=2733.04 train_time:676690ms step_avg:78.68ms +[2025-09-02 16:46:13] [Rank 0] step:8601/10000 train_time:676704ms step_avg:78.68ms +[2025-09-02 16:46:13] [Rank 0] step:8601/10000 train_time:676704ms step_avg:78.68ms +[2025-09-02 16:46:15] [Rank 0] step:8621/10000 train_time:678323ms step_avg:78.68ms +[2025-09-02 16:46:15] [Rank 0] step:8621/10000 train_time:678323ms step_avg:78.68ms +[2025-09-02 16:46:16] [Rank 0] step:8641/10000 train_time:680003ms 
step_avg:78.69ms +[2025-09-02 16:46:16] [Rank 0] step:8641/10000 train_time:680003ms step_avg:78.69ms +[2025-09-02 16:46:18] [Rank 0] step:8661/10000 train_time:681685ms step_avg:78.71ms +[2025-09-02 16:46:18] [Rank 0] step:8661/10000 train_time:681685ms step_avg:78.71ms +[2025-09-02 16:46:20] [Rank 0] step:8681/10000 train_time:683362ms step_avg:78.72ms +[2025-09-02 16:46:20] [Rank 0] step:8681/10000 train_time:683362ms step_avg:78.72ms +[2025-09-02 16:46:22] [Rank 0] step:8701/10000 train_time:685036ms step_avg:78.73ms +[2025-09-02 16:46:22] [Rank 0] step:8701/10000 train_time:685036ms step_avg:78.73ms +[2025-09-02 16:46:23] [Rank 0] step:8721/10000 train_time:686719ms step_avg:78.74ms +[2025-09-02 16:46:23] [Rank 0] step:8721/10000 train_time:686719ms step_avg:78.74ms +[2025-09-02 16:46:25] [Rank 0] step:8741/10000 train_time:688390ms step_avg:78.75ms +[2025-09-02 16:46:25] [Rank 0] step:8741/10000 train_time:688390ms step_avg:78.75ms +[2025-09-02 16:46:27] [Rank 0] step:8761/10000 train_time:690079ms step_avg:78.77ms +[2025-09-02 16:46:27] [Rank 0] step:8761/10000 train_time:690079ms step_avg:78.77ms +[2025-09-02 16:46:28] [Rank 0] step:8781/10000 train_time:691763ms step_avg:78.78ms +[2025-09-02 16:46:28] [Rank 0] step:8781/10000 train_time:691763ms step_avg:78.78ms +[2025-09-02 16:46:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:46:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:46:42] [Rank 0] PRINT: step:8800/10000 val_loss:3.6310 svd_entropy: attn_qk:H=0.7770,top10E=0.25,eRank=178.8,q75/q25=57.15 attn_vo:H=0.8458,top10E=0.14,eRank=302.4,q75/q25=44.98 mlp_w1:H=0.9124,top10E=0.13,eRank=431.6,q75/q25=4.33 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7476,top10E=0.24,eRank=151.1,q75/q25=2756.21 train_time:693540ms step_avg:78.81ms +[2025-09-02 16:46:42] [Rank 0] PRINT: step:8800/10000 val_loss:3.6310 svd_entropy: attn_qk:H=0.7770,top10E=0.25,eRank=178.8,q75/q25=57.15 attn_vo:H=0.8458,top10E=0.14,eRank=302.4,q75/q25=44.98 mlp_w1:H=0.9124,top10E=0.13,eRank=431.6,q75/q25=4.33 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7476,top10E=0.24,eRank=151.1,q75/q25=2756.21 train_time:693540ms step_avg:78.81ms +[2025-09-02 16:46:42] [Rank 0] step:8801/10000 train_time:693554ms step_avg:78.80ms +[2025-09-02 16:46:42] [Rank 0] step:8801/10000 train_time:693554ms step_avg:78.80ms +[2025-09-02 16:46:44] [Rank 0] step:8821/10000 train_time:695145ms step_avg:78.81ms +[2025-09-02 16:46:44] [Rank 0] step:8821/10000 train_time:695145ms step_avg:78.81ms +[2025-09-02 16:46:45] [Rank 0] step:8841/10000 train_time:696844ms step_avg:78.82ms +[2025-09-02 16:46:45] [Rank 0] step:8841/10000 train_time:696844ms step_avg:78.82ms +[2025-09-02 16:46:47] [Rank 0] step:8861/10000 train_time:698520ms step_avg:78.83ms +[2025-09-02 16:46:47] [Rank 0] step:8861/10000 train_time:698520ms step_avg:78.83ms +[2025-09-02 16:46:49] [Rank 0] step:8881/10000 train_time:700201ms step_avg:78.84ms +[2025-09-02 16:46:49] [Rank 0] step:8881/10000 train_time:700201ms step_avg:78.84ms +[2025-09-02 16:46:50] [Rank 0] step:8901/10000 train_time:701881ms step_avg:78.85ms +[2025-09-02 16:46:50] [Rank 0] step:8901/10000 train_time:701881ms step_avg:78.85ms +[2025-09-02 16:46:52] [Rank 0] step:8921/10000 train_time:703576ms step_avg:78.87ms +[2025-09-02 16:46:52] [Rank 0] step:8921/10000 train_time:703576ms step_avg:78.87ms +[2025-09-02 
16:46:54] [Rank 0] step:8941/10000 train_time:705266ms step_avg:78.88ms +[2025-09-02 16:46:54] [Rank 0] step:8941/10000 train_time:705266ms step_avg:78.88ms +[2025-09-02 16:46:55] [Rank 0] step:8961/10000 train_time:706946ms step_avg:78.89ms +[2025-09-02 16:46:55] [Rank 0] step:8961/10000 train_time:706946ms step_avg:78.89ms +[2025-09-02 16:46:57] [Rank 0] step:8981/10000 train_time:708630ms step_avg:78.90ms +[2025-09-02 16:46:57] [Rank 0] step:8981/10000 train_time:708630ms step_avg:78.90ms +[2025-09-02 16:46:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:46:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:47:11] [Rank 0] PRINT: step:9000/10000 val_loss:3.6223 svd_entropy: attn_qk:H=0.7772,top10E=0.25,eRank=179.0,q75/q25=57.25 attn_vo:H=0.8461,top10E=0.14,eRank=302.9,q75/q25=44.75 mlp_w1:H=0.9127,top10E=0.13,eRank=432.3,q75/q25=4.33 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7482,top10E=0.24,eRank=151.6,q75/q25=2771.03 train_time:710397ms step_avg:78.93ms +[2025-09-02 16:47:11] [Rank 0] PRINT: step:9000/10000 val_loss:3.6223 svd_entropy: attn_qk:H=0.7772,top10E=0.25,eRank=179.0,q75/q25=57.25 attn_vo:H=0.8461,top10E=0.14,eRank=302.9,q75/q25=44.75 mlp_w1:H=0.9127,top10E=0.13,eRank=432.3,q75/q25=4.33 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7482,top10E=0.24,eRank=151.6,q75/q25=2771.03 train_time:710397ms step_avg:78.93ms +[2025-09-02 16:47:11] [Rank 0] step:9001/10000 train_time:710412ms step_avg:78.93ms +[2025-09-02 16:47:11] [Rank 0] step:9001/10000 train_time:710412ms step_avg:78.93ms +[2025-09-02 16:47:12] [Rank 0] step:9021/10000 train_time:712018ms step_avg:78.93ms +[2025-09-02 16:47:12] [Rank 0] step:9021/10000 train_time:712018ms step_avg:78.93ms +[2025-09-02 16:47:14] [Rank 0] step:9041/10000 train_time:713698ms 
step_avg:78.94ms +[2025-09-02 16:47:14] [Rank 0] step:9041/10000 train_time:713698ms step_avg:78.94ms +[2025-09-02 16:47:16] [Rank 0] step:9061/10000 train_time:715390ms step_avg:78.95ms +[2025-09-02 16:47:16] [Rank 0] step:9061/10000 train_time:715390ms step_avg:78.95ms +[2025-09-02 16:47:17] [Rank 0] step:9081/10000 train_time:717082ms step_avg:78.97ms +[2025-09-02 16:47:17] [Rank 0] step:9081/10000 train_time:717082ms step_avg:78.97ms +[2025-09-02 16:47:19] [Rank 0] step:9101/10000 train_time:718787ms step_avg:78.98ms +[2025-09-02 16:47:19] [Rank 0] step:9101/10000 train_time:718787ms step_avg:78.98ms +[2025-09-02 16:47:21] [Rank 0] step:9121/10000 train_time:720475ms step_avg:78.99ms +[2025-09-02 16:47:21] [Rank 0] step:9121/10000 train_time:720475ms step_avg:78.99ms +[2025-09-02 16:47:22] [Rank 0] step:9141/10000 train_time:722151ms step_avg:79.00ms +[2025-09-02 16:47:22] [Rank 0] step:9141/10000 train_time:722151ms step_avg:79.00ms +[2025-09-02 16:47:24] [Rank 0] step:9161/10000 train_time:723832ms step_avg:79.01ms +[2025-09-02 16:47:24] [Rank 0] step:9161/10000 train_time:723832ms step_avg:79.01ms +[2025-09-02 16:47:26] [Rank 0] step:9181/10000 train_time:725552ms step_avg:79.03ms +[2025-09-02 16:47:26] [Rank 0] step:9181/10000 train_time:725552ms step_avg:79.03ms +[2025-09-02 16:47:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:47:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:47:39] [Rank 0] PRINT: step:9200/10000 val_loss:3.6145 svd_entropy: attn_qk:H=0.7774,top10E=0.25,eRank=179.2,q75/q25=57.05 attn_vo:H=0.8463,top10E=0.14,eRank=303.3,q75/q25=44.67 mlp_w1:H=0.9129,top10E=0.13,eRank=432.8,q75/q25=4.32 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7486,top10E=0.24,eRank=152.1,q75/q25=2730.50 train_time:727321ms step_avg:79.06ms +[2025-09-02 16:47:39] [Rank 0] PRINT: step:9200/10000 val_loss:3.6145 svd_entropy: attn_qk:H=0.7774,top10E=0.25,eRank=179.2,q75/q25=57.05 attn_vo:H=0.8463,top10E=0.14,eRank=303.3,q75/q25=44.67 mlp_w1:H=0.9129,top10E=0.13,eRank=432.8,q75/q25=4.32 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7486,top10E=0.24,eRank=152.1,q75/q25=2730.50 train_time:727321ms step_avg:79.06ms +[2025-09-02 16:47:39] [Rank 0] step:9201/10000 train_time:727336ms step_avg:79.05ms +[2025-09-02 16:47:39] [Rank 0] step:9201/10000 train_time:727336ms step_avg:79.05ms +[2025-09-02 16:47:41] [Rank 0] step:9221/10000 train_time:728953ms step_avg:79.05ms +[2025-09-02 16:47:41] [Rank 0] step:9221/10000 train_time:728953ms step_avg:79.05ms +[2025-09-02 16:47:43] [Rank 0] step:9241/10000 train_time:730646ms step_avg:79.07ms +[2025-09-02 16:47:43] [Rank 0] step:9241/10000 train_time:730646ms step_avg:79.07ms +[2025-09-02 16:47:45] [Rank 0] step:9261/10000 train_time:732347ms step_avg:79.08ms +[2025-09-02 16:47:45] [Rank 0] step:9261/10000 train_time:732347ms step_avg:79.08ms +[2025-09-02 16:47:46] [Rank 0] step:9281/10000 train_time:734026ms step_avg:79.09ms +[2025-09-02 16:47:46] [Rank 0] step:9281/10000 train_time:734026ms step_avg:79.09ms +[2025-09-02 16:47:48] [Rank 0] step:9301/10000 train_time:735782ms step_avg:79.11ms +[2025-09-02 16:47:48] [Rank 0] step:9301/10000 train_time:735782ms step_avg:79.11ms +[2025-09-02 16:47:50] [Rank 0] step:9321/10000 train_time:737469ms step_avg:79.12ms +[2025-09-02 16:47:50] [Rank 0] step:9321/10000 train_time:737469ms step_avg:79.12ms +[2025-09-02 
16:47:51] [Rank 0] step:9341/10000 train_time:739156ms step_avg:79.13ms +[2025-09-02 16:47:51] [Rank 0] step:9341/10000 train_time:739156ms step_avg:79.13ms +[2025-09-02 16:47:53] [Rank 0] step:9361/10000 train_time:740855ms step_avg:79.14ms +[2025-09-02 16:47:53] [Rank 0] step:9361/10000 train_time:740855ms step_avg:79.14ms +[2025-09-02 16:47:55] [Rank 0] step:9381/10000 train_time:742556ms step_avg:79.16ms +[2025-09-02 16:47:55] [Rank 0] step:9381/10000 train_time:742556ms step_avg:79.16ms +[2025-09-02 16:47:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:47:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:48:08] [Rank 0] PRINT: step:9400/10000 val_loss:3.6067 svd_entropy: attn_qk:H=0.7775,top10E=0.25,eRank=179.4,q75/q25=56.87 attn_vo:H=0.8466,top10E=0.14,eRank=303.7,q75/q25=44.53 mlp_w1:H=0.9130,top10E=0.13,eRank=433.3,q75/q25=4.31 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7490,top10E=0.24,eRank=152.5,q75/q25=2734.57 train_time:744335ms step_avg:79.18ms +[2025-09-02 16:48:08] [Rank 0] PRINT: step:9400/10000 val_loss:3.6067 svd_entropy: attn_qk:H=0.7775,top10E=0.25,eRank=179.4,q75/q25=56.87 attn_vo:H=0.8466,top10E=0.14,eRank=303.7,q75/q25=44.53 mlp_w1:H=0.9130,top10E=0.13,eRank=433.3,q75/q25=4.31 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7490,top10E=0.24,eRank=152.5,q75/q25=2734.57 train_time:744335ms step_avg:79.18ms +[2025-09-02 16:48:08] [Rank 0] step:9401/10000 train_time:744350ms step_avg:79.18ms +[2025-09-02 16:48:08] [Rank 0] step:9401/10000 train_time:744350ms step_avg:79.18ms +[2025-09-02 16:48:10] [Rank 0] step:9421/10000 train_time:745963ms step_avg:79.18ms +[2025-09-02 16:48:10] [Rank 0] step:9421/10000 train_time:745963ms step_avg:79.18ms +[2025-09-02 16:48:12] [Rank 0] step:9441/10000 train_time:747646ms 
step_avg:79.19ms +[2025-09-02 16:48:12] [Rank 0] step:9441/10000 train_time:747646ms step_avg:79.19ms +[2025-09-02 16:48:13] [Rank 0] step:9461/10000 train_time:749336ms step_avg:79.20ms +[2025-09-02 16:48:13] [Rank 0] step:9461/10000 train_time:749336ms step_avg:79.20ms +[2025-09-02 16:48:15] [Rank 0] step:9481/10000 train_time:751017ms step_avg:79.21ms +[2025-09-02 16:48:15] [Rank 0] step:9481/10000 train_time:751017ms step_avg:79.21ms +[2025-09-02 16:48:17] [Rank 0] step:9501/10000 train_time:752719ms step_avg:79.23ms +[2025-09-02 16:48:17] [Rank 0] step:9501/10000 train_time:752719ms step_avg:79.23ms +[2025-09-02 16:48:18] [Rank 0] step:9521/10000 train_time:754394ms step_avg:79.23ms +[2025-09-02 16:48:18] [Rank 0] step:9521/10000 train_time:754394ms step_avg:79.23ms +[2025-09-02 16:48:20] [Rank 0] step:9541/10000 train_time:756078ms step_avg:79.25ms +[2025-09-02 16:48:20] [Rank 0] step:9541/10000 train_time:756078ms step_avg:79.25ms +[2025-09-02 16:48:22] [Rank 0] step:9561/10000 train_time:757754ms step_avg:79.25ms +[2025-09-02 16:48:22] [Rank 0] step:9561/10000 train_time:757754ms step_avg:79.25ms +[2025-09-02 16:48:23] [Rank 0] step:9581/10000 train_time:759435ms step_avg:79.26ms +[2025-09-02 16:48:23] [Rank 0] step:9581/10000 train_time:759435ms step_avg:79.26ms +[2025-09-02 16:48:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:48:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:48:37] [Rank 0] PRINT: step:9600/10000 val_loss:3.6007 svd_entropy: attn_qk:H=0.7777,top10E=0.25,eRank=179.5,q75/q25=56.75 attn_vo:H=0.8468,top10E=0.14,eRank=304.0,q75/q25=44.41 mlp_w1:H=0.9132,top10E=0.13,eRank=433.7,q75/q25=4.30 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7494,top10E=0.24,eRank=152.9,q75/q25=2726.83 train_time:761216ms step_avg:79.29ms +[2025-09-02 16:48:37] [Rank 0] PRINT: step:9600/10000 val_loss:3.6007 svd_entropy: attn_qk:H=0.7777,top10E=0.25,eRank=179.5,q75/q25=56.75 attn_vo:H=0.8468,top10E=0.14,eRank=304.0,q75/q25=44.41 mlp_w1:H=0.9132,top10E=0.13,eRank=433.7,q75/q25=4.30 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7494,top10E=0.24,eRank=152.9,q75/q25=2726.83 train_time:761216ms step_avg:79.29ms +[2025-09-02 16:48:37] [Rank 0] step:9601/10000 train_time:761231ms step_avg:79.29ms +[2025-09-02 16:48:37] [Rank 0] step:9601/10000 train_time:761231ms step_avg:79.29ms +[2025-09-02 16:48:39] [Rank 0] step:9621/10000 train_time:762837ms step_avg:79.29ms +[2025-09-02 16:48:39] [Rank 0] step:9621/10000 train_time:762837ms step_avg:79.29ms +[2025-09-02 16:48:40] [Rank 0] step:9641/10000 train_time:764523ms step_avg:79.30ms +[2025-09-02 16:48:40] [Rank 0] step:9641/10000 train_time:764523ms step_avg:79.30ms +[2025-09-02 16:48:42] [Rank 0] step:9661/10000 train_time:766237ms step_avg:79.31ms +[2025-09-02 16:48:42] [Rank 0] step:9661/10000 train_time:766237ms step_avg:79.31ms +[2025-09-02 16:48:44] [Rank 0] step:9681/10000 train_time:767940ms step_avg:79.32ms +[2025-09-02 16:48:44] [Rank 0] step:9681/10000 train_time:767940ms step_avg:79.32ms +[2025-09-02 16:48:46] [Rank 0] step:9701/10000 train_time:769658ms step_avg:79.34ms +[2025-09-02 16:48:46] [Rank 0] step:9701/10000 train_time:769658ms step_avg:79.34ms +[2025-09-02 16:48:47] [Rank 0] step:9721/10000 train_time:771357ms step_avg:79.35ms +[2025-09-02 16:48:47] [Rank 0] step:9721/10000 train_time:771357ms step_avg:79.35ms +[2025-09-02 
16:48:49] [Rank 0] step:9741/10000 train_time:773162ms step_avg:79.37ms +[2025-09-02 16:48:49] [Rank 0] step:9741/10000 train_time:773162ms step_avg:79.37ms +[2025-09-02 16:48:51] [Rank 0] step:9761/10000 train_time:774922ms step_avg:79.39ms +[2025-09-02 16:48:51] [Rank 0] step:9761/10000 train_time:774922ms step_avg:79.39ms +[2025-09-02 16:48:53] [Rank 0] step:9781/10000 train_time:776639ms step_avg:79.40ms +[2025-09-02 16:48:53] [Rank 0] step:9781/10000 train_time:776639ms step_avg:79.40ms +[2025-09-02 16:48:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:48:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:49:06] [Rank 0] PRINT: step:9800/10000 val_loss:3.5939 svd_entropy: attn_qk:H=0.7777,top10E=0.25,eRank=179.6,q75/q25=56.61 attn_vo:H=0.8469,top10E=0.13,eRank=304.2,q75/q25=44.28 mlp_w1:H=0.9133,top10E=0.13,eRank=434.0,q75/q25=4.29 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7497,top10E=0.24,eRank=153.2,q75/q25=2733.85 train_time:778454ms step_avg:79.43ms +[2025-09-02 16:49:06] [Rank 0] PRINT: step:9800/10000 val_loss:3.5939 svd_entropy: attn_qk:H=0.7777,top10E=0.25,eRank=179.6,q75/q25=56.61 attn_vo:H=0.8469,top10E=0.13,eRank=304.2,q75/q25=44.28 mlp_w1:H=0.9133,top10E=0.13,eRank=434.0,q75/q25=4.29 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7497,top10E=0.24,eRank=153.2,q75/q25=2733.85 train_time:778454ms step_avg:79.43ms +[2025-09-02 16:49:06] [Rank 0] step:9801/10000 train_time:778469ms step_avg:79.43ms +[2025-09-02 16:49:06] [Rank 0] step:9801/10000 train_time:778469ms step_avg:79.43ms +[2025-09-02 16:49:08] [Rank 0] step:9821/10000 train_time:780087ms step_avg:79.43ms +[2025-09-02 16:49:08] [Rank 0] step:9821/10000 train_time:780087ms step_avg:79.43ms +[2025-09-02 16:49:10] [Rank 0] step:9841/10000 train_time:781805ms 
step_avg:79.44ms +[2025-09-02 16:49:10] [Rank 0] step:9841/10000 train_time:781805ms step_avg:79.44ms +[2025-09-02 16:49:11] [Rank 0] step:9861/10000 train_time:783502ms step_avg:79.45ms +[2025-09-02 16:49:11] [Rank 0] step:9861/10000 train_time:783502ms step_avg:79.45ms +[2025-09-02 16:49:13] [Rank 0] step:9881/10000 train_time:785199ms step_avg:79.47ms +[2025-09-02 16:49:13] [Rank 0] step:9881/10000 train_time:785199ms step_avg:79.47ms +[2025-09-02 16:49:15] [Rank 0] step:9901/10000 train_time:786910ms step_avg:79.48ms +[2025-09-02 16:49:15] [Rank 0] step:9901/10000 train_time:786910ms step_avg:79.48ms +[2025-09-02 16:49:16] [Rank 0] step:9921/10000 train_time:788617ms step_avg:79.49ms +[2025-09-02 16:49:16] [Rank 0] step:9921/10000 train_time:788617ms step_avg:79.49ms +[2025-09-02 16:49:18] [Rank 0] step:9941/10000 train_time:790328ms step_avg:79.50ms +[2025-09-02 16:49:18] [Rank 0] step:9941/10000 train_time:790328ms step_avg:79.50ms +[2025-09-02 16:49:20] [Rank 0] step:9961/10000 train_time:792035ms step_avg:79.51ms +[2025-09-02 16:49:20] [Rank 0] step:9961/10000 train_time:792035ms step_avg:79.51ms +[2025-09-02 16:49:22] [Rank 0] step:9981/10000 train_time:793745ms step_avg:79.53ms +[2025-09-02 16:49:22] [Rank 0] step:9981/10000 train_time:793745ms step_avg:79.53ms +[2025-09-02 16:49:23] [Rank 0] step:10000/10000 train_time:795374ms step_avg:79.54ms +[2025-09-02 16:49:23] [Rank 0] step:10000/10000 train_time:795374ms step_avg:79.54ms +[2025-09-02 16:49:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:49:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:49:35] [Rank 0] PRINT: step:10000/10000 val_loss:3.5884 svd_entropy: attn_qk:H=0.7778,top10E=0.25,eRank=179.6,q75/q25=56.59 attn_vo:H=0.8470,top10E=0.13,eRank=304.4,q75/q25=44.21 mlp_w1:H=0.9134,top10E=0.13,eRank=434.3,q75/q25=4.29 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7499,top10E=0.23,eRank=153.4,q75/q25=2735.82 train_time:795555ms step_avg:79.56ms +[2025-09-02 16:49:35] [Rank 0] PRINT: step:10000/10000 val_loss:3.5884 svd_entropy: attn_qk:H=0.7778,top10E=0.25,eRank=179.6,q75/q25=56.59 attn_vo:H=0.8470,top10E=0.13,eRank=304.4,q75/q25=44.21 mlp_w1:H=0.9134,top10E=0.13,eRank=434.3,q75/q25=4.29 mlp_w2:H=0.9703,top10E=0.04,eRank=630.4,q75/q25=2.92 vo_prod:H=0.7499,top10E=0.23,eRank=153.4,q75/q25=2735.82 train_time:795555ms step_avg:79.56ms +[2025-09-02 16:49:35] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 16:49:35 2025 --- +[2025-09-02 16:49:35] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 16:49:35 2025 --- +[2025-09-02 16:49:35] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15096 MiB +[2025-09-02 16:49:35] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15096 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_49/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e3195f014665e6211e30691f31d3217ca3c4f97b --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_49/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "ffcb5fcc-c278-4ebe-a745-59552de364a3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_49/training_log_ffcb5fcc-c278-4ebe-a745-59552de364a3.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_49/training_log_ffcb5fcc-c278-4ebe-a745-59552de364a3.txt new file mode 100644 index 0000000000000000000000000000000000000000..57dff7463951a0440c08e2688087a597cd155c67 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_49/training_log_ffcb5fcc-c278-4ebe-a745-59552de364a3.txt @@ -0,0 +1,2984 @@ +[2025-09-02 17:37:54] [Rank 0] PRINT: --- Script Start: Tue Sep 2 17:37:54 2025 --- +[2025-09-02 17:37:54] [Rank 0] PRINT: --- Script Start: Tue Sep 2 17:37:54 2025 --- +[2025-09-02 17:37:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 17:37:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 17:37:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 17:37:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 17:37:54] [Rank 0] PRINT: Using fixed seed: 49 +[2025-09-02 17:37:54] [Rank 0] PRINT: Using fixed seed: 49 +[2025-09-02 17:37:54] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_49 +[2025-09-02 17:37:54] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_49 +[2025-09-02 17:37:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 17:37:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 17:37:54] [Rank 0] PRINT: Constructing model... +[2025-09-02 17:37:54] [Rank 0] PRINT: Constructing model... +[2025-09-02 17:37:56] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 17:37:56] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 17:37:56] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 17:37:56] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 17:37:56] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 17:37:56] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 17:37:56] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 17:37:56] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-02 17:37:56] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 17:37:56] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-02 17:37:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 17:37:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 17:37:56] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 17:37:56] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-02 17:37:56] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 17:37:56] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 17:37:56] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 17:37:56] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 17:37:56] [Rank 0] PRINT: Starting warmup... +[2025-09-02 17:37:56] [Rank 0] PRINT: Starting warmup... +[2025-09-02 17:38:38] [Rank 0] PRINT: Warmup complete. +[2025-09-02 17:38:38] [Rank 0] PRINT: Warmup complete. +[2025-09-02 17:38:38] [Rank 0] PRINT: Starting training... +[2025-09-02 17:38:38] [Rank 0] PRINT: Starting training... 
+[2025-09-02 17:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:38:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.7,q75/q25=10.30 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 17:38:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.7,q75/q25=10.30 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 17:38:56] [Rank 0] step:21/10000 train_time:1412ms step_avg:67.26ms +[2025-09-02 17:38:56] [Rank 0] step:21/10000 train_time:1412ms step_avg:67.26ms +[2025-09-02 17:38:58] [Rank 0] step:41/10000 train_time:2861ms step_avg:69.78ms +[2025-09-02 17:38:58] [Rank 0] step:41/10000 train_time:2861ms step_avg:69.78ms +[2025-09-02 17:38:59] [Rank 0] step:61/10000 train_time:4310ms step_avg:70.66ms +[2025-09-02 17:38:59] [Rank 0] step:61/10000 train_time:4310ms step_avg:70.66ms +[2025-09-02 17:39:00] [Rank 0] step:81/10000 train_time:5760ms step_avg:71.11ms +[2025-09-02 17:39:00] [Rank 0] step:81/10000 train_time:5760ms step_avg:71.11ms +[2025-09-02 17:39:02] [Rank 0] step:101/10000 train_time:7210ms step_avg:71.39ms +[2025-09-02 17:39:02] [Rank 0] step:101/10000 train_time:7210ms step_avg:71.39ms +[2025-09-02 17:39:03] [Rank 0] step:121/10000 train_time:8661ms step_avg:71.58ms +[2025-09-02 17:39:03] [Rank 0] step:121/10000 
train_time:8661ms step_avg:71.58ms +[2025-09-02 17:39:05] [Rank 0] step:141/10000 train_time:10113ms step_avg:71.72ms +[2025-09-02 17:39:05] [Rank 0] step:141/10000 train_time:10113ms step_avg:71.72ms +[2025-09-02 17:39:06] [Rank 0] step:161/10000 train_time:11565ms step_avg:71.83ms +[2025-09-02 17:39:06] [Rank 0] step:161/10000 train_time:11565ms step_avg:71.83ms +[2025-09-02 17:39:08] [Rank 0] step:181/10000 train_time:13016ms step_avg:71.91ms +[2025-09-02 17:39:08] [Rank 0] step:181/10000 train_time:13016ms step_avg:71.91ms +[2025-09-02 17:39:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:39:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:39:21] [Rank 0] PRINT: step:200/10000 val_loss:6.2371 svd_entropy: attn_qk:H=0.6072,top10E=0.55,eRank=96.8,q75/q25=13.14 attn_vo:H=0.5179,top10E=0.57,eRank=77.2,q75/q25=inf mlp_w1:H=0.6610,top10E=0.51,eRank=95.1,q75/q25=2.96 mlp_w2:H=0.7989,top10E=0.18,eRank=206.1,q75/q25=16.64 vo_prod:H=0.3265,top10E=0.81,eRank=14.6,q75/q25=inf train_time:14541ms step_avg:72.71ms +[2025-09-02 17:39:21] [Rank 0] PRINT: step:200/10000 val_loss:6.2371 svd_entropy: attn_qk:H=0.6072,top10E=0.55,eRank=96.8,q75/q25=13.14 attn_vo:H=0.5179,top10E=0.57,eRank=77.2,q75/q25=inf mlp_w1:H=0.6610,top10E=0.51,eRank=95.1,q75/q25=2.96 mlp_w2:H=0.7989,top10E=0.18,eRank=206.1,q75/q25=16.64 vo_prod:H=0.3265,top10E=0.81,eRank=14.6,q75/q25=inf train_time:14541ms step_avg:72.71ms +[2025-09-02 17:39:21] [Rank 0] step:201/10000 train_time:14556ms step_avg:72.42ms +[2025-09-02 17:39:21] [Rank 0] step:201/10000 train_time:14556ms step_avg:72.42ms +[2025-09-02 17:39:23] [Rank 0] step:221/10000 train_time:16063ms step_avg:72.69ms +[2025-09-02 17:39:23] [Rank 0] step:221/10000 train_time:16063ms step_avg:72.69ms +[2025-09-02 17:39:24] [Rank 0] step:241/10000 train_time:17511ms 
step_avg:72.66ms +[2025-09-02 17:39:24] [Rank 0] step:241/10000 train_time:17511ms step_avg:72.66ms +[2025-09-02 17:39:26] [Rank 0] step:261/10000 train_time:19003ms step_avg:72.81ms +[2025-09-02 17:39:26] [Rank 0] step:261/10000 train_time:19003ms step_avg:72.81ms +[2025-09-02 17:39:27] [Rank 0] step:281/10000 train_time:20451ms step_avg:72.78ms +[2025-09-02 17:39:27] [Rank 0] step:281/10000 train_time:20451ms step_avg:72.78ms +[2025-09-02 17:39:29] [Rank 0] step:301/10000 train_time:21899ms step_avg:72.76ms +[2025-09-02 17:39:29] [Rank 0] step:301/10000 train_time:21899ms step_avg:72.76ms +[2025-09-02 17:39:30] [Rank 0] step:321/10000 train_time:23348ms step_avg:72.74ms +[2025-09-02 17:39:30] [Rank 0] step:321/10000 train_time:23348ms step_avg:72.74ms +[2025-09-02 17:39:31] [Rank 0] step:341/10000 train_time:24799ms step_avg:72.72ms +[2025-09-02 17:39:31] [Rank 0] step:341/10000 train_time:24799ms step_avg:72.72ms +[2025-09-02 17:39:33] [Rank 0] step:361/10000 train_time:26249ms step_avg:72.71ms +[2025-09-02 17:39:33] [Rank 0] step:361/10000 train_time:26249ms step_avg:72.71ms +[2025-09-02 17:39:34] [Rank 0] step:381/10000 train_time:27699ms step_avg:72.70ms +[2025-09-02 17:39:34] [Rank 0] step:381/10000 train_time:27699ms step_avg:72.70ms +[2025-09-02 17:39:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:39:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:39:48] [Rank 0] PRINT: step:400/10000 val_loss:5.7258 svd_entropy: attn_qk:H=0.6140,top10E=0.50,eRank=72.1,q75/q25=16.11 attn_vo:H=0.6178,top10E=0.45,eRank=92.7,q75/q25=24.80 mlp_w1:H=0.6870,top10E=0.41,eRank=115.4,q75/q25=4.51 mlp_w2:H=0.9274,top10E=0.07,eRank=475.3,q75/q25=6.56 vo_prod:H=0.4686,top10E=0.72,eRank=26.7,q75/q25=216.40 train_time:29222ms step_avg:73.06ms +[2025-09-02 17:39:48] [Rank 0] PRINT: step:400/10000 val_loss:5.7258 svd_entropy: attn_qk:H=0.6140,top10E=0.50,eRank=72.1,q75/q25=16.11 attn_vo:H=0.6178,top10E=0.45,eRank=92.7,q75/q25=24.80 mlp_w1:H=0.6870,top10E=0.41,eRank=115.4,q75/q25=4.51 mlp_w2:H=0.9274,top10E=0.07,eRank=475.3,q75/q25=6.56 vo_prod:H=0.4686,top10E=0.72,eRank=26.7,q75/q25=216.40 train_time:29222ms step_avg:73.06ms +[2025-09-02 17:39:48] [Rank 0] step:401/10000 train_time:29239ms step_avg:72.91ms +[2025-09-02 17:39:48] [Rank 0] step:401/10000 train_time:29239ms step_avg:72.91ms +[2025-09-02 17:39:49] [Rank 0] step:421/10000 train_time:30618ms step_avg:72.73ms +[2025-09-02 17:39:49] [Rank 0] step:421/10000 train_time:30618ms step_avg:72.73ms +[2025-09-02 17:39:51] [Rank 0] step:441/10000 train_time:32066ms step_avg:72.71ms +[2025-09-02 17:39:51] [Rank 0] step:441/10000 train_time:32066ms step_avg:72.71ms +[2025-09-02 17:39:52] [Rank 0] step:461/10000 train_time:33511ms step_avg:72.69ms +[2025-09-02 17:39:52] [Rank 0] step:461/10000 train_time:33511ms step_avg:72.69ms +[2025-09-02 17:39:53] [Rank 0] step:481/10000 train_time:34959ms step_avg:72.68ms +[2025-09-02 17:39:53] [Rank 0] step:481/10000 train_time:34959ms step_avg:72.68ms +[2025-09-02 17:39:55] [Rank 0] step:501/10000 train_time:36408ms step_avg:72.67ms +[2025-09-02 17:39:55] [Rank 0] step:501/10000 train_time:36408ms step_avg:72.67ms +[2025-09-02 17:39:56] [Rank 0] step:521/10000 train_time:37858ms step_avg:72.66ms +[2025-09-02 17:39:56] [Rank 0] step:521/10000 train_time:37858ms step_avg:72.66ms +[2025-09-02 17:39:58] [Rank 0] step:541/10000 
train_time:39307ms step_avg:72.66ms +[2025-09-02 17:39:58] [Rank 0] step:541/10000 train_time:39307ms step_avg:72.66ms +[2025-09-02 17:39:59] [Rank 0] step:561/10000 train_time:40753ms step_avg:72.64ms +[2025-09-02 17:39:59] [Rank 0] step:561/10000 train_time:40753ms step_avg:72.64ms +[2025-09-02 17:40:01] [Rank 0] step:581/10000 train_time:42202ms step_avg:72.64ms +[2025-09-02 17:40:01] [Rank 0] step:581/10000 train_time:42202ms step_avg:72.64ms +[2025-09-02 17:40:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:40:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:40:14] [Rank 0] PRINT: step:600/10000 val_loss:5.4316 svd_entropy: attn_qk:H=0.6443,top10E=0.44,eRank=84.4,q75/q25=21.28 attn_vo:H=0.6663,top10E=0.37,eRank=117.3,q75/q25=34.21 mlp_w1:H=0.7288,top10E=0.35,eRank=145.2,q75/q25=6.33 mlp_w2:H=0.9486,top10E=0.05,eRank=546.6,q75/q25=4.49 vo_prod:H=0.5289,top10E=0.60,eRank=37.1,q75/q25=458.09 train_time:43725ms step_avg:72.88ms +[2025-09-02 17:40:14] [Rank 0] PRINT: step:600/10000 val_loss:5.4316 svd_entropy: attn_qk:H=0.6443,top10E=0.44,eRank=84.4,q75/q25=21.28 attn_vo:H=0.6663,top10E=0.37,eRank=117.3,q75/q25=34.21 mlp_w1:H=0.7288,top10E=0.35,eRank=145.2,q75/q25=6.33 mlp_w2:H=0.9486,top10E=0.05,eRank=546.6,q75/q25=4.49 vo_prod:H=0.5289,top10E=0.60,eRank=37.1,q75/q25=458.09 train_time:43725ms step_avg:72.88ms +[2025-09-02 17:40:14] [Rank 0] step:601/10000 train_time:43741ms step_avg:72.78ms +[2025-09-02 17:40:14] [Rank 0] step:601/10000 train_time:43741ms step_avg:72.78ms +[2025-09-02 17:40:15] [Rank 0] step:621/10000 train_time:45128ms step_avg:72.67ms +[2025-09-02 17:40:15] [Rank 0] step:621/10000 train_time:45128ms step_avg:72.67ms +[2025-09-02 17:40:17] [Rank 0] step:641/10000 train_time:46573ms step_avg:72.66ms +[2025-09-02 17:40:17] [Rank 0] step:641/10000 
train_time:46573ms step_avg:72.66ms +[2025-09-02 17:40:18] [Rank 0] step:661/10000 train_time:48021ms step_avg:72.65ms +[2025-09-02 17:40:18] [Rank 0] step:661/10000 train_time:48021ms step_avg:72.65ms +[2025-09-02 17:40:20] [Rank 0] step:681/10000 train_time:49469ms step_avg:72.64ms +[2025-09-02 17:40:20] [Rank 0] step:681/10000 train_time:49469ms step_avg:72.64ms +[2025-09-02 17:40:21] [Rank 0] step:701/10000 train_time:50918ms step_avg:72.64ms +[2025-09-02 17:40:21] [Rank 0] step:701/10000 train_time:50918ms step_avg:72.64ms +[2025-09-02 17:40:23] [Rank 0] step:721/10000 train_time:52367ms step_avg:72.63ms +[2025-09-02 17:40:23] [Rank 0] step:721/10000 train_time:52367ms step_avg:72.63ms +[2025-09-02 17:40:24] [Rank 0] step:741/10000 train_time:53813ms step_avg:72.62ms +[2025-09-02 17:40:24] [Rank 0] step:741/10000 train_time:53813ms step_avg:72.62ms +[2025-09-02 17:40:26] [Rank 0] step:761/10000 train_time:55336ms step_avg:72.72ms +[2025-09-02 17:40:26] [Rank 0] step:761/10000 train_time:55336ms step_avg:72.72ms +[2025-09-02 17:40:27] [Rank 0] step:781/10000 train_time:56875ms step_avg:72.82ms +[2025-09-02 17:40:27] [Rank 0] step:781/10000 train_time:56875ms step_avg:72.82ms +[2025-09-02 17:40:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:40:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:40:40] [Rank 0] PRINT: step:800/10000 val_loss:5.2009 svd_entropy: attn_qk:H=0.6659,top10E=0.40,eRank=94.4,q75/q25=28.54 attn_vo:H=0.6985,top10E=0.32,eRank=137.6,q75/q25=43.10 mlp_w1:H=0.7599,top10E=0.31,eRank=172.2,q75/q25=7.29 mlp_w2:H=0.9557,top10E=0.05,eRank=572.6,q75/q25=3.93 vo_prod:H=0.5677,top10E=0.52,eRank=46.4,q75/q25=961.84 train_time:58458ms step_avg:73.07ms +[2025-09-02 17:40:40] [Rank 0] PRINT: step:800/10000 val_loss:5.2009 svd_entropy: attn_qk:H=0.6659,top10E=0.40,eRank=94.4,q75/q25=28.54 attn_vo:H=0.6985,top10E=0.32,eRank=137.6,q75/q25=43.10 mlp_w1:H=0.7599,top10E=0.31,eRank=172.2,q75/q25=7.29 mlp_w2:H=0.9557,top10E=0.05,eRank=572.6,q75/q25=3.93 vo_prod:H=0.5677,top10E=0.52,eRank=46.4,q75/q25=961.84 train_time:58458ms step_avg:73.07ms +[2025-09-02 17:40:40] [Rank 0] step:801/10000 train_time:58474ms step_avg:73.00ms +[2025-09-02 17:40:40] [Rank 0] step:801/10000 train_time:58474ms step_avg:73.00ms +[2025-09-02 17:40:42] [Rank 0] step:821/10000 train_time:59878ms step_avg:72.93ms +[2025-09-02 17:40:42] [Rank 0] step:821/10000 train_time:59878ms step_avg:72.93ms +[2025-09-02 17:40:43] [Rank 0] step:841/10000 train_time:61338ms step_avg:72.93ms +[2025-09-02 17:40:43] [Rank 0] step:841/10000 train_time:61338ms step_avg:72.93ms +[2025-09-02 17:40:45] [Rank 0] step:861/10000 train_time:62799ms step_avg:72.94ms +[2025-09-02 17:40:45] [Rank 0] step:861/10000 train_time:62799ms step_avg:72.94ms +[2025-09-02 17:40:46] [Rank 0] step:881/10000 train_time:64263ms step_avg:72.94ms +[2025-09-02 17:40:46] [Rank 0] step:881/10000 train_time:64263ms step_avg:72.94ms +[2025-09-02 17:40:48] [Rank 0] step:901/10000 train_time:65727ms step_avg:72.95ms +[2025-09-02 17:40:48] [Rank 0] step:901/10000 train_time:65727ms step_avg:72.95ms +[2025-09-02 17:40:49] [Rank 0] step:921/10000 train_time:67191ms step_avg:72.95ms +[2025-09-02 17:40:49] [Rank 0] step:921/10000 train_time:67191ms step_avg:72.95ms +[2025-09-02 17:40:51] [Rank 0] step:941/10000 
train_time:68655ms step_avg:72.96ms +[2025-09-02 17:40:51] [Rank 0] step:941/10000 train_time:68655ms step_avg:72.96ms +[2025-09-02 17:40:52] [Rank 0] step:961/10000 train_time:70119ms step_avg:72.96ms +[2025-09-02 17:40:52] [Rank 0] step:961/10000 train_time:70119ms step_avg:72.96ms +[2025-09-02 17:40:54] [Rank 0] step:981/10000 train_time:71583ms step_avg:72.97ms +[2025-09-02 17:40:54] [Rank 0] step:981/10000 train_time:71583ms step_avg:72.97ms +[2025-09-02 17:40:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:40:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:41:07] [Rank 0] PRINT: step:1000/10000 val_loss:5.0295 svd_entropy: attn_qk:H=0.6827,top10E=0.38,eRank=103.1,q75/q25=35.95 attn_vo:H=0.7221,top10E=0.29,eRank=155.4,q75/q25=50.34 mlp_w1:H=0.7837,top10E=0.28,eRank=196.8,q75/q25=7.54 mlp_w2:H=0.9607,top10E=0.04,eRank=591.7,q75/q25=3.56 vo_prod:H=0.5961,top10E=0.46,eRank=55.3,q75/q25=1820.35 train_time:73123ms step_avg:73.12ms +[2025-09-02 17:41:07] [Rank 0] PRINT: step:1000/10000 val_loss:5.0295 svd_entropy: attn_qk:H=0.6827,top10E=0.38,eRank=103.1,q75/q25=35.95 attn_vo:H=0.7221,top10E=0.29,eRank=155.4,q75/q25=50.34 mlp_w1:H=0.7837,top10E=0.28,eRank=196.8,q75/q25=7.54 mlp_w2:H=0.9607,top10E=0.04,eRank=591.7,q75/q25=3.56 vo_prod:H=0.5961,top10E=0.46,eRank=55.3,q75/q25=1820.35 train_time:73123ms step_avg:73.12ms +[2025-09-02 17:41:07] [Rank 0] step:1001/10000 train_time:73138ms step_avg:73.07ms +[2025-09-02 17:41:07] [Rank 0] step:1001/10000 train_time:73138ms step_avg:73.07ms +[2025-09-02 17:41:08] [Rank 0] step:1021/10000 train_time:74540ms step_avg:73.01ms +[2025-09-02 17:41:08] [Rank 0] step:1021/10000 train_time:74540ms step_avg:73.01ms +[2025-09-02 17:41:10] [Rank 0] step:1041/10000 train_time:76001ms step_avg:73.01ms +[2025-09-02 17:41:10] [Rank 0] 
step:1041/10000 train_time:76001ms step_avg:73.01ms +[2025-09-02 17:41:11] [Rank 0] step:1061/10000 train_time:77462ms step_avg:73.01ms +[2025-09-02 17:41:11] [Rank 0] step:1061/10000 train_time:77462ms step_avg:73.01ms +[2025-09-02 17:41:13] [Rank 0] step:1081/10000 train_time:78924ms step_avg:73.01ms +[2025-09-02 17:41:13] [Rank 0] step:1081/10000 train_time:78924ms step_avg:73.01ms +[2025-09-02 17:41:14] [Rank 0] step:1101/10000 train_time:80388ms step_avg:73.01ms +[2025-09-02 17:41:14] [Rank 0] step:1101/10000 train_time:80388ms step_avg:73.01ms +[2025-09-02 17:41:16] [Rank 0] step:1121/10000 train_time:81851ms step_avg:73.02ms +[2025-09-02 17:41:16] [Rank 0] step:1121/10000 train_time:81851ms step_avg:73.02ms +[2025-09-02 17:41:17] [Rank 0] step:1141/10000 train_time:83314ms step_avg:73.02ms +[2025-09-02 17:41:17] [Rank 0] step:1141/10000 train_time:83314ms step_avg:73.02ms +[2025-09-02 17:41:19] [Rank 0] step:1161/10000 train_time:84776ms step_avg:73.02ms +[2025-09-02 17:41:19] [Rank 0] step:1161/10000 train_time:84776ms step_avg:73.02ms +[2025-09-02 17:41:20] [Rank 0] step:1181/10000 train_time:86238ms step_avg:73.02ms +[2025-09-02 17:41:20] [Rank 0] step:1181/10000 train_time:86238ms step_avg:73.02ms +[2025-09-02 17:41:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:41:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:41:33] [Rank 0] PRINT: step:1200/10000 val_loss:4.8511 svd_entropy: attn_qk:H=0.6961,top10E=0.35,eRank=111.0,q75/q25=43.27 attn_vo:H=0.7414,top10E=0.26,eRank=172.2,q75/q25=55.86 mlp_w1:H=0.8035,top10E=0.26,eRank=220.4,q75/q25=7.46 mlp_w2:H=0.9639,top10E=0.04,eRank=604.6,q75/q25=3.34 vo_prod:H=0.6174,top10E=0.42,eRank=63.0,q75/q25=2981.12 train_time:87775ms step_avg:73.15ms +[2025-09-02 17:41:33] [Rank 0] PRINT: step:1200/10000 val_loss:4.8511 svd_entropy: attn_qk:H=0.6961,top10E=0.35,eRank=111.0,q75/q25=43.27 attn_vo:H=0.7414,top10E=0.26,eRank=172.2,q75/q25=55.86 mlp_w1:H=0.8035,top10E=0.26,eRank=220.4,q75/q25=7.46 mlp_w2:H=0.9639,top10E=0.04,eRank=604.6,q75/q25=3.34 vo_prod:H=0.6174,top10E=0.42,eRank=63.0,q75/q25=2981.12 train_time:87775ms step_avg:73.15ms +[2025-09-02 17:41:34] [Rank 0] step:1201/10000 train_time:87789ms step_avg:73.10ms +[2025-09-02 17:41:34] [Rank 0] step:1201/10000 train_time:87789ms step_avg:73.10ms +[2025-09-02 17:41:35] [Rank 0] step:1221/10000 train_time:89200ms step_avg:73.05ms +[2025-09-02 17:41:35] [Rank 0] step:1221/10000 train_time:89200ms step_avg:73.05ms +[2025-09-02 17:41:37] [Rank 0] step:1241/10000 train_time:90658ms step_avg:73.05ms +[2025-09-02 17:41:37] [Rank 0] step:1241/10000 train_time:90658ms step_avg:73.05ms +[2025-09-02 17:41:38] [Rank 0] step:1261/10000 train_time:92118ms step_avg:73.05ms +[2025-09-02 17:41:38] [Rank 0] step:1261/10000 train_time:92118ms step_avg:73.05ms +[2025-09-02 17:41:39] [Rank 0] step:1281/10000 train_time:93579ms step_avg:73.05ms +[2025-09-02 17:41:39] [Rank 0] step:1281/10000 train_time:93579ms step_avg:73.05ms +[2025-09-02 17:41:41] [Rank 0] step:1301/10000 train_time:95039ms step_avg:73.05ms +[2025-09-02 17:41:41] [Rank 0] step:1301/10000 train_time:95039ms step_avg:73.05ms +[2025-09-02 17:41:42] [Rank 0] step:1321/10000 train_time:96500ms step_avg:73.05ms +[2025-09-02 17:41:42] [Rank 0] step:1321/10000 train_time:96500ms step_avg:73.05ms +[2025-09-02 17:41:44] [Rank 0] 
step:1341/10000 train_time:97962ms step_avg:73.05ms +[2025-09-02 17:41:44] [Rank 0] step:1341/10000 train_time:97962ms step_avg:73.05ms +[2025-09-02 17:41:45] [Rank 0] step:1361/10000 train_time:99425ms step_avg:73.05ms +[2025-09-02 17:41:45] [Rank 0] step:1361/10000 train_time:99425ms step_avg:73.05ms +[2025-09-02 17:41:47] [Rank 0] step:1381/10000 train_time:100887ms step_avg:73.05ms +[2025-09-02 17:41:47] [Rank 0] step:1381/10000 train_time:100887ms step_avg:73.05ms +[2025-09-02 17:41:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:41:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:42:00] [Rank 0] PRINT: step:1400/10000 val_loss:4.7291 svd_entropy: attn_qk:H=0.7065,top10E=0.34,eRank=117.8,q75/q25=49.26 attn_vo:H=0.7562,top10E=0.24,eRank=186.6,q75/q25=58.78 mlp_w1:H=0.8193,top10E=0.24,eRank=242.0,q75/q25=7.25 mlp_w2:H=0.9661,top10E=0.04,eRank=613.2,q75/q25=3.20 vo_prod:H=0.6334,top10E=0.39,eRank=69.8,q75/q25=3695.27 train_time:102424ms step_avg:73.16ms +[2025-09-02 17:42:00] [Rank 0] PRINT: step:1400/10000 val_loss:4.7291 svd_entropy: attn_qk:H=0.7065,top10E=0.34,eRank=117.8,q75/q25=49.26 attn_vo:H=0.7562,top10E=0.24,eRank=186.6,q75/q25=58.78 mlp_w1:H=0.8193,top10E=0.24,eRank=242.0,q75/q25=7.25 mlp_w2:H=0.9661,top10E=0.04,eRank=613.2,q75/q25=3.20 vo_prod:H=0.6334,top10E=0.39,eRank=69.8,q75/q25=3695.27 train_time:102424ms step_avg:73.16ms +[2025-09-02 17:42:00] [Rank 0] step:1401/10000 train_time:102437ms step_avg:73.12ms +[2025-09-02 17:42:00] [Rank 0] step:1401/10000 train_time:102437ms step_avg:73.12ms +[2025-09-02 17:42:02] [Rank 0] step:1421/10000 train_time:103823ms step_avg:73.06ms +[2025-09-02 17:42:02] [Rank 0] step:1421/10000 train_time:103823ms step_avg:73.06ms +[2025-09-02 17:42:03] [Rank 0] step:1441/10000 train_time:105284ms step_avg:73.06ms +[2025-09-02 
17:42:03] [Rank 0] step:1441/10000 train_time:105284ms step_avg:73.06ms +[2025-09-02 17:42:05] [Rank 0] step:1461/10000 train_time:106744ms step_avg:73.06ms +[2025-09-02 17:42:05] [Rank 0] step:1461/10000 train_time:106744ms step_avg:73.06ms +[2025-09-02 17:42:06] [Rank 0] step:1481/10000 train_time:108203ms step_avg:73.06ms +[2025-09-02 17:42:06] [Rank 0] step:1481/10000 train_time:108203ms step_avg:73.06ms +[2025-09-02 17:42:07] [Rank 0] step:1501/10000 train_time:109674ms step_avg:73.07ms +[2025-09-02 17:42:07] [Rank 0] step:1501/10000 train_time:109674ms step_avg:73.07ms +[2025-09-02 17:42:09] [Rank 0] step:1521/10000 train_time:111144ms step_avg:73.07ms +[2025-09-02 17:42:09] [Rank 0] step:1521/10000 train_time:111144ms step_avg:73.07ms +[2025-09-02 17:42:10] [Rank 0] step:1541/10000 train_time:112615ms step_avg:73.08ms +[2025-09-02 17:42:10] [Rank 0] step:1541/10000 train_time:112615ms step_avg:73.08ms +[2025-09-02 17:42:12] [Rank 0] step:1561/10000 train_time:114088ms step_avg:73.09ms +[2025-09-02 17:42:12] [Rank 0] step:1561/10000 train_time:114088ms step_avg:73.09ms +[2025-09-02 17:42:13] [Rank 0] step:1581/10000 train_time:115564ms step_avg:73.10ms +[2025-09-02 17:42:13] [Rank 0] step:1581/10000 train_time:115564ms step_avg:73.10ms +[2025-09-02 17:42:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:42:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:42:26] [Rank 0] PRINT: step:1600/10000 val_loss:4.5946 svd_entropy: attn_qk:H=0.7143,top10E=0.33,eRank=123.0,q75/q25=53.98 attn_vo:H=0.7683,top10E=0.23,eRank=199.1,q75/q25=60.31 mlp_w1:H=0.8319,top10E=0.23,eRank=260.8,q75/q25=6.98 mlp_w2:H=0.9675,top10E=0.04,eRank=619.2,q75/q25=3.10 vo_prod:H=0.6469,top10E=0.37,eRank=76.3,q75/q25=4062.28 train_time:117112ms step_avg:73.19ms +[2025-09-02 17:42:26] [Rank 0] PRINT: step:1600/10000 val_loss:4.5946 svd_entropy: attn_qk:H=0.7143,top10E=0.33,eRank=123.0,q75/q25=53.98 attn_vo:H=0.7683,top10E=0.23,eRank=199.1,q75/q25=60.31 mlp_w1:H=0.8319,top10E=0.23,eRank=260.8,q75/q25=6.98 mlp_w2:H=0.9675,top10E=0.04,eRank=619.2,q75/q25=3.10 vo_prod:H=0.6469,top10E=0.37,eRank=76.3,q75/q25=4062.28 train_time:117112ms step_avg:73.19ms +[2025-09-02 17:42:26] [Rank 0] step:1601/10000 train_time:117126ms step_avg:73.16ms +[2025-09-02 17:42:26] [Rank 0] step:1601/10000 train_time:117126ms step_avg:73.16ms +[2025-09-02 17:42:28] [Rank 0] step:1621/10000 train_time:118533ms step_avg:73.12ms +[2025-09-02 17:42:28] [Rank 0] step:1621/10000 train_time:118533ms step_avg:73.12ms +[2025-09-02 17:42:29] [Rank 0] step:1641/10000 train_time:120004ms step_avg:73.13ms +[2025-09-02 17:42:29] [Rank 0] step:1641/10000 train_time:120004ms step_avg:73.13ms +[2025-09-02 17:42:31] [Rank 0] step:1661/10000 train_time:121476ms step_avg:73.13ms +[2025-09-02 17:42:31] [Rank 0] step:1661/10000 train_time:121476ms step_avg:73.13ms +[2025-09-02 17:42:33] [Rank 0] step:1681/10000 train_time:123070ms step_avg:73.21ms +[2025-09-02 17:42:33] [Rank 0] step:1681/10000 train_time:123070ms step_avg:73.21ms +[2025-09-02 17:42:34] [Rank 0] step:1701/10000 train_time:124544ms step_avg:73.22ms +[2025-09-02 17:42:34] [Rank 0] step:1701/10000 train_time:124544ms step_avg:73.22ms +[2025-09-02 17:42:36] [Rank 0] step:1721/10000 train_time:126082ms step_avg:73.26ms +[2025-09-02 17:42:36] [Rank 0] step:1721/10000 train_time:126082ms step_avg:73.26ms +[2025-09-02 17:42:37] 
[Rank 0] step:1741/10000 train_time:127558ms step_avg:73.27ms +[2025-09-02 17:42:37] [Rank 0] step:1741/10000 train_time:127558ms step_avg:73.27ms +[2025-09-02 17:42:38] [Rank 0] step:1761/10000 train_time:129031ms step_avg:73.27ms +[2025-09-02 17:42:38] [Rank 0] step:1761/10000 train_time:129031ms step_avg:73.27ms +[2025-09-02 17:42:40] [Rank 0] step:1781/10000 train_time:130505ms step_avg:73.28ms +[2025-09-02 17:42:40] [Rank 0] step:1781/10000 train_time:130505ms step_avg:73.28ms +[2025-09-02 17:42:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:42:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:42:53] [Rank 0] PRINT: step:1800/10000 val_loss:4.4974 svd_entropy: attn_qk:H=0.7210,top10E=0.32,eRank=127.8,q75/q25=57.32 attn_vo:H=0.7778,top10E=0.21,eRank=209.6,q75/q25=61.11 mlp_w1:H=0.8420,top10E=0.21,eRank=277.3,q75/q25=6.70 mlp_w2:H=0.9685,top10E=0.04,eRank=623.1,q75/q25=3.04 vo_prod:H=0.6580,top10E=0.35,eRank=82.0,q75/q25=4329.17 train_time:132053ms step_avg:73.36ms +[2025-09-02 17:42:53] [Rank 0] PRINT: step:1800/10000 val_loss:4.4974 svd_entropy: attn_qk:H=0.7210,top10E=0.32,eRank=127.8,q75/q25=57.32 attn_vo:H=0.7778,top10E=0.21,eRank=209.6,q75/q25=61.11 mlp_w1:H=0.8420,top10E=0.21,eRank=277.3,q75/q25=6.70 mlp_w2:H=0.9685,top10E=0.04,eRank=623.1,q75/q25=3.04 vo_prod:H=0.6580,top10E=0.35,eRank=82.0,q75/q25=4329.17 train_time:132053ms step_avg:73.36ms +[2025-09-02 17:42:53] [Rank 0] step:1801/10000 train_time:132068ms step_avg:73.33ms +[2025-09-02 17:42:53] [Rank 0] step:1801/10000 train_time:132068ms step_avg:73.33ms +[2025-09-02 17:42:55] [Rank 0] step:1821/10000 train_time:133477ms step_avg:73.30ms +[2025-09-02 17:42:55] [Rank 0] step:1821/10000 train_time:133477ms step_avg:73.30ms +[2025-09-02 17:42:56] [Rank 0] step:1841/10000 train_time:134947ms step_avg:73.30ms 
+[2025-09-02 17:42:56] [Rank 0] step:1841/10000 train_time:134947ms step_avg:73.30ms +[2025-09-02 17:42:58] [Rank 0] step:1861/10000 train_time:136418ms step_avg:73.30ms +[2025-09-02 17:42:58] [Rank 0] step:1861/10000 train_time:136418ms step_avg:73.30ms +[2025-09-02 17:42:59] [Rank 0] step:1881/10000 train_time:137889ms step_avg:73.31ms +[2025-09-02 17:42:59] [Rank 0] step:1881/10000 train_time:137889ms step_avg:73.31ms +[2025-09-02 17:43:01] [Rank 0] step:1901/10000 train_time:139360ms step_avg:73.31ms +[2025-09-02 17:43:01] [Rank 0] step:1901/10000 train_time:139360ms step_avg:73.31ms +[2025-09-02 17:43:02] [Rank 0] step:1921/10000 train_time:140832ms step_avg:73.31ms +[2025-09-02 17:43:02] [Rank 0] step:1921/10000 train_time:140832ms step_avg:73.31ms +[2025-09-02 17:43:04] [Rank 0] step:1941/10000 train_time:142305ms step_avg:73.32ms +[2025-09-02 17:43:04] [Rank 0] step:1941/10000 train_time:142305ms step_avg:73.32ms +[2025-09-02 17:43:05] [Rank 0] step:1961/10000 train_time:143779ms step_avg:73.32ms +[2025-09-02 17:43:05] [Rank 0] step:1961/10000 train_time:143779ms step_avg:73.32ms +[2025-09-02 17:43:07] [Rank 0] step:1981/10000 train_time:145252ms step_avg:73.32ms +[2025-09-02 17:43:07] [Rank 0] step:1981/10000 train_time:145252ms step_avg:73.32ms +[2025-09-02 17:43:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:43:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:43:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.4356 svd_entropy: attn_qk:H=0.7266,top10E=0.31,eRank=132.0,q75/q25=59.99 attn_vo:H=0.7858,top10E=0.20,eRank=218.8,q75/q25=61.05 mlp_w1:H=0.8501,top10E=0.20,eRank=291.5,q75/q25=6.46 mlp_w2:H=0.9692,top10E=0.04,eRank=626.0,q75/q25=3.00 vo_prod:H=0.6677,top10E=0.34,eRank=87.6,q75/q25=4228.01 train_time:146801ms step_avg:73.40ms +[2025-09-02 17:43:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.4356 svd_entropy: attn_qk:H=0.7266,top10E=0.31,eRank=132.0,q75/q25=59.99 attn_vo:H=0.7858,top10E=0.20,eRank=218.8,q75/q25=61.05 mlp_w1:H=0.8501,top10E=0.20,eRank=291.5,q75/q25=6.46 mlp_w2:H=0.9692,top10E=0.04,eRank=626.0,q75/q25=3.00 vo_prod:H=0.6677,top10E=0.34,eRank=87.6,q75/q25=4228.01 train_time:146801ms step_avg:73.40ms +[2025-09-02 17:43:20] [Rank 0] step:2001/10000 train_time:146816ms step_avg:73.37ms +[2025-09-02 17:43:20] [Rank 0] step:2001/10000 train_time:146816ms step_avg:73.37ms +[2025-09-02 17:43:21] [Rank 0] step:2021/10000 train_time:148226ms step_avg:73.34ms +[2025-09-02 17:43:21] [Rank 0] step:2021/10000 train_time:148226ms step_avg:73.34ms +[2025-09-02 17:43:23] [Rank 0] step:2041/10000 train_time:149695ms step_avg:73.34ms +[2025-09-02 17:43:23] [Rank 0] step:2041/10000 train_time:149695ms step_avg:73.34ms +[2025-09-02 17:43:24] [Rank 0] step:2061/10000 train_time:151166ms step_avg:73.35ms +[2025-09-02 17:43:24] [Rank 0] step:2061/10000 train_time:151166ms step_avg:73.35ms +[2025-09-02 17:43:26] [Rank 0] step:2081/10000 train_time:152637ms step_avg:73.35ms +[2025-09-02 17:43:26] [Rank 0] step:2081/10000 train_time:152637ms step_avg:73.35ms +[2025-09-02 17:43:27] [Rank 0] step:2101/10000 train_time:154108ms step_avg:73.35ms +[2025-09-02 17:43:27] [Rank 0] step:2101/10000 train_time:154108ms step_avg:73.35ms +[2025-09-02 17:43:29] [Rank 0] step:2121/10000 train_time:155581ms step_avg:73.35ms +[2025-09-02 17:43:29] [Rank 0] step:2121/10000 train_time:155581ms step_avg:73.35ms +[2025-09-02 17:43:30] 
[Rank 0] step:2141/10000 train_time:157054ms step_avg:73.36ms +[2025-09-02 17:43:30] [Rank 0] step:2141/10000 train_time:157054ms step_avg:73.36ms +[2025-09-02 17:43:32] [Rank 0] step:2161/10000 train_time:158527ms step_avg:73.36ms +[2025-09-02 17:43:32] [Rank 0] step:2161/10000 train_time:158527ms step_avg:73.36ms +[2025-09-02 17:43:33] [Rank 0] step:2181/10000 train_time:159999ms step_avg:73.36ms +[2025-09-02 17:43:33] [Rank 0] step:2181/10000 train_time:159999ms step_avg:73.36ms +[2025-09-02 17:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:43:46] [Rank 0] PRINT: step:2200/10000 val_loss:4.3689 svd_entropy: attn_qk:H=0.7316,top10E=0.31,eRank=135.8,q75/q25=61.56 attn_vo:H=0.7921,top10E=0.19,eRank=226.3,q75/q25=60.36 mlp_w1:H=0.8567,top10E=0.20,eRank=303.5,q75/q25=6.23 mlp_w2:H=0.9697,top10E=0.04,eRank=628.0,q75/q25=2.97 vo_prod:H=0.6753,top10E=0.32,eRank=92.2,q75/q25=4030.76 train_time:161548ms step_avg:73.43ms +[2025-09-02 17:43:46] [Rank 0] PRINT: step:2200/10000 val_loss:4.3689 svd_entropy: attn_qk:H=0.7316,top10E=0.31,eRank=135.8,q75/q25=61.56 attn_vo:H=0.7921,top10E=0.19,eRank=226.3,q75/q25=60.36 mlp_w1:H=0.8567,top10E=0.20,eRank=303.5,q75/q25=6.23 mlp_w2:H=0.9697,top10E=0.04,eRank=628.0,q75/q25=2.97 vo_prod:H=0.6753,top10E=0.32,eRank=92.2,q75/q25=4030.76 train_time:161548ms step_avg:73.43ms +[2025-09-02 17:43:47] [Rank 0] step:2201/10000 train_time:161562ms step_avg:73.40ms +[2025-09-02 17:43:47] [Rank 0] step:2201/10000 train_time:161562ms step_avg:73.40ms +[2025-09-02 17:43:48] [Rank 0] step:2221/10000 train_time:162982ms step_avg:73.38ms +[2025-09-02 17:43:48] [Rank 0] step:2221/10000 train_time:162982ms step_avg:73.38ms +[2025-09-02 17:43:50] [Rank 0] step:2241/10000 train_time:164488ms step_avg:73.40ms 
+[2025-09-02 17:43:50] [Rank 0] step:2241/10000 train_time:164488ms step_avg:73.40ms +[2025-09-02 17:43:51] [Rank 0] step:2261/10000 train_time:166003ms step_avg:73.42ms +[2025-09-02 17:43:51] [Rank 0] step:2261/10000 train_time:166003ms step_avg:73.42ms +[2025-09-02 17:43:53] [Rank 0] step:2281/10000 train_time:167518ms step_avg:73.44ms +[2025-09-02 17:43:53] [Rank 0] step:2281/10000 train_time:167518ms step_avg:73.44ms +[2025-09-02 17:43:54] [Rank 0] step:2301/10000 train_time:169033ms step_avg:73.46ms +[2025-09-02 17:43:54] [Rank 0] step:2301/10000 train_time:169033ms step_avg:73.46ms +[2025-09-02 17:43:56] [Rank 0] step:2321/10000 train_time:170550ms step_avg:73.48ms +[2025-09-02 17:43:56] [Rank 0] step:2321/10000 train_time:170550ms step_avg:73.48ms +[2025-09-02 17:43:57] [Rank 0] step:2341/10000 train_time:172065ms step_avg:73.50ms +[2025-09-02 17:43:57] [Rank 0] step:2341/10000 train_time:172065ms step_avg:73.50ms +[2025-09-02 17:43:59] [Rank 0] step:2361/10000 train_time:173581ms step_avg:73.52ms +[2025-09-02 17:43:59] [Rank 0] step:2361/10000 train_time:173581ms step_avg:73.52ms +[2025-09-02 17:44:00] [Rank 0] step:2381/10000 train_time:175098ms step_avg:73.54ms +[2025-09-02 17:44:00] [Rank 0] step:2381/10000 train_time:175098ms step_avg:73.54ms +[2025-09-02 17:44:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:44:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:44:13] [Rank 0] PRINT: step:2400/10000 val_loss:4.2933 svd_entropy: attn_qk:H=0.7348,top10E=0.30,eRank=138.3,q75/q25=62.27 attn_vo:H=0.7975,top10E=0.19,eRank=232.8,q75/q25=59.71 mlp_w1:H=0.8624,top10E=0.19,eRank=314.4,q75/q25=6.02 mlp_w2:H=0.9701,top10E=0.04,eRank=629.5,q75/q25=2.95 vo_prod:H=0.6823,top10E=0.31,eRank=96.8,q75/q25=3889.36 train_time:176692ms step_avg:73.62ms +[2025-09-02 17:44:13] [Rank 0] PRINT: step:2400/10000 val_loss:4.2933 svd_entropy: attn_qk:H=0.7348,top10E=0.30,eRank=138.3,q75/q25=62.27 attn_vo:H=0.7975,top10E=0.19,eRank=232.8,q75/q25=59.71 mlp_w1:H=0.8624,top10E=0.19,eRank=314.4,q75/q25=6.02 mlp_w2:H=0.9701,top10E=0.04,eRank=629.5,q75/q25=2.95 vo_prod:H=0.6823,top10E=0.31,eRank=96.8,q75/q25=3889.36 train_time:176692ms step_avg:73.62ms +[2025-09-02 17:44:14] [Rank 0] step:2401/10000 train_time:176707ms step_avg:73.60ms +[2025-09-02 17:44:14] [Rank 0] step:2401/10000 train_time:176707ms step_avg:73.60ms +[2025-09-02 17:44:15] [Rank 0] step:2421/10000 train_time:178166ms step_avg:73.59ms +[2025-09-02 17:44:15] [Rank 0] step:2421/10000 train_time:178166ms step_avg:73.59ms +[2025-09-02 17:44:17] [Rank 0] step:2441/10000 train_time:179679ms step_avg:73.61ms +[2025-09-02 17:44:17] [Rank 0] step:2441/10000 train_time:179679ms step_avg:73.61ms +[2025-09-02 17:44:18] [Rank 0] step:2461/10000 train_time:181193ms step_avg:73.63ms +[2025-09-02 17:44:18] [Rank 0] step:2461/10000 train_time:181193ms step_avg:73.63ms +[2025-09-02 17:44:20] [Rank 0] step:2481/10000 train_time:182708ms step_avg:73.64ms +[2025-09-02 17:44:20] [Rank 0] step:2481/10000 train_time:182708ms step_avg:73.64ms +[2025-09-02 17:44:21] [Rank 0] step:2501/10000 train_time:184224ms step_avg:73.66ms +[2025-09-02 17:44:21] [Rank 0] step:2501/10000 train_time:184224ms step_avg:73.66ms +[2025-09-02 17:44:23] [Rank 0] step:2521/10000 train_time:185741ms step_avg:73.68ms +[2025-09-02 17:44:23] [Rank 0] step:2521/10000 train_time:185741ms step_avg:73.68ms +[2025-09-02 17:44:24] 
[Rank 0] step:2541/10000 train_time:187257ms step_avg:73.69ms +[2025-09-02 17:44:24] [Rank 0] step:2541/10000 train_time:187257ms step_avg:73.69ms +[2025-09-02 17:44:26] [Rank 0] step:2561/10000 train_time:188774ms step_avg:73.71ms +[2025-09-02 17:44:26] [Rank 0] step:2561/10000 train_time:188774ms step_avg:73.71ms +[2025-09-02 17:44:27] [Rank 0] step:2581/10000 train_time:190292ms step_avg:73.73ms +[2025-09-02 17:44:27] [Rank 0] step:2581/10000 train_time:190292ms step_avg:73.73ms +[2025-09-02 17:44:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:44:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:44:40] [Rank 0] PRINT: step:2600/10000 val_loss:4.2421 svd_entropy: attn_qk:H=0.7384,top10E=0.30,eRank=141.3,q75/q25=62.81 attn_vo:H=0.8022,top10E=0.18,eRank=238.7,q75/q25=59.23 mlp_w1:H=0.8673,top10E=0.19,eRank=324.1,q75/q25=5.85 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.94 vo_prod:H=0.6887,top10E=0.30,eRank=101.1,q75/q25=3685.91 train_time:191888ms step_avg:73.80ms +[2025-09-02 17:44:40] [Rank 0] PRINT: step:2600/10000 val_loss:4.2421 svd_entropy: attn_qk:H=0.7384,top10E=0.30,eRank=141.3,q75/q25=62.81 attn_vo:H=0.8022,top10E=0.18,eRank=238.7,q75/q25=59.23 mlp_w1:H=0.8673,top10E=0.19,eRank=324.1,q75/q25=5.85 mlp_w2:H=0.9703,top10E=0.04,eRank=630.5,q75/q25=2.94 vo_prod:H=0.6887,top10E=0.30,eRank=101.1,q75/q25=3685.91 train_time:191888ms step_avg:73.80ms +[2025-09-02 17:44:40] [Rank 0] step:2601/10000 train_time:191902ms step_avg:73.78ms +[2025-09-02 17:44:40] [Rank 0] step:2601/10000 train_time:191902ms step_avg:73.78ms +[2025-09-02 17:44:42] [Rank 0] step:2621/10000 train_time:193352ms step_avg:73.77ms +[2025-09-02 17:44:42] [Rank 0] step:2621/10000 train_time:193352ms step_avg:73.77ms +[2025-09-02 17:44:44] [Rank 0] step:2641/10000 train_time:194869ms step_avg:73.79ms 
+[2025-09-02 17:44:44] [Rank 0] step:2641/10000 train_time:194869ms step_avg:73.79ms +[2025-09-02 17:44:45] [Rank 0] step:2661/10000 train_time:196386ms step_avg:73.80ms +[2025-09-02 17:44:45] [Rank 0] step:2661/10000 train_time:196386ms step_avg:73.80ms +[2025-09-02 17:44:47] [Rank 0] step:2681/10000 train_time:197903ms step_avg:73.82ms +[2025-09-02 17:44:47] [Rank 0] step:2681/10000 train_time:197903ms step_avg:73.82ms +[2025-09-02 17:44:48] [Rank 0] step:2701/10000 train_time:199421ms step_avg:73.83ms +[2025-09-02 17:44:48] [Rank 0] step:2701/10000 train_time:199421ms step_avg:73.83ms +[2025-09-02 17:44:50] [Rank 0] step:2721/10000 train_time:200942ms step_avg:73.85ms +[2025-09-02 17:44:50] [Rank 0] step:2721/10000 train_time:200942ms step_avg:73.85ms +[2025-09-02 17:44:51] [Rank 0] step:2741/10000 train_time:202461ms step_avg:73.86ms +[2025-09-02 17:44:51] [Rank 0] step:2741/10000 train_time:202461ms step_avg:73.86ms +[2025-09-02 17:44:53] [Rank 0] step:2761/10000 train_time:203983ms step_avg:73.88ms +[2025-09-02 17:44:53] [Rank 0] step:2761/10000 train_time:203983ms step_avg:73.88ms +[2025-09-02 17:44:54] [Rank 0] step:2781/10000 train_time:205504ms step_avg:73.90ms +[2025-09-02 17:44:54] [Rank 0] step:2781/10000 train_time:205504ms step_avg:73.90ms +[2025-09-02 17:44:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:44:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:45:07] [Rank 0] PRINT: step:2800/10000 val_loss:4.2049 svd_entropy: attn_qk:H=0.7418,top10E=0.29,eRank=144.2,q75/q25=63.34 attn_vo:H=0.8064,top10E=0.18,eRank=244.1,q75/q25=57.90 mlp_w1:H=0.8716,top10E=0.18,eRank=333.1,q75/q25=5.69 mlp_w2:H=0.9705,top10E=0.04,eRank=631.4,q75/q25=2.93 vo_prod:H=0.6945,top10E=0.30,eRank=105.3,q75/q25=3535.79 train_time:207103ms step_avg:73.97ms +[2025-09-02 17:45:07] [Rank 0] PRINT: step:2800/10000 val_loss:4.2049 svd_entropy: attn_qk:H=0.7418,top10E=0.29,eRank=144.2,q75/q25=63.34 attn_vo:H=0.8064,top10E=0.18,eRank=244.1,q75/q25=57.90 mlp_w1:H=0.8716,top10E=0.18,eRank=333.1,q75/q25=5.69 mlp_w2:H=0.9705,top10E=0.04,eRank=631.4,q75/q25=2.93 vo_prod:H=0.6945,top10E=0.30,eRank=105.3,q75/q25=3535.79 train_time:207103ms step_avg:73.97ms +[2025-09-02 17:45:07] [Rank 0] step:2801/10000 train_time:207116ms step_avg:73.94ms +[2025-09-02 17:45:07] [Rank 0] step:2801/10000 train_time:207116ms step_avg:73.94ms +[2025-09-02 17:45:09] [Rank 0] step:2821/10000 train_time:208584ms step_avg:73.94ms +[2025-09-02 17:45:09] [Rank 0] step:2821/10000 train_time:208584ms step_avg:73.94ms +[2025-09-02 17:45:11] [Rank 0] step:2841/10000 train_time:210100ms step_avg:73.95ms +[2025-09-02 17:45:11] [Rank 0] step:2841/10000 train_time:210100ms step_avg:73.95ms +[2025-09-02 17:45:12] [Rank 0] step:2861/10000 train_time:211616ms step_avg:73.97ms +[2025-09-02 17:45:12] [Rank 0] step:2861/10000 train_time:211616ms step_avg:73.97ms +[2025-09-02 17:45:14] [Rank 0] step:2881/10000 train_time:213133ms step_avg:73.98ms +[2025-09-02 17:45:14] [Rank 0] step:2881/10000 train_time:213133ms step_avg:73.98ms +[2025-09-02 17:45:15] [Rank 0] step:2901/10000 train_time:214651ms step_avg:73.99ms +[2025-09-02 17:45:15] [Rank 0] step:2901/10000 train_time:214651ms step_avg:73.99ms +[2025-09-02 17:45:17] [Rank 0] step:2921/10000 train_time:216169ms step_avg:74.00ms +[2025-09-02 17:45:17] [Rank 0] step:2921/10000 train_time:216169ms step_avg:74.00ms +[2025-09-02 
17:45:18] [Rank 0] step:2941/10000 train_time:217686ms step_avg:74.02ms +[2025-09-02 17:45:18] [Rank 0] step:2941/10000 train_time:217686ms step_avg:74.02ms +[2025-09-02 17:45:20] [Rank 0] step:2961/10000 train_time:219204ms step_avg:74.03ms +[2025-09-02 17:45:20] [Rank 0] step:2961/10000 train_time:219204ms step_avg:74.03ms +[2025-09-02 17:45:21] [Rank 0] step:2981/10000 train_time:220728ms step_avg:74.04ms +[2025-09-02 17:45:21] [Rank 0] step:2981/10000 train_time:220728ms step_avg:74.04ms +[2025-09-02 17:45:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:45:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:45:34] [Rank 0] PRINT: step:3000/10000 val_loss:4.1620 svd_entropy: attn_qk:H=0.7447,top10E=0.29,eRank=146.7,q75/q25=63.51 attn_vo:H=0.8102,top10E=0.17,eRank=249.0,q75/q25=57.28 mlp_w1:H=0.8753,top10E=0.18,eRank=340.9,q75/q25=5.56 mlp_w2:H=0.9706,top10E=0.04,eRank=631.9,q75/q25=2.92 vo_prod:H=0.6998,top10E=0.29,eRank=109.3,q75/q25=3389.94 train_time:222333ms step_avg:74.11ms +[2025-09-02 17:45:34] [Rank 0] PRINT: step:3000/10000 val_loss:4.1620 svd_entropy: attn_qk:H=0.7447,top10E=0.29,eRank=146.7,q75/q25=63.51 attn_vo:H=0.8102,top10E=0.17,eRank=249.0,q75/q25=57.28 mlp_w1:H=0.8753,top10E=0.18,eRank=340.9,q75/q25=5.56 mlp_w2:H=0.9706,top10E=0.04,eRank=631.9,q75/q25=2.92 vo_prod:H=0.6998,top10E=0.29,eRank=109.3,q75/q25=3389.94 train_time:222333ms step_avg:74.11ms +[2025-09-02 17:45:34] [Rank 0] step:3001/10000 train_time:222347ms step_avg:74.09ms +[2025-09-02 17:45:34] [Rank 0] step:3001/10000 train_time:222347ms step_avg:74.09ms +[2025-09-02 17:45:36] [Rank 0] step:3021/10000 train_time:223809ms step_avg:74.08ms +[2025-09-02 17:45:36] [Rank 0] step:3021/10000 train_time:223809ms step_avg:74.08ms +[2025-09-02 17:45:38] [Rank 0] step:3041/10000 train_time:225330ms 
step_avg:74.10ms +[2025-09-02 17:45:38] [Rank 0] step:3041/10000 train_time:225330ms step_avg:74.10ms +[2025-09-02 17:45:39] [Rank 0] step:3061/10000 train_time:226859ms step_avg:74.11ms +[2025-09-02 17:45:39] [Rank 0] step:3061/10000 train_time:226859ms step_avg:74.11ms +[2025-09-02 17:45:41] [Rank 0] step:3081/10000 train_time:228383ms step_avg:74.13ms +[2025-09-02 17:45:41] [Rank 0] step:3081/10000 train_time:228383ms step_avg:74.13ms +[2025-09-02 17:45:42] [Rank 0] step:3101/10000 train_time:229969ms step_avg:74.16ms +[2025-09-02 17:45:42] [Rank 0] step:3101/10000 train_time:229969ms step_avg:74.16ms +[2025-09-02 17:45:44] [Rank 0] step:3121/10000 train_time:231495ms step_avg:74.17ms +[2025-09-02 17:45:44] [Rank 0] step:3121/10000 train_time:231495ms step_avg:74.17ms +[2025-09-02 17:45:45] [Rank 0] step:3141/10000 train_time:233019ms step_avg:74.19ms +[2025-09-02 17:45:45] [Rank 0] step:3141/10000 train_time:233019ms step_avg:74.19ms +[2025-09-02 17:45:47] [Rank 0] step:3161/10000 train_time:234547ms step_avg:74.20ms +[2025-09-02 17:45:47] [Rank 0] step:3161/10000 train_time:234547ms step_avg:74.20ms +[2025-09-02 17:45:48] [Rank 0] step:3181/10000 train_time:236074ms step_avg:74.21ms +[2025-09-02 17:45:48] [Rank 0] step:3181/10000 train_time:236074ms step_avg:74.21ms +[2025-09-02 17:45:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:45:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:46:01] [Rank 0] PRINT: step:3200/10000 val_loss:4.1283 svd_entropy: attn_qk:H=0.7472,top10E=0.29,eRank=148.8,q75/q25=63.57 attn_vo:H=0.8160,top10E=0.17,eRank=256.0,q75/q25=55.94 mlp_w1:H=0.8788,top10E=0.17,eRank=348.5,q75/q25=5.44 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.91 vo_prod:H=0.7053,top10E=0.28,eRank=113.2,q75/q25=3260.47 train_time:237678ms step_avg:74.27ms +[2025-09-02 17:46:01] [Rank 0] PRINT: step:3200/10000 val_loss:4.1283 svd_entropy: attn_qk:H=0.7472,top10E=0.29,eRank=148.8,q75/q25=63.57 attn_vo:H=0.8160,top10E=0.17,eRank=256.0,q75/q25=55.94 mlp_w1:H=0.8788,top10E=0.17,eRank=348.5,q75/q25=5.44 mlp_w2:H=0.9707,top10E=0.04,eRank=632.2,q75/q25=2.91 vo_prod:H=0.7053,top10E=0.28,eRank=113.2,q75/q25=3260.47 train_time:237678ms step_avg:74.27ms +[2025-09-02 17:46:02] [Rank 0] step:3201/10000 train_time:237692ms step_avg:74.26ms +[2025-09-02 17:46:02] [Rank 0] step:3201/10000 train_time:237692ms step_avg:74.26ms +[2025-09-02 17:46:03] [Rank 0] step:3221/10000 train_time:239161ms step_avg:74.25ms +[2025-09-02 17:46:03] [Rank 0] step:3221/10000 train_time:239161ms step_avg:74.25ms +[2025-09-02 17:46:05] [Rank 0] step:3241/10000 train_time:240682ms step_avg:74.26ms +[2025-09-02 17:46:05] [Rank 0] step:3241/10000 train_time:240682ms step_avg:74.26ms +[2025-09-02 17:46:06] [Rank 0] step:3261/10000 train_time:242204ms step_avg:74.27ms +[2025-09-02 17:46:06] [Rank 0] step:3261/10000 train_time:242204ms step_avg:74.27ms +[2025-09-02 17:46:08] [Rank 0] step:3281/10000 train_time:243728ms step_avg:74.28ms +[2025-09-02 17:46:08] [Rank 0] step:3281/10000 train_time:243728ms step_avg:74.28ms +[2025-09-02 17:46:09] [Rank 0] step:3301/10000 train_time:245252ms step_avg:74.30ms +[2025-09-02 17:46:09] [Rank 0] step:3301/10000 train_time:245252ms step_avg:74.30ms +[2025-09-02 17:46:11] [Rank 0] step:3321/10000 train_time:246776ms step_avg:74.31ms +[2025-09-02 17:46:11] [Rank 0] step:3321/10000 train_time:246776ms step_avg:74.31ms +[2025-09-02 
17:46:12] [Rank 0] step:3341/10000 train_time:248302ms step_avg:74.32ms +[2025-09-02 17:46:12] [Rank 0] step:3341/10000 train_time:248302ms step_avg:74.32ms +[2025-09-02 17:46:14] [Rank 0] step:3361/10000 train_time:249828ms step_avg:74.33ms +[2025-09-02 17:46:14] [Rank 0] step:3361/10000 train_time:249828ms step_avg:74.33ms +[2025-09-02 17:46:15] [Rank 0] step:3381/10000 train_time:251356ms step_avg:74.34ms +[2025-09-02 17:46:15] [Rank 0] step:3381/10000 train_time:251356ms step_avg:74.34ms +[2025-09-02 17:46:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:46:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:46:29] [Rank 0] PRINT: step:3400/10000 val_loss:4.0887 svd_entropy: attn_qk:H=0.7499,top10E=0.28,eRank=151.2,q75/q25=63.83 attn_vo:H=0.8214,top10E=0.16,eRank=263.8,q75/q25=57.56 mlp_w1:H=0.8820,top10E=0.17,eRank=355.6,q75/q25=5.33 mlp_w2:H=0.9708,top10E=0.04,eRank=632.5,q75/q25=2.90 vo_prod:H=0.7129,top10E=0.27,eRank=118.1,q75/q25=3734.57 train_time:252962ms step_avg:74.40ms +[2025-09-02 17:46:29] [Rank 0] PRINT: step:3400/10000 val_loss:4.0887 svd_entropy: attn_qk:H=0.7499,top10E=0.28,eRank=151.2,q75/q25=63.83 attn_vo:H=0.8214,top10E=0.16,eRank=263.8,q75/q25=57.56 mlp_w1:H=0.8820,top10E=0.17,eRank=355.6,q75/q25=5.33 mlp_w2:H=0.9708,top10E=0.04,eRank=632.5,q75/q25=2.90 vo_prod:H=0.7129,top10E=0.27,eRank=118.1,q75/q25=3734.57 train_time:252962ms step_avg:74.40ms +[2025-09-02 17:46:29] [Rank 0] step:3401/10000 train_time:252976ms step_avg:74.38ms +[2025-09-02 17:46:29] [Rank 0] step:3401/10000 train_time:252976ms step_avg:74.38ms +[2025-09-02 17:46:30] [Rank 0] step:3421/10000 train_time:254449ms step_avg:74.38ms +[2025-09-02 17:46:30] [Rank 0] step:3421/10000 train_time:254449ms step_avg:74.38ms +[2025-09-02 17:46:32] [Rank 0] step:3441/10000 train_time:255970ms 
step_avg:74.39ms +[2025-09-02 17:46:32] [Rank 0] step:3441/10000 train_time:255970ms step_avg:74.39ms +[2025-09-02 17:46:33] [Rank 0] step:3461/10000 train_time:257494ms step_avg:74.40ms +[2025-09-02 17:46:33] [Rank 0] step:3461/10000 train_time:257494ms step_avg:74.40ms +[2025-09-02 17:46:35] [Rank 0] step:3481/10000 train_time:259018ms step_avg:74.41ms +[2025-09-02 17:46:35] [Rank 0] step:3481/10000 train_time:259018ms step_avg:74.41ms +[2025-09-02 17:46:36] [Rank 0] step:3501/10000 train_time:260544ms step_avg:74.42ms +[2025-09-02 17:46:36] [Rank 0] step:3501/10000 train_time:260544ms step_avg:74.42ms +[2025-09-02 17:46:38] [Rank 0] step:3521/10000 train_time:262072ms step_avg:74.43ms +[2025-09-02 17:46:38] [Rank 0] step:3521/10000 train_time:262072ms step_avg:74.43ms +[2025-09-02 17:46:39] [Rank 0] step:3541/10000 train_time:263598ms step_avg:74.44ms +[2025-09-02 17:46:39] [Rank 0] step:3541/10000 train_time:263598ms step_avg:74.44ms +[2025-09-02 17:46:41] [Rank 0] step:3561/10000 train_time:265123ms step_avg:74.45ms +[2025-09-02 17:46:41] [Rank 0] step:3561/10000 train_time:265123ms step_avg:74.45ms +[2025-09-02 17:46:42] [Rank 0] step:3581/10000 train_time:266650ms step_avg:74.46ms +[2025-09-02 17:46:42] [Rank 0] step:3581/10000 train_time:266650ms step_avg:74.46ms +[2025-09-02 17:46:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:46:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:46:56] [Rank 0] PRINT: step:3600/10000 val_loss:4.0752 svd_entropy: attn_qk:H=0.7523,top10E=0.28,eRank=153.4,q75/q25=63.49 attn_vo:H=0.8242,top10E=0.16,eRank=267.5,q75/q25=57.83 mlp_w1:H=0.8848,top10E=0.17,eRank=362.1,q75/q25=5.21 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.90 vo_prod:H=0.7164,top10E=0.27,eRank=121.2,q75/q25=3885.08 train_time:268256ms step_avg:74.52ms +[2025-09-02 17:46:56] [Rank 0] PRINT: step:3600/10000 val_loss:4.0752 svd_entropy: attn_qk:H=0.7523,top10E=0.28,eRank=153.4,q75/q25=63.49 attn_vo:H=0.8242,top10E=0.16,eRank=267.5,q75/q25=57.83 mlp_w1:H=0.8848,top10E=0.17,eRank=362.1,q75/q25=5.21 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.90 vo_prod:H=0.7164,top10E=0.27,eRank=121.2,q75/q25=3885.08 train_time:268256ms step_avg:74.52ms +[2025-09-02 17:46:56] [Rank 0] step:3601/10000 train_time:268270ms step_avg:74.50ms +[2025-09-02 17:46:56] [Rank 0] step:3601/10000 train_time:268270ms step_avg:74.50ms +[2025-09-02 17:46:57] [Rank 0] step:3621/10000 train_time:269735ms step_avg:74.49ms +[2025-09-02 17:46:57] [Rank 0] step:3621/10000 train_time:269735ms step_avg:74.49ms +[2025-09-02 17:46:59] [Rank 0] step:3641/10000 train_time:271265ms step_avg:74.50ms +[2025-09-02 17:46:59] [Rank 0] step:3641/10000 train_time:271265ms step_avg:74.50ms +[2025-09-02 17:47:00] [Rank 0] step:3661/10000 train_time:272794ms step_avg:74.51ms +[2025-09-02 17:47:00] [Rank 0] step:3661/10000 train_time:272794ms step_avg:74.51ms +[2025-09-02 17:47:02] [Rank 0] step:3681/10000 train_time:274319ms step_avg:74.52ms +[2025-09-02 17:47:02] [Rank 0] step:3681/10000 train_time:274319ms step_avg:74.52ms +[2025-09-02 17:47:03] [Rank 0] step:3701/10000 train_time:275848ms step_avg:74.53ms +[2025-09-02 17:47:03] [Rank 0] step:3701/10000 train_time:275848ms step_avg:74.53ms +[2025-09-02 17:47:05] [Rank 0] step:3721/10000 train_time:277403ms step_avg:74.55ms +[2025-09-02 17:47:05] [Rank 0] step:3721/10000 train_time:277403ms step_avg:74.55ms +[2025-09-02 
17:47:06] [Rank 0] step:3741/10000 train_time:278966ms step_avg:74.57ms +[2025-09-02 17:47:06] [Rank 0] step:3741/10000 train_time:278966ms step_avg:74.57ms +[2025-09-02 17:47:08] [Rank 0] step:3761/10000 train_time:280531ms step_avg:74.59ms +[2025-09-02 17:47:08] [Rank 0] step:3761/10000 train_time:280531ms step_avg:74.59ms +[2025-09-02 17:47:10] [Rank 0] step:3781/10000 train_time:282098ms step_avg:74.61ms +[2025-09-02 17:47:10] [Rank 0] step:3781/10000 train_time:282098ms step_avg:74.61ms +[2025-09-02 17:47:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:47:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:47:23] [Rank 0] PRINT: step:3800/10000 val_loss:4.0282 svd_entropy: attn_qk:H=0.7541,top10E=0.28,eRank=155.0,q75/q25=63.64 attn_vo:H=0.8266,top10E=0.16,eRank=270.9,q75/q25=56.93 mlp_w1:H=0.8875,top10E=0.16,eRank=368.3,q75/q25=5.13 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.89 vo_prod:H=0.7197,top10E=0.26,eRank=124.1,q75/q25=3816.49 train_time:283745ms step_avg:74.67ms +[2025-09-02 17:47:23] [Rank 0] PRINT: step:3800/10000 val_loss:4.0282 svd_entropy: attn_qk:H=0.7541,top10E=0.28,eRank=155.0,q75/q25=63.64 attn_vo:H=0.8266,top10E=0.16,eRank=270.9,q75/q25=56.93 mlp_w1:H=0.8875,top10E=0.16,eRank=368.3,q75/q25=5.13 mlp_w2:H=0.9709,top10E=0.04,eRank=633.0,q75/q25=2.89 vo_prod:H=0.7197,top10E=0.26,eRank=124.1,q75/q25=3816.49 train_time:283745ms step_avg:74.67ms +[2025-09-02 17:47:23] [Rank 0] step:3801/10000 train_time:283759ms step_avg:74.65ms +[2025-09-02 17:47:23] [Rank 0] step:3801/10000 train_time:283759ms step_avg:74.65ms +[2025-09-02 17:47:25] [Rank 0] step:3821/10000 train_time:285265ms step_avg:74.66ms +[2025-09-02 17:47:25] [Rank 0] step:3821/10000 train_time:285265ms step_avg:74.66ms +[2025-09-02 17:47:26] [Rank 0] step:3841/10000 train_time:286829ms 
step_avg:74.68ms +[2025-09-02 17:47:26] [Rank 0] step:3841/10000 train_time:286829ms step_avg:74.68ms +[2025-09-02 17:47:28] [Rank 0] step:3861/10000 train_time:288390ms step_avg:74.69ms +[2025-09-02 17:47:28] [Rank 0] step:3861/10000 train_time:288390ms step_avg:74.69ms +[2025-09-02 17:47:29] [Rank 0] step:3881/10000 train_time:289951ms step_avg:74.71ms +[2025-09-02 17:47:29] [Rank 0] step:3881/10000 train_time:289951ms step_avg:74.71ms +[2025-09-02 17:47:31] [Rank 0] step:3901/10000 train_time:291513ms step_avg:74.73ms +[2025-09-02 17:47:31] [Rank 0] step:3901/10000 train_time:291513ms step_avg:74.73ms +[2025-09-02 17:47:32] [Rank 0] step:3921/10000 train_time:293076ms step_avg:74.75ms +[2025-09-02 17:47:32] [Rank 0] step:3921/10000 train_time:293076ms step_avg:74.75ms +[2025-09-02 17:47:34] [Rank 0] step:3941/10000 train_time:294637ms step_avg:74.76ms +[2025-09-02 17:47:34] [Rank 0] step:3941/10000 train_time:294637ms step_avg:74.76ms +[2025-09-02 17:47:35] [Rank 0] step:3961/10000 train_time:296198ms step_avg:74.78ms +[2025-09-02 17:47:35] [Rank 0] step:3961/10000 train_time:296198ms step_avg:74.78ms +[2025-09-02 17:47:37] [Rank 0] step:3981/10000 train_time:297760ms step_avg:74.80ms +[2025-09-02 17:47:37] [Rank 0] step:3981/10000 train_time:297760ms step_avg:74.80ms +[2025-09-02 17:47:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:47:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:47:50] [Rank 0] PRINT: step:4000/10000 val_loss:4.0003 svd_entropy: attn_qk:H=0.7562,top10E=0.28,eRank=156.9,q75/q25=62.60 attn_vo:H=0.8287,top10E=0.15,eRank=273.8,q75/q25=55.78 mlp_w1:H=0.8900,top10E=0.16,eRank=374.2,q75/q25=5.06 mlp_w2:H=0.9710,top10E=0.04,eRank=633.2,q75/q25=2.88 vo_prod:H=0.7227,top10E=0.26,eRank=126.8,q75/q25=3743.20 train_time:299401ms step_avg:74.85ms +[2025-09-02 17:47:50] [Rank 0] PRINT: step:4000/10000 val_loss:4.0003 svd_entropy: attn_qk:H=0.7562,top10E=0.28,eRank=156.9,q75/q25=62.60 attn_vo:H=0.8287,top10E=0.15,eRank=273.8,q75/q25=55.78 mlp_w1:H=0.8900,top10E=0.16,eRank=374.2,q75/q25=5.06 mlp_w2:H=0.9710,top10E=0.04,eRank=633.2,q75/q25=2.88 vo_prod:H=0.7227,top10E=0.26,eRank=126.8,q75/q25=3743.20 train_time:299401ms step_avg:74.85ms +[2025-09-02 17:47:50] [Rank 0] step:4001/10000 train_time:299415ms step_avg:74.84ms +[2025-09-02 17:47:50] [Rank 0] step:4001/10000 train_time:299415ms step_avg:74.84ms +[2025-09-02 17:47:52] [Rank 0] step:4021/10000 train_time:300926ms step_avg:74.84ms +[2025-09-02 17:47:52] [Rank 0] step:4021/10000 train_time:300926ms step_avg:74.84ms +[2025-09-02 17:47:53] [Rank 0] step:4041/10000 train_time:302487ms step_avg:74.85ms +[2025-09-02 17:47:53] [Rank 0] step:4041/10000 train_time:302487ms step_avg:74.85ms +[2025-09-02 17:47:55] [Rank 0] step:4061/10000 train_time:304046ms step_avg:74.87ms +[2025-09-02 17:47:55] [Rank 0] step:4061/10000 train_time:304046ms step_avg:74.87ms +[2025-09-02 17:47:57] [Rank 0] step:4081/10000 train_time:305792ms step_avg:74.93ms +[2025-09-02 17:47:57] [Rank 0] step:4081/10000 train_time:305792ms step_avg:74.93ms +[2025-09-02 17:47:58] [Rank 0] step:4101/10000 train_time:307352ms step_avg:74.95ms +[2025-09-02 17:47:58] [Rank 0] step:4101/10000 train_time:307352ms step_avg:74.95ms +[2025-09-02 17:48:00] [Rank 0] step:4121/10000 train_time:308914ms step_avg:74.96ms +[2025-09-02 17:48:00] [Rank 0] step:4121/10000 train_time:308914ms step_avg:74.96ms +[2025-09-02 
17:48:01] [Rank 0] step:4141/10000 train_time:310475ms step_avg:74.98ms +[2025-09-02 17:48:01] [Rank 0] step:4141/10000 train_time:310475ms step_avg:74.98ms +[2025-09-02 17:48:03] [Rank 0] step:4161/10000 train_time:312035ms step_avg:74.99ms +[2025-09-02 17:48:03] [Rank 0] step:4161/10000 train_time:312035ms step_avg:74.99ms +[2025-09-02 17:48:05] [Rank 0] step:4181/10000 train_time:313598ms step_avg:75.01ms +[2025-09-02 17:48:05] [Rank 0] step:4181/10000 train_time:313598ms step_avg:75.01ms +[2025-09-02 17:48:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:48:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:48:18] [Rank 0] PRINT: step:4200/10000 val_loss:3.9801 svd_entropy: attn_qk:H=0.7580,top10E=0.28,eRank=158.6,q75/q25=62.09 attn_vo:H=0.8306,top10E=0.15,eRank=276.7,q75/q25=55.01 mlp_w1:H=0.8922,top10E=0.16,eRank=379.5,q75/q25=4.98 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.88 vo_prod:H=0.7254,top10E=0.25,eRank=129.3,q75/q25=3654.60 train_time:315240ms step_avg:75.06ms +[2025-09-02 17:48:18] [Rank 0] PRINT: step:4200/10000 val_loss:3.9801 svd_entropy: attn_qk:H=0.7580,top10E=0.28,eRank=158.6,q75/q25=62.09 attn_vo:H=0.8306,top10E=0.15,eRank=276.7,q75/q25=55.01 mlp_w1:H=0.8922,top10E=0.16,eRank=379.5,q75/q25=4.98 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.88 vo_prod:H=0.7254,top10E=0.25,eRank=129.3,q75/q25=3654.60 train_time:315240ms step_avg:75.06ms +[2025-09-02 17:48:18] [Rank 0] step:4201/10000 train_time:315255ms step_avg:75.04ms +[2025-09-02 17:48:18] [Rank 0] step:4201/10000 train_time:315255ms step_avg:75.04ms +[2025-09-02 17:48:19] [Rank 0] step:4221/10000 train_time:316737ms step_avg:75.04ms +[2025-09-02 17:48:19] [Rank 0] step:4221/10000 train_time:316737ms step_avg:75.04ms +[2025-09-02 17:48:21] [Rank 0] step:4241/10000 train_time:318297ms 
step_avg:75.05ms +[2025-09-02 17:48:21] [Rank 0] step:4241/10000 train_time:318297ms step_avg:75.05ms +[2025-09-02 17:48:23] [Rank 0] step:4261/10000 train_time:319857ms step_avg:75.07ms +[2025-09-02 17:48:23] [Rank 0] step:4261/10000 train_time:319857ms step_avg:75.07ms +[2025-09-02 17:48:24] [Rank 0] step:4281/10000 train_time:321418ms step_avg:75.08ms +[2025-09-02 17:48:24] [Rank 0] step:4281/10000 train_time:321418ms step_avg:75.08ms +[2025-09-02 17:48:26] [Rank 0] step:4301/10000 train_time:322978ms step_avg:75.09ms +[2025-09-02 17:48:26] [Rank 0] step:4301/10000 train_time:322978ms step_avg:75.09ms +[2025-09-02 17:48:27] [Rank 0] step:4321/10000 train_time:324541ms step_avg:75.11ms +[2025-09-02 17:48:27] [Rank 0] step:4321/10000 train_time:324541ms step_avg:75.11ms +[2025-09-02 17:48:29] [Rank 0] step:4341/10000 train_time:326102ms step_avg:75.12ms +[2025-09-02 17:48:29] [Rank 0] step:4341/10000 train_time:326102ms step_avg:75.12ms +[2025-09-02 17:48:30] [Rank 0] step:4361/10000 train_time:327666ms step_avg:75.14ms +[2025-09-02 17:48:30] [Rank 0] step:4361/10000 train_time:327666ms step_avg:75.14ms +[2025-09-02 17:48:32] [Rank 0] step:4381/10000 train_time:329226ms step_avg:75.15ms +[2025-09-02 17:48:32] [Rank 0] step:4381/10000 train_time:329226ms step_avg:75.15ms +[2025-09-02 17:48:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:48:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:48:45] [Rank 0] PRINT: step:4400/10000 val_loss:3.9553 svd_entropy: attn_qk:H=0.7599,top10E=0.27,eRank=160.4,q75/q25=61.71 attn_vo:H=0.8324,top10E=0.15,eRank=279.3,q75/q25=54.12 mlp_w1:H=0.8943,top10E=0.16,eRank=384.6,q75/q25=4.94 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7279,top10E=0.25,eRank=131.5,q75/q25=3548.89 train_time:330871ms step_avg:75.20ms +[2025-09-02 17:48:45] [Rank 0] PRINT: step:4400/10000 val_loss:3.9553 svd_entropy: attn_qk:H=0.7599,top10E=0.27,eRank=160.4,q75/q25=61.71 attn_vo:H=0.8324,top10E=0.15,eRank=279.3,q75/q25=54.12 mlp_w1:H=0.8943,top10E=0.16,eRank=384.6,q75/q25=4.94 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7279,top10E=0.25,eRank=131.5,q75/q25=3548.89 train_time:330871ms step_avg:75.20ms +[2025-09-02 17:48:45] [Rank 0] step:4401/10000 train_time:330885ms step_avg:75.18ms +[2025-09-02 17:48:45] [Rank 0] step:4401/10000 train_time:330885ms step_avg:75.18ms +[2025-09-02 17:48:47] [Rank 0] step:4421/10000 train_time:332372ms step_avg:75.18ms +[2025-09-02 17:48:47] [Rank 0] step:4421/10000 train_time:332372ms step_avg:75.18ms +[2025-09-02 17:48:48] [Rank 0] step:4441/10000 train_time:333931ms step_avg:75.19ms +[2025-09-02 17:48:48] [Rank 0] step:4441/10000 train_time:333931ms step_avg:75.19ms +[2025-09-02 17:48:50] [Rank 0] step:4461/10000 train_time:335496ms step_avg:75.21ms +[2025-09-02 17:48:50] [Rank 0] step:4461/10000 train_time:335496ms step_avg:75.21ms +[2025-09-02 17:48:52] [Rank 0] step:4481/10000 train_time:337159ms step_avg:75.24ms +[2025-09-02 17:48:52] [Rank 0] step:4481/10000 train_time:337159ms step_avg:75.24ms +[2025-09-02 17:48:53] [Rank 0] step:4501/10000 train_time:338726ms step_avg:75.26ms +[2025-09-02 17:48:53] [Rank 0] step:4501/10000 train_time:338726ms step_avg:75.26ms +[2025-09-02 17:48:55] [Rank 0] step:4521/10000 train_time:340299ms step_avg:75.27ms +[2025-09-02 17:48:55] [Rank 0] step:4521/10000 train_time:340299ms step_avg:75.27ms +[2025-09-02 
17:48:56] [Rank 0] step:4541/10000 train_time:341869ms step_avg:75.28ms +[2025-09-02 17:48:56] [Rank 0] step:4541/10000 train_time:341869ms step_avg:75.28ms +[2025-09-02 17:48:58] [Rank 0] step:4561/10000 train_time:343435ms step_avg:75.30ms +[2025-09-02 17:48:58] [Rank 0] step:4561/10000 train_time:343435ms step_avg:75.30ms +[2025-09-02 17:49:00] [Rank 0] step:4581/10000 train_time:345007ms step_avg:75.31ms +[2025-09-02 17:49:00] [Rank 0] step:4581/10000 train_time:345007ms step_avg:75.31ms +[2025-09-02 17:49:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:49:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:49:13] [Rank 0] PRINT: step:4600/10000 val_loss:3.9307 svd_entropy: attn_qk:H=0.7615,top10E=0.27,eRank=162.1,q75/q25=61.37 attn_vo:H=0.8342,top10E=0.15,eRank=281.9,q75/q25=53.43 mlp_w1:H=0.8962,top10E=0.15,eRank=389.4,q75/q25=4.89 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.7304,top10E=0.25,eRank=133.7,q75/q25=3587.05 train_time:346657ms step_avg:75.36ms +[2025-09-02 17:49:13] [Rank 0] PRINT: step:4600/10000 val_loss:3.9307 svd_entropy: attn_qk:H=0.7615,top10E=0.27,eRank=162.1,q75/q25=61.37 attn_vo:H=0.8342,top10E=0.15,eRank=281.9,q75/q25=53.43 mlp_w1:H=0.8962,top10E=0.15,eRank=389.4,q75/q25=4.89 mlp_w2:H=0.9711,top10E=0.04,eRank=633.9,q75/q25=2.87 vo_prod:H=0.7304,top10E=0.25,eRank=133.7,q75/q25=3587.05 train_time:346657ms step_avg:75.36ms +[2025-09-02 17:49:13] [Rank 0] step:4601/10000 train_time:346673ms step_avg:75.35ms +[2025-09-02 17:49:13] [Rank 0] step:4601/10000 train_time:346673ms step_avg:75.35ms +[2025-09-02 17:49:15] [Rank 0] step:4621/10000 train_time:348181ms step_avg:75.35ms +[2025-09-02 17:49:15] [Rank 0] step:4621/10000 train_time:348181ms step_avg:75.35ms +[2025-09-02 17:49:17] [Rank 0] step:4641/10000 train_time:349750ms 
step_avg:75.36ms +[2025-09-02 17:49:17] [Rank 0] step:4641/10000 train_time:349750ms step_avg:75.36ms +[2025-09-02 17:49:18] [Rank 0] step:4661/10000 train_time:351320ms step_avg:75.37ms +[2025-09-02 17:49:18] [Rank 0] step:4661/10000 train_time:351320ms step_avg:75.37ms +[2025-09-02 17:49:20] [Rank 0] step:4681/10000 train_time:352890ms step_avg:75.39ms +[2025-09-02 17:49:20] [Rank 0] step:4681/10000 train_time:352890ms step_avg:75.39ms +[2025-09-02 17:49:21] [Rank 0] step:4701/10000 train_time:354460ms step_avg:75.40ms +[2025-09-02 17:49:21] [Rank 0] step:4701/10000 train_time:354460ms step_avg:75.40ms +[2025-09-02 17:49:23] [Rank 0] step:4721/10000 train_time:356030ms step_avg:75.41ms +[2025-09-02 17:49:23] [Rank 0] step:4721/10000 train_time:356030ms step_avg:75.41ms +[2025-09-02 17:49:24] [Rank 0] step:4741/10000 train_time:357602ms step_avg:75.43ms +[2025-09-02 17:49:24] [Rank 0] step:4741/10000 train_time:357602ms step_avg:75.43ms +[2025-09-02 17:49:26] [Rank 0] step:4761/10000 train_time:359173ms step_avg:75.44ms +[2025-09-02 17:49:26] [Rank 0] step:4761/10000 train_time:359173ms step_avg:75.44ms +[2025-09-02 17:49:28] [Rank 0] step:4781/10000 train_time:360743ms step_avg:75.45ms +[2025-09-02 17:49:28] [Rank 0] step:4781/10000 train_time:360743ms step_avg:75.45ms +[2025-09-02 17:49:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:49:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:49:41] [Rank 0] PRINT: step:4800/10000 val_loss:3.9144 svd_entropy: attn_qk:H=0.7632,top10E=0.27,eRank=163.7,q75/q25=61.16 attn_vo:H=0.8358,top10E=0.15,eRank=284.3,q75/q25=52.51 mlp_w1:H=0.8979,top10E=0.15,eRank=393.7,q75/q25=4.84 mlp_w2:H=0.9712,top10E=0.04,eRank=634.1,q75/q25=2.87 vo_prod:H=0.7326,top10E=0.25,eRank=135.7,q75/q25=3580.50 train_time:362402ms step_avg:75.50ms +[2025-09-02 17:49:41] [Rank 0] PRINT: step:4800/10000 val_loss:3.9144 svd_entropy: attn_qk:H=0.7632,top10E=0.27,eRank=163.7,q75/q25=61.16 attn_vo:H=0.8358,top10E=0.15,eRank=284.3,q75/q25=52.51 mlp_w1:H=0.8979,top10E=0.15,eRank=393.7,q75/q25=4.84 mlp_w2:H=0.9712,top10E=0.04,eRank=634.1,q75/q25=2.87 vo_prod:H=0.7326,top10E=0.25,eRank=135.7,q75/q25=3580.50 train_time:362402ms step_avg:75.50ms +[2025-09-02 17:49:41] [Rank 0] step:4801/10000 train_time:362416ms step_avg:75.49ms +[2025-09-02 17:49:41] [Rank 0] step:4801/10000 train_time:362416ms step_avg:75.49ms +[2025-09-02 17:49:43] [Rank 0] step:4821/10000 train_time:363913ms step_avg:75.49ms +[2025-09-02 17:49:43] [Rank 0] step:4821/10000 train_time:363913ms step_avg:75.49ms +[2025-09-02 17:49:44] [Rank 0] step:4841/10000 train_time:365479ms step_avg:75.50ms +[2025-09-02 17:49:44] [Rank 0] step:4841/10000 train_time:365479ms step_avg:75.50ms +[2025-09-02 17:49:46] [Rank 0] step:4861/10000 train_time:367047ms step_avg:75.51ms +[2025-09-02 17:49:46] [Rank 0] step:4861/10000 train_time:367047ms step_avg:75.51ms +[2025-09-02 17:49:47] [Rank 0] step:4881/10000 train_time:368613ms step_avg:75.52ms +[2025-09-02 17:49:47] [Rank 0] step:4881/10000 train_time:368613ms step_avg:75.52ms +[2025-09-02 17:49:49] [Rank 0] step:4901/10000 train_time:370179ms step_avg:75.53ms +[2025-09-02 17:49:49] [Rank 0] step:4901/10000 train_time:370179ms step_avg:75.53ms +[2025-09-02 17:49:50] [Rank 0] step:4921/10000 train_time:371749ms step_avg:75.54ms +[2025-09-02 17:49:50] [Rank 0] step:4921/10000 train_time:371749ms step_avg:75.54ms +[2025-09-02 
17:49:52] [Rank 0] step:4941/10000 train_time:373322ms step_avg:75.56ms +[2025-09-02 17:49:52] [Rank 0] step:4941/10000 train_time:373322ms step_avg:75.56ms +[2025-09-02 17:49:54] [Rank 0] step:4961/10000 train_time:374890ms step_avg:75.57ms +[2025-09-02 17:49:54] [Rank 0] step:4961/10000 train_time:374890ms step_avg:75.57ms +[2025-09-02 17:49:55] [Rank 0] step:4981/10000 train_time:376518ms step_avg:75.59ms +[2025-09-02 17:49:55] [Rank 0] step:4981/10000 train_time:376518ms step_avg:75.59ms +[2025-09-02 17:49:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:49:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:50:08] [Rank 0] PRINT: step:5000/10000 val_loss:3.8955 svd_entropy: attn_qk:H=0.7648,top10E=0.27,eRank=165.3,q75/q25=60.95 attn_vo:H=0.8373,top10E=0.14,eRank=286.6,q75/q25=51.82 mlp_w1:H=0.8994,top10E=0.15,eRank=397.5,q75/q25=4.78 mlp_w2:H=0.9712,top10E=0.04,eRank=634.2,q75/q25=2.86 vo_prod:H=0.7347,top10E=0.24,eRank=137.6,q75/q25=3474.82 train_time:378167ms step_avg:75.63ms +[2025-09-02 17:50:08] [Rank 0] PRINT: step:5000/10000 val_loss:3.8955 svd_entropy: attn_qk:H=0.7648,top10E=0.27,eRank=165.3,q75/q25=60.95 attn_vo:H=0.8373,top10E=0.14,eRank=286.6,q75/q25=51.82 mlp_w1:H=0.8994,top10E=0.15,eRank=397.5,q75/q25=4.78 mlp_w2:H=0.9712,top10E=0.04,eRank=634.2,q75/q25=2.86 vo_prod:H=0.7347,top10E=0.24,eRank=137.6,q75/q25=3474.82 train_time:378167ms step_avg:75.63ms +[2025-09-02 17:50:08] [Rank 0] step:5001/10000 train_time:378182ms step_avg:75.62ms +[2025-09-02 17:50:08] [Rank 0] step:5001/10000 train_time:378182ms step_avg:75.62ms +[2025-09-02 17:50:10] [Rank 0] step:5021/10000 train_time:379671ms step_avg:75.62ms +[2025-09-02 17:50:10] [Rank 0] step:5021/10000 train_time:379671ms step_avg:75.62ms +[2025-09-02 17:50:11] [Rank 0] step:5041/10000 train_time:381241ms 
step_avg:75.63ms +[2025-09-02 17:50:11] [Rank 0] step:5041/10000 train_time:381241ms step_avg:75.63ms +[2025-09-02 17:50:13] [Rank 0] step:5061/10000 train_time:382805ms step_avg:75.64ms +[2025-09-02 17:50:13] [Rank 0] step:5061/10000 train_time:382805ms step_avg:75.64ms +[2025-09-02 17:50:15] [Rank 0] step:5081/10000 train_time:384373ms step_avg:75.65ms +[2025-09-02 17:50:15] [Rank 0] step:5081/10000 train_time:384373ms step_avg:75.65ms +[2025-09-02 17:50:16] [Rank 0] step:5101/10000 train_time:385940ms step_avg:75.66ms +[2025-09-02 17:50:16] [Rank 0] step:5101/10000 train_time:385940ms step_avg:75.66ms +[2025-09-02 17:50:18] [Rank 0] step:5121/10000 train_time:387509ms step_avg:75.67ms +[2025-09-02 17:50:18] [Rank 0] step:5121/10000 train_time:387509ms step_avg:75.67ms +[2025-09-02 17:50:19] [Rank 0] step:5141/10000 train_time:389080ms step_avg:75.68ms +[2025-09-02 17:50:19] [Rank 0] step:5141/10000 train_time:389080ms step_avg:75.68ms +[2025-09-02 17:50:21] [Rank 0] step:5161/10000 train_time:390649ms step_avg:75.69ms +[2025-09-02 17:50:21] [Rank 0] step:5161/10000 train_time:390649ms step_avg:75.69ms +[2025-09-02 17:50:22] [Rank 0] step:5181/10000 train_time:392219ms step_avg:75.70ms +[2025-09-02 17:50:22] [Rank 0] step:5181/10000 train_time:392219ms step_avg:75.70ms +[2025-09-02 17:50:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:50:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:50:36] [Rank 0] PRINT: step:5200/10000 val_loss:3.8766 svd_entropy: attn_qk:H=0.7662,top10E=0.27,eRank=166.7,q75/q25=60.62 attn_vo:H=0.8386,top10E=0.14,eRank=288.6,q75/q25=51.01 mlp_w1:H=0.9009,top10E=0.15,eRank=401.2,q75/q25=4.73 mlp_w2:H=0.9712,top10E=0.04,eRank=634.4,q75/q25=2.86 vo_prod:H=0.7364,top10E=0.24,eRank=139.1,q75/q25=3434.28 train_time:393891ms step_avg:75.75ms +[2025-09-02 17:50:36] [Rank 0] PRINT: step:5200/10000 val_loss:3.8766 svd_entropy: attn_qk:H=0.7662,top10E=0.27,eRank=166.7,q75/q25=60.62 attn_vo:H=0.8386,top10E=0.14,eRank=288.6,q75/q25=51.01 mlp_w1:H=0.9009,top10E=0.15,eRank=401.2,q75/q25=4.73 mlp_w2:H=0.9712,top10E=0.04,eRank=634.4,q75/q25=2.86 vo_prod:H=0.7364,top10E=0.24,eRank=139.1,q75/q25=3434.28 train_time:393891ms step_avg:75.75ms +[2025-09-02 17:50:36] [Rank 0] step:5201/10000 train_time:393906ms step_avg:75.74ms +[2025-09-02 17:50:36] [Rank 0] step:5201/10000 train_time:393906ms step_avg:75.74ms +[2025-09-02 17:50:38] [Rank 0] step:5221/10000 train_time:395421ms step_avg:75.74ms +[2025-09-02 17:50:38] [Rank 0] step:5221/10000 train_time:395421ms step_avg:75.74ms +[2025-09-02 17:50:39] [Rank 0] step:5241/10000 train_time:397018ms step_avg:75.75ms +[2025-09-02 17:50:39] [Rank 0] step:5241/10000 train_time:397018ms step_avg:75.75ms +[2025-09-02 17:50:41] [Rank 0] step:5261/10000 train_time:398614ms step_avg:75.77ms +[2025-09-02 17:50:41] [Rank 0] step:5261/10000 train_time:398614ms step_avg:75.77ms +[2025-09-02 17:50:42] [Rank 0] step:5281/10000 train_time:400214ms step_avg:75.78ms +[2025-09-02 17:50:42] [Rank 0] step:5281/10000 train_time:400214ms step_avg:75.78ms +[2025-09-02 17:50:44] [Rank 0] step:5301/10000 train_time:401819ms step_avg:75.80ms +[2025-09-02 17:50:44] [Rank 0] step:5301/10000 train_time:401819ms step_avg:75.80ms +[2025-09-02 17:50:46] [Rank 0] step:5321/10000 train_time:403421ms step_avg:75.82ms +[2025-09-02 17:50:46] [Rank 0] step:5321/10000 train_time:403421ms step_avg:75.82ms +[2025-09-02 
17:50:47] [Rank 0] step:5341/10000 train_time:405016ms step_avg:75.83ms +[2025-09-02 17:50:47] [Rank 0] step:5341/10000 train_time:405016ms step_avg:75.83ms +[2025-09-02 17:50:49] [Rank 0] step:5361/10000 train_time:406619ms step_avg:75.85ms +[2025-09-02 17:50:49] [Rank 0] step:5361/10000 train_time:406619ms step_avg:75.85ms +[2025-09-02 17:50:50] [Rank 0] step:5381/10000 train_time:408228ms step_avg:75.86ms +[2025-09-02 17:50:50] [Rank 0] step:5381/10000 train_time:408228ms step_avg:75.86ms +[2025-09-02 17:50:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:50:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:51:04] [Rank 0] PRINT: step:5400/10000 val_loss:3.8582 svd_entropy: attn_qk:H=0.7674,top10E=0.26,eRank=168.0,q75/q25=60.14 attn_vo:H=0.8398,top10E=0.14,eRank=290.4,q75/q25=50.46 mlp_w1:H=0.9022,top10E=0.15,eRank=404.7,q75/q25=4.69 mlp_w2:H=0.9713,top10E=0.04,eRank=634.5,q75/q25=2.85 vo_prod:H=0.7381,top10E=0.24,eRank=140.7,q75/q25=3452.09 train_time:409909ms step_avg:75.91ms +[2025-09-02 17:51:04] [Rank 0] PRINT: step:5400/10000 val_loss:3.8582 svd_entropy: attn_qk:H=0.7674,top10E=0.26,eRank=168.0,q75/q25=60.14 attn_vo:H=0.8398,top10E=0.14,eRank=290.4,q75/q25=50.46 mlp_w1:H=0.9022,top10E=0.15,eRank=404.7,q75/q25=4.69 mlp_w2:H=0.9713,top10E=0.04,eRank=634.5,q75/q25=2.85 vo_prod:H=0.7381,top10E=0.24,eRank=140.7,q75/q25=3452.09 train_time:409909ms step_avg:75.91ms +[2025-09-02 17:51:04] [Rank 0] step:5401/10000 train_time:409924ms step_avg:75.90ms +[2025-09-02 17:51:04] [Rank 0] step:5401/10000 train_time:409924ms step_avg:75.90ms +[2025-09-02 17:51:05] [Rank 0] step:5421/10000 train_time:411446ms step_avg:75.90ms +[2025-09-02 17:51:05] [Rank 0] step:5421/10000 train_time:411446ms step_avg:75.90ms +[2025-09-02 17:51:07] [Rank 0] step:5441/10000 train_time:413040ms 
step_avg:75.91ms +[2025-09-02 17:51:07] [Rank 0] step:5441/10000 train_time:413040ms step_avg:75.91ms +[2025-09-02 17:51:09] [Rank 0] step:5461/10000 train_time:414644ms step_avg:75.93ms +[2025-09-02 17:51:09] [Rank 0] step:5461/10000 train_time:414644ms step_avg:75.93ms +[2025-09-02 17:51:10] [Rank 0] step:5481/10000 train_time:416244ms step_avg:75.94ms +[2025-09-02 17:51:10] [Rank 0] step:5481/10000 train_time:416244ms step_avg:75.94ms +[2025-09-02 17:51:12] [Rank 0] step:5501/10000 train_time:417848ms step_avg:75.96ms +[2025-09-02 17:51:12] [Rank 0] step:5501/10000 train_time:417848ms step_avg:75.96ms +[2025-09-02 17:51:13] [Rank 0] step:5521/10000 train_time:419453ms step_avg:75.97ms +[2025-09-02 17:51:13] [Rank 0] step:5521/10000 train_time:419453ms step_avg:75.97ms +[2025-09-02 17:51:15] [Rank 0] step:5541/10000 train_time:421053ms step_avg:75.99ms +[2025-09-02 17:51:15] [Rank 0] step:5541/10000 train_time:421053ms step_avg:75.99ms +[2025-09-02 17:51:17] [Rank 0] step:5561/10000 train_time:422656ms step_avg:76.00ms +[2025-09-02 17:51:17] [Rank 0] step:5561/10000 train_time:422656ms step_avg:76.00ms +[2025-09-02 17:51:18] [Rank 0] step:5581/10000 train_time:424257ms step_avg:76.02ms +[2025-09-02 17:51:18] [Rank 0] step:5581/10000 train_time:424257ms step_avg:76.02ms +[2025-09-02 17:51:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:51:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:51:32] [Rank 0] PRINT: step:5600/10000 val_loss:3.8458 svd_entropy: attn_qk:H=0.7687,top10E=0.26,eRank=169.3,q75/q25=59.87 attn_vo:H=0.8409,top10E=0.14,eRank=292.2,q75/q25=49.89 mlp_w1:H=0.9035,top10E=0.14,eRank=407.9,q75/q25=4.66 mlp_w2:H=0.9713,top10E=0.04,eRank=634.6,q75/q25=2.85 vo_prod:H=0.7397,top10E=0.24,eRank=142.1,q75/q25=3374.52 train_time:425941ms step_avg:76.06ms +[2025-09-02 17:51:32] [Rank 0] PRINT: step:5600/10000 val_loss:3.8458 svd_entropy: attn_qk:H=0.7687,top10E=0.26,eRank=169.3,q75/q25=59.87 attn_vo:H=0.8409,top10E=0.14,eRank=292.2,q75/q25=49.89 mlp_w1:H=0.9035,top10E=0.14,eRank=407.9,q75/q25=4.66 mlp_w2:H=0.9713,top10E=0.04,eRank=634.6,q75/q25=2.85 vo_prod:H=0.7397,top10E=0.24,eRank=142.1,q75/q25=3374.52 train_time:425941ms step_avg:76.06ms +[2025-09-02 17:51:32] [Rank 0] step:5601/10000 train_time:425956ms step_avg:76.05ms +[2025-09-02 17:51:32] [Rank 0] step:5601/10000 train_time:425956ms step_avg:76.05ms +[2025-09-02 17:51:33] [Rank 0] step:5621/10000 train_time:427484ms step_avg:76.05ms +[2025-09-02 17:51:33] [Rank 0] step:5621/10000 train_time:427484ms step_avg:76.05ms +[2025-09-02 17:51:35] [Rank 0] step:5641/10000 train_time:429083ms step_avg:76.06ms +[2025-09-02 17:51:35] [Rank 0] step:5641/10000 train_time:429083ms step_avg:76.06ms +[2025-09-02 17:51:36] [Rank 0] step:5661/10000 train_time:430681ms step_avg:76.08ms +[2025-09-02 17:51:36] [Rank 0] step:5661/10000 train_time:430681ms step_avg:76.08ms +[2025-09-02 17:51:38] [Rank 0] step:5681/10000 train_time:432286ms step_avg:76.09ms +[2025-09-02 17:51:38] [Rank 0] step:5681/10000 train_time:432286ms step_avg:76.09ms +[2025-09-02 17:51:40] [Rank 0] step:5701/10000 train_time:433887ms step_avg:76.11ms +[2025-09-02 17:51:40] [Rank 0] step:5701/10000 train_time:433887ms step_avg:76.11ms +[2025-09-02 17:51:41] [Rank 0] step:5721/10000 train_time:435491ms step_avg:76.12ms +[2025-09-02 17:51:41] [Rank 0] step:5721/10000 train_time:435491ms step_avg:76.12ms +[2025-09-02 
17:51:43] [Rank 0] step:5741/10000 train_time:437093ms step_avg:76.14ms +[2025-09-02 17:51:43] [Rank 0] step:5741/10000 train_time:437093ms step_avg:76.14ms +[2025-09-02 17:51:44] [Rank 0] step:5761/10000 train_time:438699ms step_avg:76.15ms +[2025-09-02 17:51:44] [Rank 0] step:5761/10000 train_time:438699ms step_avg:76.15ms +[2025-09-02 17:51:46] [Rank 0] step:5781/10000 train_time:440302ms step_avg:76.16ms +[2025-09-02 17:51:46] [Rank 0] step:5781/10000 train_time:440302ms step_avg:76.16ms +[2025-09-02 17:51:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:51:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:51:59] [Rank 0] PRINT: step:5800/10000 val_loss:3.8375 svd_entropy: attn_qk:H=0.7699,top10E=0.26,eRank=170.6,q75/q25=59.39 attn_vo:H=0.8420,top10E=0.14,eRank=294.0,q75/q25=49.03 mlp_w1:H=0.9046,top10E=0.14,eRank=410.9,q75/q25=4.63 mlp_w2:H=0.9713,top10E=0.04,eRank=634.7,q75/q25=2.85 vo_prod:H=0.7412,top10E=0.24,eRank=143.5,q75/q25=3379.30 train_time:441988ms step_avg:76.20ms +[2025-09-02 17:51:59] [Rank 0] PRINT: step:5800/10000 val_loss:3.8375 svd_entropy: attn_qk:H=0.7699,top10E=0.26,eRank=170.6,q75/q25=59.39 attn_vo:H=0.8420,top10E=0.14,eRank=294.0,q75/q25=49.03 mlp_w1:H=0.9046,top10E=0.14,eRank=410.9,q75/q25=4.63 mlp_w2:H=0.9713,top10E=0.04,eRank=634.7,q75/q25=2.85 vo_prod:H=0.7412,top10E=0.24,eRank=143.5,q75/q25=3379.30 train_time:441988ms step_avg:76.20ms +[2025-09-02 17:51:59] [Rank 0] step:5801/10000 train_time:442003ms step_avg:76.19ms +[2025-09-02 17:51:59] [Rank 0] step:5801/10000 train_time:442003ms step_avg:76.19ms +[2025-09-02 17:52:01] [Rank 0] step:5821/10000 train_time:443524ms step_avg:76.19ms +[2025-09-02 17:52:01] [Rank 0] step:5821/10000 train_time:443524ms step_avg:76.19ms +[2025-09-02 17:52:03] [Rank 0] step:5841/10000 train_time:445136ms 
step_avg:76.21ms +[2025-09-02 17:52:03] [Rank 0] step:5841/10000 train_time:445136ms step_avg:76.21ms +[2025-09-02 17:52:04] [Rank 0] step:5861/10000 train_time:446740ms step_avg:76.22ms +[2025-09-02 17:52:04] [Rank 0] step:5861/10000 train_time:446740ms step_avg:76.22ms +[2025-09-02 17:52:06] [Rank 0] step:5881/10000 train_time:448341ms step_avg:76.24ms +[2025-09-02 17:52:06] [Rank 0] step:5881/10000 train_time:448341ms step_avg:76.24ms +[2025-09-02 17:52:07] [Rank 0] step:5901/10000 train_time:449939ms step_avg:76.25ms +[2025-09-02 17:52:07] [Rank 0] step:5901/10000 train_time:449939ms step_avg:76.25ms +[2025-09-02 17:52:09] [Rank 0] step:5921/10000 train_time:451537ms step_avg:76.26ms +[2025-09-02 17:52:09] [Rank 0] step:5921/10000 train_time:451537ms step_avg:76.26ms +[2025-09-02 17:52:11] [Rank 0] step:5941/10000 train_time:453142ms step_avg:76.27ms +[2025-09-02 17:52:11] [Rank 0] step:5941/10000 train_time:453142ms step_avg:76.27ms +[2025-09-02 17:52:12] [Rank 0] step:5961/10000 train_time:454748ms step_avg:76.29ms +[2025-09-02 17:52:12] [Rank 0] step:5961/10000 train_time:454748ms step_avg:76.29ms +[2025-09-02 17:52:14] [Rank 0] step:5981/10000 train_time:456351ms step_avg:76.30ms +[2025-09-02 17:52:14] [Rank 0] step:5981/10000 train_time:456351ms step_avg:76.30ms +[2025-09-02 17:52:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:52:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:52:27] [Rank 0] PRINT: step:6000/10000 val_loss:3.8136 svd_entropy: attn_qk:H=0.7711,top10E=0.26,eRank=171.9,q75/q25=58.98 attn_vo:H=0.8431,top10E=0.14,eRank=295.6,q75/q25=48.63 mlp_w1:H=0.9057,top10E=0.14,eRank=413.7,q75/q25=4.60 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7426,top10E=0.24,eRank=144.8,q75/q25=3301.91 train_time:458033ms step_avg:76.34ms +[2025-09-02 17:52:27] [Rank 0] PRINT: step:6000/10000 val_loss:3.8136 svd_entropy: attn_qk:H=0.7711,top10E=0.26,eRank=171.9,q75/q25=58.98 attn_vo:H=0.8431,top10E=0.14,eRank=295.6,q75/q25=48.63 mlp_w1:H=0.9057,top10E=0.14,eRank=413.7,q75/q25=4.60 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7426,top10E=0.24,eRank=144.8,q75/q25=3301.91 train_time:458033ms step_avg:76.34ms +[2025-09-02 17:52:27] [Rank 0] step:6001/10000 train_time:458048ms step_avg:76.33ms +[2025-09-02 17:52:27] [Rank 0] step:6001/10000 train_time:458048ms step_avg:76.33ms +[2025-09-02 17:52:29] [Rank 0] step:6021/10000 train_time:459580ms step_avg:76.33ms +[2025-09-02 17:52:29] [Rank 0] step:6021/10000 train_time:459580ms step_avg:76.33ms +[2025-09-02 17:52:31] [Rank 0] step:6041/10000 train_time:461182ms step_avg:76.34ms +[2025-09-02 17:52:31] [Rank 0] step:6041/10000 train_time:461182ms step_avg:76.34ms +[2025-09-02 17:52:32] [Rank 0] step:6061/10000 train_time:462789ms step_avg:76.36ms +[2025-09-02 17:52:32] [Rank 0] step:6061/10000 train_time:462789ms step_avg:76.36ms +[2025-09-02 17:52:34] [Rank 0] step:6081/10000 train_time:464392ms step_avg:76.37ms +[2025-09-02 17:52:34] [Rank 0] step:6081/10000 train_time:464392ms step_avg:76.37ms +[2025-09-02 17:52:35] [Rank 0] step:6101/10000 train_time:466001ms step_avg:76.38ms +[2025-09-02 17:52:35] [Rank 0] step:6101/10000 train_time:466001ms step_avg:76.38ms +[2025-09-02 17:52:37] [Rank 0] step:6121/10000 train_time:467873ms step_avg:76.44ms +[2025-09-02 17:52:37] [Rank 0] step:6121/10000 train_time:467873ms step_avg:76.44ms +[2025-09-02 
17:52:39] [Rank 0] step:6141/10000 train_time:469489ms step_avg:76.45ms +[2025-09-02 17:52:39] [Rank 0] step:6141/10000 train_time:469489ms step_avg:76.45ms +[2025-09-02 17:52:40] [Rank 0] step:6161/10000 train_time:471094ms step_avg:76.46ms +[2025-09-02 17:52:40] [Rank 0] step:6161/10000 train_time:471094ms step_avg:76.46ms +[2025-09-02 17:52:42] [Rank 0] step:6181/10000 train_time:472696ms step_avg:76.48ms +[2025-09-02 17:52:42] [Rank 0] step:6181/10000 train_time:472696ms step_avg:76.48ms +[2025-09-02 17:52:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:52:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:52:55] [Rank 0] PRINT: step:6200/10000 val_loss:3.7972 svd_entropy: attn_qk:H=0.7722,top10E=0.26,eRank=173.0,q75/q25=58.73 attn_vo:H=0.8441,top10E=0.14,eRank=297.3,q75/q25=48.26 mlp_w1:H=0.9067,top10E=0.14,eRank=416.4,q75/q25=4.57 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7441,top10E=0.24,eRank=146.3,q75/q25=3353.91 train_time:474382ms step_avg:76.51ms +[2025-09-02 17:52:55] [Rank 0] PRINT: step:6200/10000 val_loss:3.7972 svd_entropy: attn_qk:H=0.7722,top10E=0.26,eRank=173.0,q75/q25=58.73 attn_vo:H=0.8441,top10E=0.14,eRank=297.3,q75/q25=48.26 mlp_w1:H=0.9067,top10E=0.14,eRank=416.4,q75/q25=4.57 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7441,top10E=0.24,eRank=146.3,q75/q25=3353.91 train_time:474382ms step_avg:76.51ms +[2025-09-02 17:52:55] [Rank 0] step:6201/10000 train_time:474397ms step_avg:76.50ms +[2025-09-02 17:52:55] [Rank 0] step:6201/10000 train_time:474397ms step_avg:76.50ms +[2025-09-02 17:52:57] [Rank 0] step:6221/10000 train_time:475933ms step_avg:76.50ms +[2025-09-02 17:52:57] [Rank 0] step:6221/10000 train_time:475933ms step_avg:76.50ms +[2025-09-02 17:52:59] [Rank 0] step:6241/10000 train_time:477534ms 
step_avg:76.52ms +[2025-09-02 17:52:59] [Rank 0] step:6241/10000 train_time:477534ms step_avg:76.52ms +[2025-09-02 17:53:00] [Rank 0] step:6261/10000 train_time:479138ms step_avg:76.53ms +[2025-09-02 17:53:00] [Rank 0] step:6261/10000 train_time:479138ms step_avg:76.53ms +[2025-09-02 17:53:02] [Rank 0] step:6281/10000 train_time:480747ms step_avg:76.54ms +[2025-09-02 17:53:02] [Rank 0] step:6281/10000 train_time:480747ms step_avg:76.54ms +[2025-09-02 17:53:03] [Rank 0] step:6301/10000 train_time:482355ms step_avg:76.55ms +[2025-09-02 17:53:03] [Rank 0] step:6301/10000 train_time:482355ms step_avg:76.55ms +[2025-09-02 17:53:05] [Rank 0] step:6321/10000 train_time:484054ms step_avg:76.58ms +[2025-09-02 17:53:05] [Rank 0] step:6321/10000 train_time:484054ms step_avg:76.58ms +[2025-09-02 17:53:07] [Rank 0] step:6341/10000 train_time:485694ms step_avg:76.60ms +[2025-09-02 17:53:07] [Rank 0] step:6341/10000 train_time:485694ms step_avg:76.60ms +[2025-09-02 17:53:08] [Rank 0] step:6361/10000 train_time:487306ms step_avg:76.61ms +[2025-09-02 17:53:08] [Rank 0] step:6361/10000 train_time:487306ms step_avg:76.61ms +[2025-09-02 17:53:10] [Rank 0] step:6381/10000 train_time:488916ms step_avg:76.62ms +[2025-09-02 17:53:10] [Rank 0] step:6381/10000 train_time:488916ms step_avg:76.62ms +[2025-09-02 17:53:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:53:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:53:24] [Rank 0] PRINT: step:6400/10000 val_loss:3.7813 svd_entropy: attn_qk:H=0.7733,top10E=0.26,eRank=174.1,q75/q25=58.35 attn_vo:H=0.8450,top10E=0.14,eRank=298.7,q75/q25=47.77 mlp_w1:H=0.9076,top10E=0.14,eRank=418.8,q75/q25=4.54 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.85 vo_prod:H=0.7453,top10E=0.23,eRank=147.4,q75/q25=3351.56 train_time:490601ms step_avg:76.66ms +[2025-09-02 17:53:24] [Rank 0] PRINT: step:6400/10000 val_loss:3.7813 svd_entropy: attn_qk:H=0.7733,top10E=0.26,eRank=174.1,q75/q25=58.35 attn_vo:H=0.8450,top10E=0.14,eRank=298.7,q75/q25=47.77 mlp_w1:H=0.9076,top10E=0.14,eRank=418.8,q75/q25=4.54 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.85 vo_prod:H=0.7453,top10E=0.23,eRank=147.4,q75/q25=3351.56 train_time:490601ms step_avg:76.66ms +[2025-09-02 17:53:24] [Rank 0] step:6401/10000 train_time:490616ms step_avg:76.65ms +[2025-09-02 17:53:24] [Rank 0] step:6401/10000 train_time:490616ms step_avg:76.65ms +[2025-09-02 17:53:25] [Rank 0] step:6421/10000 train_time:492145ms step_avg:76.65ms +[2025-09-02 17:53:25] [Rank 0] step:6421/10000 train_time:492145ms step_avg:76.65ms +[2025-09-02 17:53:27] [Rank 0] step:6441/10000 train_time:493749ms step_avg:76.66ms +[2025-09-02 17:53:27] [Rank 0] step:6441/10000 train_time:493749ms step_avg:76.66ms +[2025-09-02 17:53:28] [Rank 0] step:6461/10000 train_time:495357ms step_avg:76.67ms +[2025-09-02 17:53:28] [Rank 0] step:6461/10000 train_time:495357ms step_avg:76.67ms +[2025-09-02 17:53:30] [Rank 0] step:6481/10000 train_time:496971ms step_avg:76.68ms +[2025-09-02 17:53:30] [Rank 0] step:6481/10000 train_time:496971ms step_avg:76.68ms +[2025-09-02 17:53:32] [Rank 0] step:6501/10000 train_time:498577ms step_avg:76.69ms +[2025-09-02 17:53:32] [Rank 0] step:6501/10000 train_time:498577ms step_avg:76.69ms +[2025-09-02 17:53:33] [Rank 0] step:6521/10000 train_time:500179ms step_avg:76.70ms +[2025-09-02 17:53:33] [Rank 0] step:6521/10000 train_time:500179ms step_avg:76.70ms +[2025-09-02 
17:53:35] [Rank 0] step:6541/10000 train_time:501789ms step_avg:76.71ms +[2025-09-02 17:53:35] [Rank 0] step:6541/10000 train_time:501789ms step_avg:76.71ms +[2025-09-02 17:53:37] [Rank 0] step:6561/10000 train_time:503400ms step_avg:76.73ms +[2025-09-02 17:53:37] [Rank 0] step:6561/10000 train_time:503400ms step_avg:76.73ms +[2025-09-02 17:53:38] [Rank 0] step:6581/10000 train_time:505010ms step_avg:76.74ms +[2025-09-02 17:53:38] [Rank 0] step:6581/10000 train_time:505010ms step_avg:76.74ms +[2025-09-02 17:53:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:53:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:53:52] [Rank 0] PRINT: step:6600/10000 val_loss:3.7668 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.0,q75/q25=58.07 attn_vo:H=0.8458,top10E=0.13,eRank=300.0,q75/q25=47.29 mlp_w1:H=0.9084,top10E=0.14,eRank=421.0,q75/q25=4.52 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.85 vo_prod:H=0.7464,top10E=0.23,eRank=148.5,q75/q25=3340.23 train_time:506702ms step_avg:76.77ms +[2025-09-02 17:53:52] [Rank 0] PRINT: step:6600/10000 val_loss:3.7668 svd_entropy: attn_qk:H=0.7742,top10E=0.26,eRank=175.0,q75/q25=58.07 attn_vo:H=0.8458,top10E=0.13,eRank=300.0,q75/q25=47.29 mlp_w1:H=0.9084,top10E=0.14,eRank=421.0,q75/q25=4.52 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.85 vo_prod:H=0.7464,top10E=0.23,eRank=148.5,q75/q25=3340.23 train_time:506702ms step_avg:76.77ms +[2025-09-02 17:53:52] [Rank 0] step:6601/10000 train_time:506717ms step_avg:76.76ms +[2025-09-02 17:53:52] [Rank 0] step:6601/10000 train_time:506717ms step_avg:76.76ms +[2025-09-02 17:53:53] [Rank 0] step:6621/10000 train_time:508260ms step_avg:76.76ms +[2025-09-02 17:53:53] [Rank 0] step:6621/10000 train_time:508260ms step_avg:76.76ms +[2025-09-02 17:53:55] [Rank 0] step:6641/10000 train_time:509872ms 
step_avg:76.78ms +[2025-09-02 17:53:55] [Rank 0] step:6641/10000 train_time:509872ms step_avg:76.78ms +[2025-09-02 17:53:57] [Rank 0] step:6661/10000 train_time:511479ms step_avg:76.79ms +[2025-09-02 17:53:57] [Rank 0] step:6661/10000 train_time:511479ms step_avg:76.79ms +[2025-09-02 17:53:58] [Rank 0] step:6681/10000 train_time:513104ms step_avg:76.80ms +[2025-09-02 17:53:58] [Rank 0] step:6681/10000 train_time:513104ms step_avg:76.80ms +[2025-09-02 17:54:00] [Rank 0] step:6701/10000 train_time:514750ms step_avg:76.82ms +[2025-09-02 17:54:00] [Rank 0] step:6701/10000 train_time:514750ms step_avg:76.82ms +[2025-09-02 17:54:02] [Rank 0] step:6721/10000 train_time:516384ms step_avg:76.83ms +[2025-09-02 17:54:02] [Rank 0] step:6721/10000 train_time:516384ms step_avg:76.83ms +[2025-09-02 17:54:03] [Rank 0] step:6741/10000 train_time:518018ms step_avg:76.85ms +[2025-09-02 17:54:03] [Rank 0] step:6741/10000 train_time:518018ms step_avg:76.85ms +[2025-09-02 17:54:05] [Rank 0] step:6761/10000 train_time:519653ms step_avg:76.86ms +[2025-09-02 17:54:05] [Rank 0] step:6761/10000 train_time:519653ms step_avg:76.86ms +[2025-09-02 17:54:06] [Rank 0] step:6781/10000 train_time:521291ms step_avg:76.88ms +[2025-09-02 17:54:06] [Rank 0] step:6781/10000 train_time:521291ms step_avg:76.88ms +[2025-09-02 17:54:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:54:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:54:20] [Rank 0] PRINT: step:6800/10000 val_loss:3.7510 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=175.8,q75/q25=57.64 attn_vo:H=0.8466,top10E=0.13,eRank=301.3,q75/q25=46.83 mlp_w1:H=0.9091,top10E=0.14,eRank=423.0,q75/q25=4.49 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7478,top10E=0.23,eRank=149.8,q75/q25=3406.90 train_time:523048ms step_avg:76.92ms +[2025-09-02 17:54:20] [Rank 0] PRINT: step:6800/10000 val_loss:3.7510 svd_entropy: attn_qk:H=0.7748,top10E=0.26,eRank=175.8,q75/q25=57.64 attn_vo:H=0.8466,top10E=0.13,eRank=301.3,q75/q25=46.83 mlp_w1:H=0.9091,top10E=0.14,eRank=423.0,q75/q25=4.49 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7478,top10E=0.23,eRank=149.8,q75/q25=3406.90 train_time:523048ms step_avg:76.92ms +[2025-09-02 17:54:20] [Rank 0] step:6801/10000 train_time:523063ms step_avg:76.91ms +[2025-09-02 17:54:20] [Rank 0] step:6801/10000 train_time:523063ms step_avg:76.91ms +[2025-09-02 17:54:21] [Rank 0] step:6821/10000 train_time:524619ms step_avg:76.91ms +[2025-09-02 17:54:21] [Rank 0] step:6821/10000 train_time:524619ms step_avg:76.91ms +[2025-09-02 17:54:23] [Rank 0] step:6841/10000 train_time:526256ms step_avg:76.93ms +[2025-09-02 17:54:23] [Rank 0] step:6841/10000 train_time:526256ms step_avg:76.93ms +[2025-09-02 17:54:25] [Rank 0] step:6861/10000 train_time:527887ms step_avg:76.94ms +[2025-09-02 17:54:25] [Rank 0] step:6861/10000 train_time:527887ms step_avg:76.94ms +[2025-09-02 17:54:26] [Rank 0] step:6881/10000 train_time:529520ms step_avg:76.95ms +[2025-09-02 17:54:26] [Rank 0] step:6881/10000 train_time:529520ms step_avg:76.95ms +[2025-09-02 17:54:28] [Rank 0] step:6901/10000 train_time:531156ms step_avg:76.97ms +[2025-09-02 17:54:28] [Rank 0] step:6901/10000 train_time:531156ms step_avg:76.97ms +[2025-09-02 17:54:30] [Rank 0] step:6921/10000 train_time:532791ms step_avg:76.98ms +[2025-09-02 17:54:30] [Rank 0] step:6921/10000 train_time:532791ms step_avg:76.98ms +[2025-09-02 
17:54:31] [Rank 0] step:6941/10000 train_time:534433ms step_avg:77.00ms +[2025-09-02 17:54:31] [Rank 0] step:6941/10000 train_time:534433ms step_avg:77.00ms +[2025-09-02 17:54:33] [Rank 0] step:6961/10000 train_time:536085ms step_avg:77.01ms +[2025-09-02 17:54:33] [Rank 0] step:6961/10000 train_time:536085ms step_avg:77.01ms +[2025-09-02 17:54:34] [Rank 0] step:6981/10000 train_time:537722ms step_avg:77.03ms +[2025-09-02 17:54:34] [Rank 0] step:6981/10000 train_time:537722ms step_avg:77.03ms +[2025-09-02 17:54:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:54:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:54:48] [Rank 0] PRINT: step:7000/10000 val_loss:3.7359 svd_entropy: attn_qk:H=0.7755,top10E=0.25,eRank=176.5,q75/q25=57.26 attn_vo:H=0.8473,top10E=0.13,eRank=302.5,q75/q25=46.57 mlp_w1:H=0.9098,top10E=0.14,eRank=424.9,q75/q25=4.46 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7488,top10E=0.23,eRank=150.8,q75/q25=3399.08 train_time:539445ms step_avg:77.06ms +[2025-09-02 17:54:48] [Rank 0] PRINT: step:7000/10000 val_loss:3.7359 svd_entropy: attn_qk:H=0.7755,top10E=0.25,eRank=176.5,q75/q25=57.26 attn_vo:H=0.8473,top10E=0.13,eRank=302.5,q75/q25=46.57 mlp_w1:H=0.9098,top10E=0.14,eRank=424.9,q75/q25=4.46 mlp_w2:H=0.9713,top10E=0.04,eRank=634.8,q75/q25=2.84 vo_prod:H=0.7488,top10E=0.23,eRank=150.8,q75/q25=3399.08 train_time:539445ms step_avg:77.06ms +[2025-09-02 17:54:48] [Rank 0] step:7001/10000 train_time:539459ms step_avg:77.05ms +[2025-09-02 17:54:48] [Rank 0] step:7001/10000 train_time:539459ms step_avg:77.05ms +[2025-09-02 17:54:50] [Rank 0] step:7021/10000 train_time:541027ms step_avg:77.06ms +[2025-09-02 17:54:50] [Rank 0] step:7021/10000 train_time:541027ms step_avg:77.06ms +[2025-09-02 17:54:51] [Rank 0] step:7041/10000 train_time:542662ms 
step_avg:77.07ms +[2025-09-02 17:54:51] [Rank 0] step:7041/10000 train_time:542662ms step_avg:77.07ms +[2025-09-02 17:54:53] [Rank 0] step:7061/10000 train_time:544293ms step_avg:77.08ms +[2025-09-02 17:54:53] [Rank 0] step:7061/10000 train_time:544293ms step_avg:77.08ms +[2025-09-02 17:54:54] [Rank 0] step:7081/10000 train_time:545924ms step_avg:77.10ms +[2025-09-02 17:54:54] [Rank 0] step:7081/10000 train_time:545924ms step_avg:77.10ms +[2025-09-02 17:54:56] [Rank 0] step:7101/10000 train_time:547563ms step_avg:77.11ms +[2025-09-02 17:54:56] [Rank 0] step:7101/10000 train_time:547563ms step_avg:77.11ms +[2025-09-02 17:54:58] [Rank 0] step:7121/10000 train_time:549200ms step_avg:77.12ms +[2025-09-02 17:54:58] [Rank 0] step:7121/10000 train_time:549200ms step_avg:77.12ms +[2025-09-02 17:54:59] [Rank 0] step:7141/10000 train_time:550835ms step_avg:77.14ms +[2025-09-02 17:54:59] [Rank 0] step:7141/10000 train_time:550835ms step_avg:77.14ms +[2025-09-02 17:55:01] [Rank 0] step:7161/10000 train_time:552502ms step_avg:77.15ms +[2025-09-02 17:55:01] [Rank 0] step:7161/10000 train_time:552502ms step_avg:77.15ms +[2025-09-02 17:55:03] [Rank 0] step:7181/10000 train_time:554108ms step_avg:77.16ms +[2025-09-02 17:55:03] [Rank 0] step:7181/10000 train_time:554108ms step_avg:77.16ms +[2025-09-02 17:55:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:55:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:55:16] [Rank 0] PRINT: step:7200/10000 val_loss:3.7268 svd_entropy: attn_qk:H=0.7762,top10E=0.25,eRank=177.2,q75/q25=57.07 attn_vo:H=0.8480,top10E=0.13,eRank=303.6,q75/q25=46.06 mlp_w1:H=0.9104,top10E=0.14,eRank=426.6,q75/q25=4.44 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7499,top10E=0.23,eRank=151.8,q75/q25=3416.10 train_time:555831ms step_avg:77.20ms +[2025-09-02 17:55:16] [Rank 0] PRINT: step:7200/10000 val_loss:3.7268 svd_entropy: attn_qk:H=0.7762,top10E=0.25,eRank=177.2,q75/q25=57.07 attn_vo:H=0.8480,top10E=0.13,eRank=303.6,q75/q25=46.06 mlp_w1:H=0.9104,top10E=0.14,eRank=426.6,q75/q25=4.44 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7499,top10E=0.23,eRank=151.8,q75/q25=3416.10 train_time:555831ms step_avg:77.20ms +[2025-09-02 17:55:16] [Rank 0] step:7201/10000 train_time:555844ms step_avg:77.19ms +[2025-09-02 17:55:16] [Rank 0] step:7201/10000 train_time:555844ms step_avg:77.19ms +[2025-09-02 17:55:18] [Rank 0] step:7221/10000 train_time:557436ms step_avg:77.20ms +[2025-09-02 17:55:18] [Rank 0] step:7221/10000 train_time:557436ms step_avg:77.20ms +[2025-09-02 17:55:19] [Rank 0] step:7241/10000 train_time:559066ms step_avg:77.21ms +[2025-09-02 17:55:19] [Rank 0] step:7241/10000 train_time:559066ms step_avg:77.21ms +[2025-09-02 17:55:21] [Rank 0] step:7261/10000 train_time:560694ms step_avg:77.22ms +[2025-09-02 17:55:21] [Rank 0] step:7261/10000 train_time:560694ms step_avg:77.22ms +[2025-09-02 17:55:23] [Rank 0] step:7281/10000 train_time:562338ms step_avg:77.23ms +[2025-09-02 17:55:23] [Rank 0] step:7281/10000 train_time:562338ms step_avg:77.23ms +[2025-09-02 17:55:24] [Rank 0] step:7301/10000 train_time:563971ms step_avg:77.25ms +[2025-09-02 17:55:24] [Rank 0] step:7301/10000 train_time:563971ms step_avg:77.25ms +[2025-09-02 17:55:26] [Rank 0] step:7321/10000 train_time:565613ms step_avg:77.26ms +[2025-09-02 17:55:26] [Rank 0] step:7321/10000 train_time:565613ms step_avg:77.26ms +[2025-09-02 
17:55:28] [Rank 0] step:7341/10000 train_time:567250ms step_avg:77.27ms +[2025-09-02 17:55:28] [Rank 0] step:7341/10000 train_time:567250ms step_avg:77.27ms +[2025-09-02 17:55:29] [Rank 0] step:7361/10000 train_time:568891ms step_avg:77.28ms +[2025-09-02 17:55:29] [Rank 0] step:7361/10000 train_time:568891ms step_avg:77.28ms +[2025-09-02 17:55:31] [Rank 0] step:7381/10000 train_time:570535ms step_avg:77.30ms +[2025-09-02 17:55:31] [Rank 0] step:7381/10000 train_time:570535ms step_avg:77.30ms +[2025-09-02 17:55:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:55:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:55:44] [Rank 0] PRINT: step:7400/10000 val_loss:3.7078 svd_entropy: attn_qk:H=0.7768,top10E=0.25,eRank=177.9,q75/q25=56.71 attn_vo:H=0.8486,top10E=0.13,eRank=304.6,q75/q25=45.84 mlp_w1:H=0.9109,top10E=0.14,eRank=428.0,q75/q25=4.42 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7509,top10E=0.23,eRank=152.8,q75/q25=3369.98 train_time:572240ms step_avg:77.33ms +[2025-09-02 17:55:44] [Rank 0] PRINT: step:7400/10000 val_loss:3.7078 svd_entropy: attn_qk:H=0.7768,top10E=0.25,eRank=177.9,q75/q25=56.71 attn_vo:H=0.8486,top10E=0.13,eRank=304.6,q75/q25=45.84 mlp_w1:H=0.9109,top10E=0.14,eRank=428.0,q75/q25=4.42 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7509,top10E=0.23,eRank=152.8,q75/q25=3369.98 train_time:572240ms step_avg:77.33ms +[2025-09-02 17:55:44] [Rank 0] step:7401/10000 train_time:572254ms step_avg:77.32ms +[2025-09-02 17:55:44] [Rank 0] step:7401/10000 train_time:572254ms step_avg:77.32ms +[2025-09-02 17:55:46] [Rank 0] step:7421/10000 train_time:573817ms step_avg:77.32ms +[2025-09-02 17:55:46] [Rank 0] step:7421/10000 train_time:573817ms step_avg:77.32ms +[2025-09-02 17:55:48] [Rank 0] step:7441/10000 train_time:575448ms 
step_avg:77.33ms +[2025-09-02 17:55:48] [Rank 0] step:7441/10000 train_time:575448ms step_avg:77.33ms +[2025-09-02 17:55:49] [Rank 0] step:7461/10000 train_time:577084ms step_avg:77.35ms +[2025-09-02 17:55:49] [Rank 0] step:7461/10000 train_time:577084ms step_avg:77.35ms +[2025-09-02 17:55:51] [Rank 0] step:7481/10000 train_time:578727ms step_avg:77.36ms +[2025-09-02 17:55:51] [Rank 0] step:7481/10000 train_time:578727ms step_avg:77.36ms +[2025-09-02 17:55:53] [Rank 0] step:7501/10000 train_time:580368ms step_avg:77.37ms +[2025-09-02 17:55:53] [Rank 0] step:7501/10000 train_time:580368ms step_avg:77.37ms +[2025-09-02 17:55:54] [Rank 0] step:7521/10000 train_time:582010ms step_avg:77.38ms +[2025-09-02 17:55:54] [Rank 0] step:7521/10000 train_time:582010ms step_avg:77.38ms +[2025-09-02 17:55:56] [Rank 0] step:7541/10000 train_time:583661ms step_avg:77.40ms +[2025-09-02 17:55:56] [Rank 0] step:7541/10000 train_time:583661ms step_avg:77.40ms +[2025-09-02 17:55:57] [Rank 0] step:7561/10000 train_time:585286ms step_avg:77.41ms +[2025-09-02 17:55:57] [Rank 0] step:7561/10000 train_time:585286ms step_avg:77.41ms +[2025-09-02 17:55:59] [Rank 0] step:7581/10000 train_time:586934ms step_avg:77.42ms +[2025-09-02 17:55:59] [Rank 0] step:7581/10000 train_time:586934ms step_avg:77.42ms +[2025-09-02 17:56:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:56:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:56:13] [Rank 0] PRINT: step:7600/10000 val_loss:3.7011 svd_entropy: attn_qk:H=0.7774,top10E=0.25,eRank=178.5,q75/q25=56.32 attn_vo:H=0.8491,top10E=0.13,eRank=305.4,q75/q25=45.40 mlp_w1:H=0.9114,top10E=0.13,eRank=429.4,q75/q25=4.40 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7517,top10E=0.23,eRank=153.7,q75/q25=3391.67 train_time:588668ms step_avg:77.46ms +[2025-09-02 17:56:13] [Rank 0] PRINT: step:7600/10000 val_loss:3.7011 svd_entropy: attn_qk:H=0.7774,top10E=0.25,eRank=178.5,q75/q25=56.32 attn_vo:H=0.8491,top10E=0.13,eRank=305.4,q75/q25=45.40 mlp_w1:H=0.9114,top10E=0.13,eRank=429.4,q75/q25=4.40 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7517,top10E=0.23,eRank=153.7,q75/q25=3391.67 train_time:588668ms step_avg:77.46ms +[2025-09-02 17:56:13] [Rank 0] step:7601/10000 train_time:588683ms step_avg:77.45ms +[2025-09-02 17:56:13] [Rank 0] step:7601/10000 train_time:588683ms step_avg:77.45ms +[2025-09-02 17:56:14] [Rank 0] step:7621/10000 train_time:590244ms step_avg:77.45ms +[2025-09-02 17:56:14] [Rank 0] step:7621/10000 train_time:590244ms step_avg:77.45ms +[2025-09-02 17:56:16] [Rank 0] step:7641/10000 train_time:591877ms step_avg:77.46ms +[2025-09-02 17:56:16] [Rank 0] step:7641/10000 train_time:591877ms step_avg:77.46ms +[2025-09-02 17:56:18] [Rank 0] step:7661/10000 train_time:593517ms step_avg:77.47ms +[2025-09-02 17:56:18] [Rank 0] step:7661/10000 train_time:593517ms step_avg:77.47ms +[2025-09-02 17:56:19] [Rank 0] step:7681/10000 train_time:595148ms step_avg:77.48ms +[2025-09-02 17:56:19] [Rank 0] step:7681/10000 train_time:595148ms step_avg:77.48ms +[2025-09-02 17:56:21] [Rank 0] step:7701/10000 train_time:596785ms step_avg:77.49ms +[2025-09-02 17:56:21] [Rank 0] step:7701/10000 train_time:596785ms step_avg:77.49ms +[2025-09-02 17:56:23] [Rank 0] step:7721/10000 train_time:598437ms step_avg:77.51ms +[2025-09-02 17:56:23] [Rank 0] step:7721/10000 train_time:598437ms step_avg:77.51ms +[2025-09-02 
17:56:24] [Rank 0] step:7741/10000 train_time:600078ms step_avg:77.52ms +[2025-09-02 17:56:24] [Rank 0] step:7741/10000 train_time:600078ms step_avg:77.52ms +[2025-09-02 17:56:26] [Rank 0] step:7761/10000 train_time:601724ms step_avg:77.53ms +[2025-09-02 17:56:26] [Rank 0] step:7761/10000 train_time:601724ms step_avg:77.53ms +[2025-09-02 17:56:27] [Rank 0] step:7781/10000 train_time:603365ms step_avg:77.54ms +[2025-09-02 17:56:27] [Rank 0] step:7781/10000 train_time:603365ms step_avg:77.54ms +[2025-09-02 17:56:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:56:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:56:41] [Rank 0] PRINT: step:7800/10000 val_loss:3.6878 svd_entropy: attn_qk:H=0.7778,top10E=0.25,eRank=179.0,q75/q25=56.23 attn_vo:H=0.8496,top10E=0.13,eRank=306.3,q75/q25=45.24 mlp_w1:H=0.9119,top10E=0.13,eRank=430.7,q75/q25=4.39 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7526,top10E=0.23,eRank=154.6,q75/q25=3378.40 train_time:605102ms step_avg:77.58ms +[2025-09-02 17:56:41] [Rank 0] PRINT: step:7800/10000 val_loss:3.6878 svd_entropy: attn_qk:H=0.7778,top10E=0.25,eRank=179.0,q75/q25=56.23 attn_vo:H=0.8496,top10E=0.13,eRank=306.3,q75/q25=45.24 mlp_w1:H=0.9119,top10E=0.13,eRank=430.7,q75/q25=4.39 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7526,top10E=0.23,eRank=154.6,q75/q25=3378.40 train_time:605102ms step_avg:77.58ms +[2025-09-02 17:56:41] [Rank 0] step:7801/10000 train_time:605116ms step_avg:77.57ms +[2025-09-02 17:56:41] [Rank 0] step:7801/10000 train_time:605116ms step_avg:77.57ms +[2025-09-02 17:56:43] [Rank 0] step:7821/10000 train_time:606696ms step_avg:77.57ms +[2025-09-02 17:56:43] [Rank 0] step:7821/10000 train_time:606696ms step_avg:77.57ms +[2025-09-02 17:56:44] [Rank 0] step:7841/10000 train_time:608332ms 
step_avg:77.58ms +[2025-09-02 17:56:44] [Rank 0] step:7841/10000 train_time:608332ms step_avg:77.58ms +[2025-09-02 17:56:46] [Rank 0] step:7861/10000 train_time:609980ms step_avg:77.60ms +[2025-09-02 17:56:46] [Rank 0] step:7861/10000 train_time:609980ms step_avg:77.60ms +[2025-09-02 17:56:48] [Rank 0] step:7881/10000 train_time:611626ms step_avg:77.61ms +[2025-09-02 17:56:48] [Rank 0] step:7881/10000 train_time:611626ms step_avg:77.61ms +[2025-09-02 17:56:49] [Rank 0] step:7901/10000 train_time:613263ms step_avg:77.62ms +[2025-09-02 17:56:49] [Rank 0] step:7901/10000 train_time:613263ms step_avg:77.62ms +[2025-09-02 17:56:51] [Rank 0] step:7921/10000 train_time:614908ms step_avg:77.63ms +[2025-09-02 17:56:51] [Rank 0] step:7921/10000 train_time:614908ms step_avg:77.63ms +[2025-09-02 17:56:53] [Rank 0] step:7941/10000 train_time:616556ms step_avg:77.64ms +[2025-09-02 17:56:53] [Rank 0] step:7941/10000 train_time:616556ms step_avg:77.64ms +[2025-09-02 17:56:54] [Rank 0] step:7961/10000 train_time:618201ms step_avg:77.65ms +[2025-09-02 17:56:54] [Rank 0] step:7961/10000 train_time:618201ms step_avg:77.65ms +[2025-09-02 17:56:56] [Rank 0] step:7981/10000 train_time:619837ms step_avg:77.66ms +[2025-09-02 17:56:56] [Rank 0] step:7981/10000 train_time:619837ms step_avg:77.66ms +[2025-09-02 17:56:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:56:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:57:09] [Rank 0] PRINT: step:8000/10000 val_loss:3.6733 svd_entropy: attn_qk:H=0.7783,top10E=0.25,eRank=179.5,q75/q25=55.89 attn_vo:H=0.8501,top10E=0.13,eRank=307.0,q75/q25=44.78 mlp_w1:H=0.9123,top10E=0.13,eRank=431.8,q75/q25=4.37 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7534,top10E=0.22,eRank=155.4,q75/q25=3389.87 train_time:621566ms step_avg:77.70ms +[2025-09-02 17:57:09] [Rank 0] PRINT: step:8000/10000 val_loss:3.6733 svd_entropy: attn_qk:H=0.7783,top10E=0.25,eRank=179.5,q75/q25=55.89 attn_vo:H=0.8501,top10E=0.13,eRank=307.0,q75/q25=44.78 mlp_w1:H=0.9123,top10E=0.13,eRank=431.8,q75/q25=4.37 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7534,top10E=0.22,eRank=155.4,q75/q25=3389.87 train_time:621566ms step_avg:77.70ms +[2025-09-02 17:57:09] [Rank 0] step:8001/10000 train_time:621581ms step_avg:77.69ms +[2025-09-02 17:57:09] [Rank 0] step:8001/10000 train_time:621581ms step_avg:77.69ms +[2025-09-02 17:57:11] [Rank 0] step:8021/10000 train_time:623162ms step_avg:77.69ms +[2025-09-02 17:57:11] [Rank 0] step:8021/10000 train_time:623162ms step_avg:77.69ms +[2025-09-02 17:57:13] [Rank 0] step:8041/10000 train_time:624808ms step_avg:77.70ms +[2025-09-02 17:57:13] [Rank 0] step:8041/10000 train_time:624808ms step_avg:77.70ms +[2025-09-02 17:57:14] [Rank 0] step:8061/10000 train_time:626447ms step_avg:77.71ms +[2025-09-02 17:57:14] [Rank 0] step:8061/10000 train_time:626447ms step_avg:77.71ms +[2025-09-02 17:57:16] [Rank 0] step:8081/10000 train_time:628079ms step_avg:77.72ms +[2025-09-02 17:57:16] [Rank 0] step:8081/10000 train_time:628079ms step_avg:77.72ms +[2025-09-02 17:57:18] [Rank 0] step:8101/10000 train_time:629730ms step_avg:77.73ms +[2025-09-02 17:57:18] [Rank 0] step:8101/10000 train_time:629730ms step_avg:77.73ms +[2025-09-02 17:57:19] [Rank 0] step:8121/10000 train_time:631405ms step_avg:77.75ms +[2025-09-02 17:57:19] [Rank 0] step:8121/10000 train_time:631405ms step_avg:77.75ms +[2025-09-02 
17:57:21] [Rank 0] step:8141/10000 train_time:633224ms step_avg:77.78ms +[2025-09-02 17:57:21] [Rank 0] step:8141/10000 train_time:633224ms step_avg:77.78ms +[2025-09-02 17:57:23] [Rank 0] step:8161/10000 train_time:634882ms step_avg:77.79ms +[2025-09-02 17:57:23] [Rank 0] step:8161/10000 train_time:634882ms step_avg:77.79ms +[2025-09-02 17:57:24] [Rank 0] step:8181/10000 train_time:636550ms step_avg:77.81ms +[2025-09-02 17:57:24] [Rank 0] step:8181/10000 train_time:636550ms step_avg:77.81ms +[2025-09-02 17:57:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:57:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:57:38] [Rank 0] PRINT: step:8200/10000 val_loss:3.6625 svd_entropy: attn_qk:H=0.7786,top10E=0.25,eRank=179.8,q75/q25=55.55 attn_vo:H=0.8505,top10E=0.13,eRank=307.7,q75/q25=44.77 mlp_w1:H=0.9126,top10E=0.13,eRank=432.8,q75/q25=4.35 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7540,top10E=0.22,eRank=156.0,q75/q25=3293.98 train_time:638330ms step_avg:77.85ms +[2025-09-02 17:57:38] [Rank 0] PRINT: step:8200/10000 val_loss:3.6625 svd_entropy: attn_qk:H=0.7786,top10E=0.25,eRank=179.8,q75/q25=55.55 attn_vo:H=0.8505,top10E=0.13,eRank=307.7,q75/q25=44.77 mlp_w1:H=0.9126,top10E=0.13,eRank=432.8,q75/q25=4.35 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7540,top10E=0.22,eRank=156.0,q75/q25=3293.98 train_time:638330ms step_avg:77.85ms +[2025-09-02 17:57:38] [Rank 0] step:8201/10000 train_time:638345ms step_avg:77.84ms +[2025-09-02 17:57:38] [Rank 0] step:8201/10000 train_time:638345ms step_avg:77.84ms +[2025-09-02 17:57:40] [Rank 0] step:8221/10000 train_time:639943ms step_avg:77.84ms +[2025-09-02 17:57:40] [Rank 0] step:8221/10000 train_time:639943ms step_avg:77.84ms +[2025-09-02 17:57:41] [Rank 0] step:8241/10000 train_time:641617ms 
step_avg:77.86ms +[2025-09-02 17:57:41] [Rank 0] step:8241/10000 train_time:641617ms step_avg:77.86ms +[2025-09-02 17:57:43] [Rank 0] step:8261/10000 train_time:643286ms step_avg:77.87ms +[2025-09-02 17:57:43] [Rank 0] step:8261/10000 train_time:643286ms step_avg:77.87ms +[2025-09-02 17:57:45] [Rank 0] step:8281/10000 train_time:644955ms step_avg:77.88ms +[2025-09-02 17:57:45] [Rank 0] step:8281/10000 train_time:644955ms step_avg:77.88ms +[2025-09-02 17:57:46] [Rank 0] step:8301/10000 train_time:646621ms step_avg:77.90ms +[2025-09-02 17:57:46] [Rank 0] step:8301/10000 train_time:646621ms step_avg:77.90ms +[2025-09-02 17:57:48] [Rank 0] step:8321/10000 train_time:648279ms step_avg:77.91ms +[2025-09-02 17:57:48] [Rank 0] step:8321/10000 train_time:648279ms step_avg:77.91ms +[2025-09-02 17:57:50] [Rank 0] step:8341/10000 train_time:649948ms step_avg:77.92ms +[2025-09-02 17:57:50] [Rank 0] step:8341/10000 train_time:649948ms step_avg:77.92ms +[2025-09-02 17:57:51] [Rank 0] step:8361/10000 train_time:651618ms step_avg:77.94ms +[2025-09-02 17:57:51] [Rank 0] step:8361/10000 train_time:651618ms step_avg:77.94ms +[2025-09-02 17:57:53] [Rank 0] step:8381/10000 train_time:653283ms step_avg:77.95ms +[2025-09-02 17:57:53] [Rank 0] step:8381/10000 train_time:653283ms step_avg:77.95ms +[2025-09-02 17:57:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:57:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:58:06] [Rank 0] PRINT: step:8400/10000 val_loss:3.6520 svd_entropy: attn_qk:H=0.7788,top10E=0.25,eRank=180.0,q75/q25=55.34 attn_vo:H=0.8509,top10E=0.13,eRank=308.4,q75/q25=44.51 mlp_w1:H=0.9130,top10E=0.13,eRank=433.8,q75/q25=4.33 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7547,top10E=0.22,eRank=156.7,q75/q25=3324.47 train_time:655034ms step_avg:77.98ms +[2025-09-02 17:58:06] [Rank 0] PRINT: step:8400/10000 val_loss:3.6520 svd_entropy: attn_qk:H=0.7788,top10E=0.25,eRank=180.0,q75/q25=55.34 attn_vo:H=0.8509,top10E=0.13,eRank=308.4,q75/q25=44.51 mlp_w1:H=0.9130,top10E=0.13,eRank=433.8,q75/q25=4.33 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7547,top10E=0.22,eRank=156.7,q75/q25=3324.47 train_time:655034ms step_avg:77.98ms +[2025-09-02 17:58:06] [Rank 0] step:8401/10000 train_time:655049ms step_avg:77.97ms +[2025-09-02 17:58:06] [Rank 0] step:8401/10000 train_time:655049ms step_avg:77.97ms +[2025-09-02 17:58:08] [Rank 0] step:8421/10000 train_time:656626ms step_avg:77.97ms +[2025-09-02 17:58:08] [Rank 0] step:8421/10000 train_time:656626ms step_avg:77.97ms +[2025-09-02 17:58:10] [Rank 0] step:8441/10000 train_time:658289ms step_avg:77.99ms +[2025-09-02 17:58:10] [Rank 0] step:8441/10000 train_time:658289ms step_avg:77.99ms +[2025-09-02 17:58:11] [Rank 0] step:8461/10000 train_time:659950ms step_avg:78.00ms +[2025-09-02 17:58:11] [Rank 0] step:8461/10000 train_time:659950ms step_avg:78.00ms +[2025-09-02 17:58:13] [Rank 0] step:8481/10000 train_time:661621ms step_avg:78.01ms +[2025-09-02 17:58:13] [Rank 0] step:8481/10000 train_time:661621ms step_avg:78.01ms +[2025-09-02 17:58:15] [Rank 0] step:8501/10000 train_time:663309ms step_avg:78.03ms +[2025-09-02 17:58:15] [Rank 0] step:8501/10000 train_time:663309ms step_avg:78.03ms +[2025-09-02 17:58:16] [Rank 0] step:8521/10000 train_time:664979ms step_avg:78.04ms +[2025-09-02 17:58:16] [Rank 0] step:8521/10000 train_time:664979ms step_avg:78.04ms +[2025-09-02 
17:58:18] [Rank 0] step:8541/10000 train_time:666659ms step_avg:78.05ms +[2025-09-02 17:58:18] [Rank 0] step:8541/10000 train_time:666659ms step_avg:78.05ms +[2025-09-02 17:58:20] [Rank 0] step:8561/10000 train_time:668331ms step_avg:78.07ms +[2025-09-02 17:58:20] [Rank 0] step:8561/10000 train_time:668331ms step_avg:78.07ms +[2025-09-02 17:58:21] [Rank 0] step:8581/10000 train_time:670001ms step_avg:78.08ms +[2025-09-02 17:58:21] [Rank 0] step:8581/10000 train_time:670001ms step_avg:78.08ms +[2025-09-02 17:58:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:58:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:58:35] [Rank 0] PRINT: step:8600/10000 val_loss:3.6424 svd_entropy: attn_qk:H=0.7791,top10E=0.25,eRank=180.3,q75/q25=55.12 attn_vo:H=0.8512,top10E=0.13,eRank=308.9,q75/q25=44.35 mlp_w1:H=0.9133,top10E=0.13,eRank=434.6,q75/q25=4.33 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7553,top10E=0.22,eRank=157.3,q75/q25=3319.35 train_time:671783ms step_avg:78.11ms +[2025-09-02 17:58:35] [Rank 0] PRINT: step:8600/10000 val_loss:3.6424 svd_entropy: attn_qk:H=0.7791,top10E=0.25,eRank=180.3,q75/q25=55.12 attn_vo:H=0.8512,top10E=0.13,eRank=308.9,q75/q25=44.35 mlp_w1:H=0.9133,top10E=0.13,eRank=434.6,q75/q25=4.33 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7553,top10E=0.22,eRank=157.3,q75/q25=3319.35 train_time:671783ms step_avg:78.11ms +[2025-09-02 17:58:35] [Rank 0] step:8601/10000 train_time:671797ms step_avg:78.11ms +[2025-09-02 17:58:35] [Rank 0] step:8601/10000 train_time:671797ms step_avg:78.11ms +[2025-09-02 17:58:37] [Rank 0] step:8621/10000 train_time:673403ms step_avg:78.11ms +[2025-09-02 17:58:37] [Rank 0] step:8621/10000 train_time:673403ms step_avg:78.11ms +[2025-09-02 17:58:38] [Rank 0] step:8641/10000 train_time:675070ms 
step_avg:78.12ms +[2025-09-02 17:58:38] [Rank 0] step:8641/10000 train_time:675070ms step_avg:78.12ms +[2025-09-02 17:58:40] [Rank 0] step:8661/10000 train_time:676740ms step_avg:78.14ms +[2025-09-02 17:58:40] [Rank 0] step:8661/10000 train_time:676740ms step_avg:78.14ms +[2025-09-02 17:58:42] [Rank 0] step:8681/10000 train_time:678406ms step_avg:78.15ms +[2025-09-02 17:58:42] [Rank 0] step:8681/10000 train_time:678406ms step_avg:78.15ms +[2025-09-02 17:58:43] [Rank 0] step:8701/10000 train_time:680068ms step_avg:78.16ms +[2025-09-02 17:58:43] [Rank 0] step:8701/10000 train_time:680068ms step_avg:78.16ms +[2025-09-02 17:58:45] [Rank 0] step:8721/10000 train_time:681740ms step_avg:78.17ms +[2025-09-02 17:58:45] [Rank 0] step:8721/10000 train_time:681740ms step_avg:78.17ms +[2025-09-02 17:58:47] [Rank 0] step:8741/10000 train_time:683400ms step_avg:78.18ms +[2025-09-02 17:58:47] [Rank 0] step:8741/10000 train_time:683400ms step_avg:78.18ms +[2025-09-02 17:58:48] [Rank 0] step:8761/10000 train_time:685068ms step_avg:78.20ms +[2025-09-02 17:58:48] [Rank 0] step:8761/10000 train_time:685068ms step_avg:78.20ms +[2025-09-02 17:58:50] [Rank 0] step:8781/10000 train_time:686740ms step_avg:78.21ms +[2025-09-02 17:58:50] [Rank 0] step:8781/10000 train_time:686740ms step_avg:78.21ms +[2025-09-02 17:58:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:58:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:59:03] [Rank 0] PRINT: step:8800/10000 val_loss:3.6332 svd_entropy: attn_qk:H=0.7794,top10E=0.25,eRank=180.6,q75/q25=55.10 attn_vo:H=0.8516,top10E=0.13,eRank=309.5,q75/q25=44.16 mlp_w1:H=0.9135,top10E=0.13,eRank=435.4,q75/q25=4.32 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7560,top10E=0.22,eRank=157.9,q75/q25=3298.93 train_time:688499ms step_avg:78.24ms +[2025-09-02 17:59:03] [Rank 0] PRINT: step:8800/10000 val_loss:3.6332 svd_entropy: attn_qk:H=0.7794,top10E=0.25,eRank=180.6,q75/q25=55.10 attn_vo:H=0.8516,top10E=0.13,eRank=309.5,q75/q25=44.16 mlp_w1:H=0.9135,top10E=0.13,eRank=435.4,q75/q25=4.32 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7560,top10E=0.22,eRank=157.9,q75/q25=3298.93 train_time:688499ms step_avg:78.24ms +[2025-09-02 17:59:03] [Rank 0] step:8801/10000 train_time:688514ms step_avg:78.23ms +[2025-09-02 17:59:03] [Rank 0] step:8801/10000 train_time:688514ms step_avg:78.23ms +[2025-09-02 17:59:05] [Rank 0] step:8821/10000 train_time:690101ms step_avg:78.23ms +[2025-09-02 17:59:05] [Rank 0] step:8821/10000 train_time:690101ms step_avg:78.23ms +[2025-09-02 17:59:07] [Rank 0] step:8841/10000 train_time:691791ms step_avg:78.25ms +[2025-09-02 17:59:07] [Rank 0] step:8841/10000 train_time:691791ms step_avg:78.25ms +[2025-09-02 17:59:08] [Rank 0] step:8861/10000 train_time:693456ms step_avg:78.26ms +[2025-09-02 17:59:08] [Rank 0] step:8861/10000 train_time:693456ms step_avg:78.26ms +[2025-09-02 17:59:10] [Rank 0] step:8881/10000 train_time:695127ms step_avg:78.27ms +[2025-09-02 17:59:10] [Rank 0] step:8881/10000 train_time:695127ms step_avg:78.27ms +[2025-09-02 17:59:12] [Rank 0] step:8901/10000 train_time:696796ms step_avg:78.28ms +[2025-09-02 17:59:12] [Rank 0] step:8901/10000 train_time:696796ms step_avg:78.28ms +[2025-09-02 17:59:14] [Rank 0] step:8921/10000 train_time:698480ms step_avg:78.30ms +[2025-09-02 17:59:14] [Rank 0] step:8921/10000 train_time:698480ms step_avg:78.30ms +[2025-09-02 
17:59:15] [Rank 0] step:8941/10000 train_time:700160ms step_avg:78.31ms +[2025-09-02 17:59:15] [Rank 0] step:8941/10000 train_time:700160ms step_avg:78.31ms +[2025-09-02 17:59:17] [Rank 0] step:8961/10000 train_time:701824ms step_avg:78.32ms +[2025-09-02 17:59:17] [Rank 0] step:8961/10000 train_time:701824ms step_avg:78.32ms +[2025-09-02 17:59:19] [Rank 0] step:8981/10000 train_time:703491ms step_avg:78.33ms +[2025-09-02 17:59:19] [Rank 0] step:8981/10000 train_time:703491ms step_avg:78.33ms +[2025-09-02 17:59:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:59:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:59:32] [Rank 0] PRINT: step:9000/10000 val_loss:3.6241 svd_entropy: attn_qk:H=0.7796,top10E=0.25,eRank=180.8,q75/q25=54.94 attn_vo:H=0.8519,top10E=0.13,eRank=310.0,q75/q25=43.96 mlp_w1:H=0.9138,top10E=0.13,eRank=436.0,q75/q25=4.31 mlp_w2:H=0.9713,top10E=0.04,eRank=635.0,q75/q25=2.84 vo_prod:H=0.7566,top10E=0.22,eRank=158.6,q75/q25=3275.84 train_time:705242ms step_avg:78.36ms +[2025-09-02 17:59:32] [Rank 0] PRINT: step:9000/10000 val_loss:3.6241 svd_entropy: attn_qk:H=0.7796,top10E=0.25,eRank=180.8,q75/q25=54.94 attn_vo:H=0.8519,top10E=0.13,eRank=310.0,q75/q25=43.96 mlp_w1:H=0.9138,top10E=0.13,eRank=436.0,q75/q25=4.31 mlp_w2:H=0.9713,top10E=0.04,eRank=635.0,q75/q25=2.84 vo_prod:H=0.7566,top10E=0.22,eRank=158.6,q75/q25=3275.84 train_time:705242ms step_avg:78.36ms +[2025-09-02 17:59:32] [Rank 0] step:9001/10000 train_time:705256ms step_avg:78.35ms +[2025-09-02 17:59:32] [Rank 0] step:9001/10000 train_time:705256ms step_avg:78.35ms +[2025-09-02 17:59:34] [Rank 0] step:9021/10000 train_time:706854ms step_avg:78.36ms +[2025-09-02 17:59:34] [Rank 0] step:9021/10000 train_time:706854ms step_avg:78.36ms +[2025-09-02 17:59:35] [Rank 0] step:9041/10000 train_time:708526ms 
step_avg:78.37ms +[2025-09-02 17:59:35] [Rank 0] step:9041/10000 train_time:708526ms step_avg:78.37ms +[2025-09-02 17:59:37] [Rank 0] step:9061/10000 train_time:710201ms step_avg:78.38ms +[2025-09-02 17:59:37] [Rank 0] step:9061/10000 train_time:710201ms step_avg:78.38ms +[2025-09-02 17:59:39] [Rank 0] step:9081/10000 train_time:711878ms step_avg:78.39ms +[2025-09-02 17:59:39] [Rank 0] step:9081/10000 train_time:711878ms step_avg:78.39ms +[2025-09-02 17:59:40] [Rank 0] step:9101/10000 train_time:713569ms step_avg:78.41ms +[2025-09-02 17:59:40] [Rank 0] step:9101/10000 train_time:713569ms step_avg:78.41ms +[2025-09-02 17:59:42] [Rank 0] step:9121/10000 train_time:715241ms step_avg:78.42ms +[2025-09-02 17:59:42] [Rank 0] step:9121/10000 train_time:715241ms step_avg:78.42ms +[2025-09-02 17:59:44] [Rank 0] step:9141/10000 train_time:716904ms step_avg:78.43ms +[2025-09-02 17:59:44] [Rank 0] step:9141/10000 train_time:716904ms step_avg:78.43ms +[2025-09-02 17:59:45] [Rank 0] step:9161/10000 train_time:718569ms step_avg:78.44ms +[2025-09-02 17:59:45] [Rank 0] step:9161/10000 train_time:718569ms step_avg:78.44ms +[2025-09-02 17:59:47] [Rank 0] step:9181/10000 train_time:720274ms step_avg:78.45ms +[2025-09-02 17:59:47] [Rank 0] step:9181/10000 train_time:720274ms step_avg:78.45ms +[2025-09-02 17:59:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:59:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:00:01] [Rank 0] PRINT: step:9200/10000 val_loss:3.6157 svd_entropy: attn_qk:H=0.7797,top10E=0.25,eRank=181.0,q75/q25=54.56 attn_vo:H=0.8521,top10E=0.13,eRank=310.5,q75/q25=43.82 mlp_w1:H=0.9140,top10E=0.13,eRank=436.6,q75/q25=4.29 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7571,top10E=0.22,eRank=159.1,q75/q25=3306.73 train_time:722028ms step_avg:78.48ms +[2025-09-02 18:00:01] [Rank 0] PRINT: step:9200/10000 val_loss:3.6157 svd_entropy: attn_qk:H=0.7797,top10E=0.25,eRank=181.0,q75/q25=54.56 attn_vo:H=0.8521,top10E=0.13,eRank=310.5,q75/q25=43.82 mlp_w1:H=0.9140,top10E=0.13,eRank=436.6,q75/q25=4.29 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7571,top10E=0.22,eRank=159.1,q75/q25=3306.73 train_time:722028ms step_avg:78.48ms +[2025-09-02 18:00:01] [Rank 0] step:9201/10000 train_time:722042ms step_avg:78.47ms +[2025-09-02 18:00:01] [Rank 0] step:9201/10000 train_time:722042ms step_avg:78.47ms +[2025-09-02 18:00:02] [Rank 0] step:9221/10000 train_time:723645ms step_avg:78.48ms +[2025-09-02 18:00:02] [Rank 0] step:9221/10000 train_time:723645ms step_avg:78.48ms +[2025-09-02 18:00:04] [Rank 0] step:9241/10000 train_time:725331ms step_avg:78.49ms +[2025-09-02 18:00:04] [Rank 0] step:9241/10000 train_time:725331ms step_avg:78.49ms +[2025-09-02 18:00:06] [Rank 0] step:9261/10000 train_time:727014ms step_avg:78.50ms +[2025-09-02 18:00:06] [Rank 0] step:9261/10000 train_time:727014ms step_avg:78.50ms +[2025-09-02 18:00:07] [Rank 0] step:9281/10000 train_time:728680ms step_avg:78.51ms +[2025-09-02 18:00:07] [Rank 0] step:9281/10000 train_time:728680ms step_avg:78.51ms +[2025-09-02 18:00:09] [Rank 0] step:9301/10000 train_time:730355ms step_avg:78.52ms +[2025-09-02 18:00:09] [Rank 0] step:9301/10000 train_time:730355ms step_avg:78.52ms +[2025-09-02 18:00:11] [Rank 0] step:9321/10000 train_time:732032ms step_avg:78.54ms +[2025-09-02 18:00:11] [Rank 0] step:9321/10000 train_time:732032ms step_avg:78.54ms +[2025-09-02 
18:00:12] [Rank 0] step:9341/10000 train_time:733708ms step_avg:78.55ms +[2025-09-02 18:00:12] [Rank 0] step:9341/10000 train_time:733708ms step_avg:78.55ms +[2025-09-02 18:00:14] [Rank 0] step:9361/10000 train_time:735385ms step_avg:78.56ms +[2025-09-02 18:00:14] [Rank 0] step:9361/10000 train_time:735385ms step_avg:78.56ms +[2025-09-02 18:00:16] [Rank 0] step:9381/10000 train_time:737074ms step_avg:78.57ms +[2025-09-02 18:00:16] [Rank 0] step:9381/10000 train_time:737074ms step_avg:78.57ms +[2025-09-02 18:00:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:00:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:00:29] [Rank 0] PRINT: step:9400/10000 val_loss:3.6083 svd_entropy: attn_qk:H=0.7799,top10E=0.25,eRank=181.2,q75/q25=54.56 attn_vo:H=0.8524,top10E=0.13,eRank=310.8,q75/q25=43.67 mlp_w1:H=0.9141,top10E=0.13,eRank=437.0,q75/q25=4.28 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7576,top10E=0.22,eRank=159.6,q75/q25=3301.57 train_time:738840ms step_avg:78.60ms +[2025-09-02 18:00:29] [Rank 0] PRINT: step:9400/10000 val_loss:3.6083 svd_entropy: attn_qk:H=0.7799,top10E=0.25,eRank=181.2,q75/q25=54.56 attn_vo:H=0.8524,top10E=0.13,eRank=310.8,q75/q25=43.67 mlp_w1:H=0.9141,top10E=0.13,eRank=437.0,q75/q25=4.28 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7576,top10E=0.22,eRank=159.6,q75/q25=3301.57 train_time:738840ms step_avg:78.60ms +[2025-09-02 18:00:29] [Rank 0] step:9401/10000 train_time:738854ms step_avg:78.59ms +[2025-09-02 18:00:29] [Rank 0] step:9401/10000 train_time:738854ms step_avg:78.59ms +[2025-09-02 18:00:31] [Rank 0] step:9421/10000 train_time:740445ms step_avg:78.60ms +[2025-09-02 18:00:31] [Rank 0] step:9421/10000 train_time:740445ms step_avg:78.60ms +[2025-09-02 18:00:33] [Rank 0] step:9441/10000 train_time:742113ms 
step_avg:78.61ms +[2025-09-02 18:00:33] [Rank 0] step:9441/10000 train_time:742113ms step_avg:78.61ms +[2025-09-02 18:00:34] [Rank 0] step:9461/10000 train_time:743788ms step_avg:78.62ms +[2025-09-02 18:00:34] [Rank 0] step:9461/10000 train_time:743788ms step_avg:78.62ms +[2025-09-02 18:00:36] [Rank 0] step:9481/10000 train_time:745461ms step_avg:78.63ms +[2025-09-02 18:00:36] [Rank 0] step:9481/10000 train_time:745461ms step_avg:78.63ms +[2025-09-02 18:00:38] [Rank 0] step:9501/10000 train_time:747147ms step_avg:78.64ms +[2025-09-02 18:00:38] [Rank 0] step:9501/10000 train_time:747147ms step_avg:78.64ms +[2025-09-02 18:00:39] [Rank 0] step:9521/10000 train_time:748808ms step_avg:78.65ms +[2025-09-02 18:00:39] [Rank 0] step:9521/10000 train_time:748808ms step_avg:78.65ms +[2025-09-02 18:00:41] [Rank 0] step:9541/10000 train_time:750480ms step_avg:78.66ms +[2025-09-02 18:00:41] [Rank 0] step:9541/10000 train_time:750480ms step_avg:78.66ms +[2025-09-02 18:00:43] [Rank 0] step:9561/10000 train_time:752145ms step_avg:78.67ms +[2025-09-02 18:00:43] [Rank 0] step:9561/10000 train_time:752145ms step_avg:78.67ms +[2025-09-02 18:00:44] [Rank 0] step:9581/10000 train_time:753818ms step_avg:78.68ms +[2025-09-02 18:00:44] [Rank 0] step:9581/10000 train_time:753818ms step_avg:78.68ms +[2025-09-02 18:00:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:00:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:00:58] [Rank 0] PRINT: step:9600/10000 val_loss:3.6020 svd_entropy: attn_qk:H=0.7800,top10E=0.25,eRank=181.3,q75/q25=54.53 attn_vo:H=0.8525,top10E=0.13,eRank=311.2,q75/q25=43.61 mlp_w1:H=0.9143,top10E=0.13,eRank=437.4,q75/q25=4.28 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7579,top10E=0.22,eRank=160.0,q75/q25=3311.49 train_time:755588ms step_avg:78.71ms +[2025-09-02 18:00:58] [Rank 0] PRINT: step:9600/10000 val_loss:3.6020 svd_entropy: attn_qk:H=0.7800,top10E=0.25,eRank=181.3,q75/q25=54.53 attn_vo:H=0.8525,top10E=0.13,eRank=311.2,q75/q25=43.61 mlp_w1:H=0.9143,top10E=0.13,eRank=437.4,q75/q25=4.28 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7579,top10E=0.22,eRank=160.0,q75/q25=3311.49 train_time:755588ms step_avg:78.71ms +[2025-09-02 18:00:58] [Rank 0] step:9601/10000 train_time:755602ms step_avg:78.70ms +[2025-09-02 18:00:58] [Rank 0] step:9601/10000 train_time:755602ms step_avg:78.70ms +[2025-09-02 18:01:00] [Rank 0] step:9621/10000 train_time:757213ms step_avg:78.70ms +[2025-09-02 18:01:00] [Rank 0] step:9621/10000 train_time:757213ms step_avg:78.70ms +[2025-09-02 18:01:01] [Rank 0] step:9641/10000 train_time:758889ms step_avg:78.71ms +[2025-09-02 18:01:01] [Rank 0] step:9641/10000 train_time:758889ms step_avg:78.71ms +[2025-09-02 18:01:03] [Rank 0] step:9661/10000 train_time:760592ms step_avg:78.73ms +[2025-09-02 18:01:03] [Rank 0] step:9661/10000 train_time:760592ms step_avg:78.73ms +[2025-09-02 18:01:05] [Rank 0] step:9681/10000 train_time:762285ms step_avg:78.74ms +[2025-09-02 18:01:05] [Rank 0] step:9681/10000 train_time:762285ms step_avg:78.74ms +[2025-09-02 18:01:06] [Rank 0] step:9701/10000 train_time:763992ms step_avg:78.75ms +[2025-09-02 18:01:06] [Rank 0] step:9701/10000 train_time:763992ms step_avg:78.75ms +[2025-09-02 18:01:08] [Rank 0] step:9721/10000 train_time:765680ms step_avg:78.77ms +[2025-09-02 18:01:08] [Rank 0] step:9721/10000 train_time:765680ms step_avg:78.77ms +[2025-09-02 
18:01:10] [Rank 0] step:9741/10000 train_time:767394ms step_avg:78.78ms +[2025-09-02 18:01:10] [Rank 0] step:9741/10000 train_time:767394ms step_avg:78.78ms +[2025-09-02 18:01:11] [Rank 0] step:9761/10000 train_time:769095ms step_avg:78.79ms +[2025-09-02 18:01:11] [Rank 0] step:9761/10000 train_time:769095ms step_avg:78.79ms +[2025-09-02 18:01:13] [Rank 0] step:9781/10000 train_time:770799ms step_avg:78.81ms +[2025-09-02 18:01:13] [Rank 0] step:9781/10000 train_time:770799ms step_avg:78.81ms +[2025-09-02 18:01:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:01:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:01:27] [Rank 0] PRINT: step:9800/10000 val_loss:3.5952 svd_entropy: attn_qk:H=0.7801,top10E=0.25,eRank=181.4,q75/q25=54.37 attn_vo:H=0.8527,top10E=0.13,eRank=311.4,q75/q25=43.49 mlp_w1:H=0.9144,top10E=0.13,eRank=437.7,q75/q25=4.28 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7583,top10E=0.22,eRank=160.3,q75/q25=3332.09 train_time:772602ms step_avg:78.84ms +[2025-09-02 18:01:27] [Rank 0] PRINT: step:9800/10000 val_loss:3.5952 svd_entropy: attn_qk:H=0.7801,top10E=0.25,eRank=181.4,q75/q25=54.37 attn_vo:H=0.8527,top10E=0.13,eRank=311.4,q75/q25=43.49 mlp_w1:H=0.9144,top10E=0.13,eRank=437.7,q75/q25=4.28 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.84 vo_prod:H=0.7583,top10E=0.22,eRank=160.3,q75/q25=3332.09 train_time:772602ms step_avg:78.84ms +[2025-09-02 18:01:27] [Rank 0] step:9801/10000 train_time:772616ms step_avg:78.83ms +[2025-09-02 18:01:27] [Rank 0] step:9801/10000 train_time:772616ms step_avg:78.83ms +[2025-09-02 18:01:28] [Rank 0] step:9821/10000 train_time:774225ms step_avg:78.83ms +[2025-09-02 18:01:28] [Rank 0] step:9821/10000 train_time:774225ms step_avg:78.83ms +[2025-09-02 18:01:30] [Rank 0] step:9841/10000 train_time:775940ms 
step_avg:78.85ms +[2025-09-02 18:01:30] [Rank 0] step:9841/10000 train_time:775940ms step_avg:78.85ms +[2025-09-02 18:01:32] [Rank 0] step:9861/10000 train_time:777681ms step_avg:78.86ms +[2025-09-02 18:01:32] [Rank 0] step:9861/10000 train_time:777681ms step_avg:78.86ms +[2025-09-02 18:01:33] [Rank 0] step:9881/10000 train_time:779370ms step_avg:78.88ms +[2025-09-02 18:01:33] [Rank 0] step:9881/10000 train_time:779370ms step_avg:78.88ms +[2025-09-02 18:01:35] [Rank 0] step:9901/10000 train_time:781069ms step_avg:78.89ms +[2025-09-02 18:01:35] [Rank 0] step:9901/10000 train_time:781069ms step_avg:78.89ms +[2025-09-02 18:01:37] [Rank 0] step:9921/10000 train_time:782768ms step_avg:78.90ms +[2025-09-02 18:01:37] [Rank 0] step:9921/10000 train_time:782768ms step_avg:78.90ms +[2025-09-02 18:01:39] [Rank 0] step:9941/10000 train_time:784469ms step_avg:78.91ms +[2025-09-02 18:01:39] [Rank 0] step:9941/10000 train_time:784469ms step_avg:78.91ms +[2025-09-02 18:01:40] [Rank 0] step:9961/10000 train_time:786167ms step_avg:78.92ms +[2025-09-02 18:01:40] [Rank 0] step:9961/10000 train_time:786167ms step_avg:78.92ms +[2025-09-02 18:01:42] [Rank 0] step:9981/10000 train_time:787867ms step_avg:78.94ms +[2025-09-02 18:01:42] [Rank 0] step:9981/10000 train_time:787867ms step_avg:78.94ms +[2025-09-02 18:01:44] [Rank 0] step:10000/10000 train_time:789487ms step_avg:78.95ms +[2025-09-02 18:01:44] [Rank 0] step:10000/10000 train_time:789487ms step_avg:78.95ms +[2025-09-02 18:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:01:55] [Rank 0] PRINT: step:10000/10000 val_loss:3.5896 svd_entropy: attn_qk:H=0.7801,top10E=0.25,eRank=181.4,q75/q25=54.33 attn_vo:H=0.8528,top10E=0.13,eRank=311.6,q75/q25=43.40 mlp_w1:H=0.9145,top10E=0.13,eRank=438.0,q75/q25=4.27 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7585,top10E=0.22,eRank=160.6,q75/q25=3347.21 train_time:789670ms step_avg:78.97ms +[2025-09-02 18:01:55] [Rank 0] PRINT: step:10000/10000 val_loss:3.5896 svd_entropy: attn_qk:H=0.7801,top10E=0.25,eRank=181.4,q75/q25=54.33 attn_vo:H=0.8528,top10E=0.13,eRank=311.6,q75/q25=43.40 mlp_w1:H=0.9145,top10E=0.13,eRank=438.0,q75/q25=4.27 mlp_w2:H=0.9713,top10E=0.04,eRank=634.9,q75/q25=2.83 vo_prod:H=0.7585,top10E=0.22,eRank=160.6,q75/q25=3347.21 train_time:789670ms step_avg:78.97ms +[2025-09-02 18:01:55] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 18:01:55 2025 --- +[2025-09-02 18:01:55] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 18:01:55 2025 --- +[2025-09-02 18:01:55] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15116 MiB +[2025-09-02 18:01:55] [Rank 0] PRINT: Peak memory allocated: 10115 MiB reserved: 15116 MiB diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_50/config.json b/logs_svd_qkvo/mode_13_param_qkvo_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5ecb5bb7031eef22dd102a58b0c9e612b55f2669 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_50/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 13, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "6f197592-d618-4333-899e-1498e9d29623", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_13_param_qkvo_seed_50/training_log_6f197592-d618-4333-899e-1498e9d29623.txt b/logs_svd_qkvo/mode_13_param_qkvo_seed_50/training_log_6f197592-d618-4333-899e-1498e9d29623.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f19b8cce5312a551d2c96950a0d2c59e3e7c6d8 --- /dev/null +++ b/logs_svd_qkvo/mode_13_param_qkvo_seed_50/training_log_6f197592-d618-4333-899e-1498e9d29623.txt @@ -0,0 +1,2984 @@ +[2025-09-03 03:59:41] [Rank 0] PRINT: --- Script Start: Wed Sep 3 03:59:41 2025 --- +[2025-09-03 03:59:41] [Rank 0] PRINT: --- Script Start: Wed Sep 3 03:59:41 2025 --- +[2025-09-03 03:59:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 03:59:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=13, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 03:59:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 03:59:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 03:59:41] [Rank 0] PRINT: Using fixed seed: 50 +[2025-09-03 03:59:41] [Rank 0] PRINT: Using fixed seed: 50 +[2025-09-03 03:59:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_50 +[2025-09-03 03:59:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_13_param_qkvo_seed_50 +[2025-09-03 03:59:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 03:59:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 03:59:41] [Rank 0] PRINT: Constructing model... +[2025-09-03 03:59:41] [Rank 0] PRINT: Constructing model... +[2025-09-03 03:59:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 03:59:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 03:59:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 03:59:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 03:59:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 03:59:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 03:59:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-03 03:59:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13 +[2025-09-03 03:59:43] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-03 03:59:43] [Rank 0] PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: 0.008). +[2025-09-03 03:59:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 03:59:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 03:59:43] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-03 03:59:43] [Rank 0] PRINT: Muon optimizer is active with 23 parameters. +[2025-09-03 03:59:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 03:59:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 03:59:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 03:59:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 03:59:43] [Rank 0] PRINT: Starting warmup... +[2025-09-03 03:59:43] [Rank 0] PRINT: Starting warmup... +[2025-09-03 04:07:09] [Rank 0] PRINT: Warmup complete. +[2025-09-03 04:07:09] [Rank 0] PRINT: Warmup complete. +[2025-09-03 04:07:09] [Rank 0] PRINT: Starting training... +[2025-09-03 04:07:09] [Rank 0] PRINT: Starting training... 
+[2025-09-03 04:07:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:07:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:14:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.25 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.6,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 04:14:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.25 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.6,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 04:14:25] [Rank 0] step:21/10000 train_time:1422ms step_avg:67.71ms +[2025-09-03 04:14:25] [Rank 0] step:21/10000 train_time:1422ms step_avg:67.71ms +[2025-09-03 04:14:26] [Rank 0] step:41/10000 train_time:2934ms step_avg:71.55ms +[2025-09-03 04:14:26] [Rank 0] step:41/10000 train_time:2934ms step_avg:71.55ms +[2025-09-03 04:14:28] [Rank 0] step:61/10000 train_time:4434ms step_avg:72.69ms +[2025-09-03 04:14:28] [Rank 0] step:61/10000 train_time:4434ms step_avg:72.69ms +[2025-09-03 04:14:29] [Rank 0] step:81/10000 train_time:5886ms step_avg:72.67ms +[2025-09-03 04:14:29] [Rank 0] step:81/10000 train_time:5886ms step_avg:72.67ms +[2025-09-03 04:14:31] [Rank 0] step:101/10000 train_time:7338ms step_avg:72.65ms +[2025-09-03 04:14:31] [Rank 0] step:101/10000 train_time:7338ms step_avg:72.65ms +[2025-09-03 04:14:32] [Rank 0] step:121/10000 train_time:8791ms step_avg:72.65ms +[2025-09-03 04:14:32] [Rank 0] step:121/10000 
train_time:8791ms step_avg:72.65ms +[2025-09-03 04:14:33] [Rank 0] step:141/10000 train_time:10243ms step_avg:72.65ms +[2025-09-03 04:14:33] [Rank 0] step:141/10000 train_time:10243ms step_avg:72.65ms +[2025-09-03 04:14:35] [Rank 0] step:161/10000 train_time:11694ms step_avg:72.64ms +[2025-09-03 04:14:35] [Rank 0] step:161/10000 train_time:11694ms step_avg:72.64ms +[2025-09-03 04:14:36] [Rank 0] step:181/10000 train_time:13146ms step_avg:72.63ms +[2025-09-03 04:14:36] [Rank 0] step:181/10000 train_time:13146ms step_avg:72.63ms +[2025-09-03 04:14:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:14:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:14:50] [Rank 0] PRINT: step:200/10000 val_loss:6.2115 svd_entropy: attn_qk:H=0.5604,top10E=0.62,eRank=55.7,q75/q25=13.12 attn_vo:H=0.5169,top10E=0.64,eRank=57.5,q75/q25=15.72 mlp_w1:H=0.6624,top10E=0.51,eRank=96.2,q75/q25=2.95 mlp_w2:H=0.8073,top10E=0.17,eRank=216.6,q75/q25=17.31 vo_prod:H=0.3539,top10E=0.89,eRank=15.3,q75/q25=94.78 train_time:14672ms step_avg:73.36ms +[2025-09-03 04:14:50] [Rank 0] PRINT: step:200/10000 val_loss:6.2115 svd_entropy: attn_qk:H=0.5604,top10E=0.62,eRank=55.7,q75/q25=13.12 attn_vo:H=0.5169,top10E=0.64,eRank=57.5,q75/q25=15.72 mlp_w1:H=0.6624,top10E=0.51,eRank=96.2,q75/q25=2.95 mlp_w2:H=0.8073,top10E=0.17,eRank=216.6,q75/q25=17.31 vo_prod:H=0.3539,top10E=0.89,eRank=15.3,q75/q25=94.78 train_time:14672ms step_avg:73.36ms +[2025-09-03 04:14:50] [Rank 0] step:201/10000 train_time:14687ms step_avg:73.07ms +[2025-09-03 04:14:50] [Rank 0] step:201/10000 train_time:14687ms step_avg:73.07ms +[2025-09-03 04:14:51] [Rank 0] step:221/10000 train_time:16066ms step_avg:72.70ms +[2025-09-03 04:14:51] [Rank 0] step:221/10000 train_time:16066ms step_avg:72.70ms +[2025-09-03 04:14:53] [Rank 0] step:241/10000 
train_time:17514ms step_avg:72.67ms +[2025-09-03 04:14:53] [Rank 0] step:241/10000 train_time:17514ms step_avg:72.67ms +[2025-09-03 04:14:54] [Rank 0] step:261/10000 train_time:18962ms step_avg:72.65ms +[2025-09-03 04:14:54] [Rank 0] step:261/10000 train_time:18962ms step_avg:72.65ms +[2025-09-03 04:14:55] [Rank 0] step:281/10000 train_time:20409ms step_avg:72.63ms +[2025-09-03 04:14:55] [Rank 0] step:281/10000 train_time:20409ms step_avg:72.63ms +[2025-09-03 04:14:57] [Rank 0] step:301/10000 train_time:21860ms step_avg:72.63ms +[2025-09-03 04:14:57] [Rank 0] step:301/10000 train_time:21860ms step_avg:72.63ms +[2025-09-03 04:14:58] [Rank 0] step:321/10000 train_time:23307ms step_avg:72.61ms +[2025-09-03 04:14:58] [Rank 0] step:321/10000 train_time:23307ms step_avg:72.61ms +[2025-09-03 04:15:00] [Rank 0] step:341/10000 train_time:24756ms step_avg:72.60ms +[2025-09-03 04:15:00] [Rank 0] step:341/10000 train_time:24756ms step_avg:72.60ms +[2025-09-03 04:15:01] [Rank 0] step:361/10000 train_time:26204ms step_avg:72.59ms +[2025-09-03 04:15:01] [Rank 0] step:361/10000 train_time:26204ms step_avg:72.59ms +[2025-09-03 04:15:03] [Rank 0] step:381/10000 train_time:27652ms step_avg:72.58ms +[2025-09-03 04:15:03] [Rank 0] step:381/10000 train_time:27652ms step_avg:72.58ms +[2025-09-03 04:15:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:15:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:15:16] [Rank 0] PRINT: step:400/10000 val_loss:5.7084 svd_entropy: attn_qk:H=0.6072,top10E=0.51,eRank=70.5,q75/q25=15.99 attn_vo:H=0.6001,top10E=0.48,eRank=85.7,q75/q25=21.54 mlp_w1:H=0.6858,top10E=0.41,eRank=115.4,q75/q25=4.54 mlp_w2:H=0.9293,top10E=0.07,eRank=481.5,q75/q25=6.38 vo_prod:H=0.4523,top10E=0.74,eRank=25.1,q75/q25=170.23 train_time:29174ms step_avg:72.94ms +[2025-09-03 04:15:16] [Rank 0] PRINT: step:400/10000 val_loss:5.7084 svd_entropy: attn_qk:H=0.6072,top10E=0.51,eRank=70.5,q75/q25=15.99 attn_vo:H=0.6001,top10E=0.48,eRank=85.7,q75/q25=21.54 mlp_w1:H=0.6858,top10E=0.41,eRank=115.4,q75/q25=4.54 mlp_w2:H=0.9293,top10E=0.07,eRank=481.5,q75/q25=6.38 vo_prod:H=0.4523,top10E=0.74,eRank=25.1,q75/q25=170.23 train_time:29174ms step_avg:72.94ms +[2025-09-03 04:15:16] [Rank 0] step:401/10000 train_time:29188ms step_avg:72.79ms +[2025-09-03 04:15:16] [Rank 0] step:401/10000 train_time:29188ms step_avg:72.79ms +[2025-09-03 04:15:17] [Rank 0] step:421/10000 train_time:30568ms step_avg:72.61ms +[2025-09-03 04:15:17] [Rank 0] step:421/10000 train_time:30568ms step_avg:72.61ms +[2025-09-03 04:15:19] [Rank 0] step:441/10000 train_time:32015ms step_avg:72.60ms +[2025-09-03 04:15:19] [Rank 0] step:441/10000 train_time:32015ms step_avg:72.60ms +[2025-09-03 04:15:20] [Rank 0] step:461/10000 train_time:33460ms step_avg:72.58ms +[2025-09-03 04:15:20] [Rank 0] step:461/10000 train_time:33460ms step_avg:72.58ms +[2025-09-03 04:15:22] [Rank 0] step:481/10000 train_time:34907ms step_avg:72.57ms +[2025-09-03 04:15:22] [Rank 0] step:481/10000 train_time:34907ms step_avg:72.57ms +[2025-09-03 04:15:23] [Rank 0] step:501/10000 train_time:36354ms step_avg:72.56ms +[2025-09-03 04:15:23] [Rank 0] step:501/10000 train_time:36354ms step_avg:72.56ms +[2025-09-03 04:15:25] [Rank 0] step:521/10000 train_time:37803ms step_avg:72.56ms +[2025-09-03 04:15:25] [Rank 0] step:521/10000 train_time:37803ms step_avg:72.56ms +[2025-09-03 04:15:26] [Rank 0] step:541/10000 
train_time:39250ms step_avg:72.55ms +[2025-09-03 04:15:26] [Rank 0] step:541/10000 train_time:39250ms step_avg:72.55ms +[2025-09-03 04:15:27] [Rank 0] step:561/10000 train_time:40698ms step_avg:72.55ms +[2025-09-03 04:15:27] [Rank 0] step:561/10000 train_time:40698ms step_avg:72.55ms +[2025-09-03 04:15:29] [Rank 0] step:581/10000 train_time:42146ms step_avg:72.54ms +[2025-09-03 04:15:29] [Rank 0] step:581/10000 train_time:42146ms step_avg:72.54ms +[2025-09-03 04:15:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:15:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:15:42] [Rank 0] PRINT: step:600/10000 val_loss:5.4201 svd_entropy: attn_qk:H=0.6405,top10E=0.45,eRank=83.3,q75/q25=20.99 attn_vo:H=0.6472,top10E=0.40,eRank=109.6,q75/q25=29.66 mlp_w1:H=0.7286,top10E=0.35,eRank=145.4,q75/q25=6.37 mlp_w2:H=0.9482,top10E=0.05,eRank=545.3,q75/q25=4.54 vo_prod:H=0.5103,top10E=0.62,eRank=35.2,q75/q25=359.09 train_time:43735ms step_avg:72.89ms +[2025-09-03 04:15:42] [Rank 0] PRINT: step:600/10000 val_loss:5.4201 svd_entropy: attn_qk:H=0.6405,top10E=0.45,eRank=83.3,q75/q25=20.99 attn_vo:H=0.6472,top10E=0.40,eRank=109.6,q75/q25=29.66 mlp_w1:H=0.7286,top10E=0.35,eRank=145.4,q75/q25=6.37 mlp_w2:H=0.9482,top10E=0.05,eRank=545.3,q75/q25=4.54 vo_prod:H=0.5103,top10E=0.62,eRank=35.2,q75/q25=359.09 train_time:43735ms step_avg:72.89ms +[2025-09-03 04:15:42] [Rank 0] step:601/10000 train_time:43750ms step_avg:72.79ms +[2025-09-03 04:15:42] [Rank 0] step:601/10000 train_time:43750ms step_avg:72.79ms +[2025-09-03 04:15:43] [Rank 0] step:621/10000 train_time:45131ms step_avg:72.67ms +[2025-09-03 04:15:43] [Rank 0] step:621/10000 train_time:45131ms step_avg:72.67ms +[2025-09-03 04:15:45] [Rank 0] step:641/10000 train_time:46578ms step_avg:72.66ms +[2025-09-03 04:15:45] [Rank 0] step:641/10000 
train_time:46578ms step_avg:72.66ms +[2025-09-03 04:15:46] [Rank 0] step:661/10000 train_time:48028ms step_avg:72.66ms +[2025-09-03 04:15:46] [Rank 0] step:661/10000 train_time:48028ms step_avg:72.66ms +[2025-09-03 04:15:48] [Rank 0] step:681/10000 train_time:49476ms step_avg:72.65ms +[2025-09-03 04:15:48] [Rank 0] step:681/10000 train_time:49476ms step_avg:72.65ms +[2025-09-03 04:15:49] [Rank 0] step:701/10000 train_time:50927ms step_avg:72.65ms +[2025-09-03 04:15:49] [Rank 0] step:701/10000 train_time:50927ms step_avg:72.65ms +[2025-09-03 04:15:51] [Rank 0] step:721/10000 train_time:52377ms step_avg:72.64ms +[2025-09-03 04:15:51] [Rank 0] step:721/10000 train_time:52377ms step_avg:72.64ms +[2025-09-03 04:15:52] [Rank 0] step:741/10000 train_time:53827ms step_avg:72.64ms +[2025-09-03 04:15:52] [Rank 0] step:741/10000 train_time:53827ms step_avg:72.64ms +[2025-09-03 04:15:54] [Rank 0] step:761/10000 train_time:55289ms step_avg:72.65ms +[2025-09-03 04:15:54] [Rank 0] step:761/10000 train_time:55289ms step_avg:72.65ms +[2025-09-03 04:15:55] [Rank 0] step:781/10000 train_time:56753ms step_avg:72.67ms +[2025-09-03 04:15:55] [Rank 0] step:781/10000 train_time:56753ms step_avg:72.67ms +[2025-09-03 04:15:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:15:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:16:08] [Rank 0] PRINT: step:800/10000 val_loss:5.1945 svd_entropy: attn_qk:H=0.6630,top10E=0.41,eRank=93.5,q75/q25=27.96 attn_vo:H=0.6792,top10E=0.35,eRank=130.0,q75/q25=38.61 mlp_w1:H=0.7604,top10E=0.31,eRank=172.8,q75/q25=7.29 mlp_w2:H=0.9545,top10E=0.05,eRank=568.3,q75/q25=4.03 vo_prod:H=0.5491,top10E=0.55,eRank=44.7,q75/q25=785.99 train_time:58295ms step_avg:72.87ms +[2025-09-03 04:16:08] [Rank 0] PRINT: step:800/10000 val_loss:5.1945 svd_entropy: attn_qk:H=0.6630,top10E=0.41,eRank=93.5,q75/q25=27.96 attn_vo:H=0.6792,top10E=0.35,eRank=130.0,q75/q25=38.61 mlp_w1:H=0.7604,top10E=0.31,eRank=172.8,q75/q25=7.29 mlp_w2:H=0.9545,top10E=0.05,eRank=568.3,q75/q25=4.03 vo_prod:H=0.5491,top10E=0.55,eRank=44.7,q75/q25=785.99 train_time:58295ms step_avg:72.87ms +[2025-09-03 04:16:08] [Rank 0] step:801/10000 train_time:58309ms step_avg:72.79ms +[2025-09-03 04:16:08] [Rank 0] step:801/10000 train_time:58309ms step_avg:72.79ms +[2025-09-03 04:16:10] [Rank 0] step:821/10000 train_time:59697ms step_avg:72.71ms +[2025-09-03 04:16:10] [Rank 0] step:821/10000 train_time:59697ms step_avg:72.71ms +[2025-09-03 04:16:11] [Rank 0] step:841/10000 train_time:61158ms step_avg:72.72ms +[2025-09-03 04:16:11] [Rank 0] step:841/10000 train_time:61158ms step_avg:72.72ms +[2025-09-03 04:16:13] [Rank 0] step:861/10000 train_time:62620ms step_avg:72.73ms +[2025-09-03 04:16:13] [Rank 0] step:861/10000 train_time:62620ms step_avg:72.73ms +[2025-09-03 04:16:14] [Rank 0] step:881/10000 train_time:64081ms step_avg:72.74ms +[2025-09-03 04:16:14] [Rank 0] step:881/10000 train_time:64081ms step_avg:72.74ms +[2025-09-03 04:16:15] [Rank 0] step:901/10000 train_time:65543ms step_avg:72.75ms +[2025-09-03 04:16:15] [Rank 0] step:901/10000 train_time:65543ms step_avg:72.75ms +[2025-09-03 04:16:17] [Rank 0] step:921/10000 train_time:67006ms step_avg:72.75ms +[2025-09-03 04:16:17] [Rank 0] step:921/10000 train_time:67006ms step_avg:72.75ms +[2025-09-03 04:16:18] [Rank 0] step:941/10000 
train_time:68469ms step_avg:72.76ms +[2025-09-03 04:16:18] [Rank 0] step:941/10000 train_time:68469ms step_avg:72.76ms +[2025-09-03 04:16:20] [Rank 0] step:961/10000 train_time:69932ms step_avg:72.77ms +[2025-09-03 04:16:20] [Rank 0] step:961/10000 train_time:69932ms step_avg:72.77ms +[2025-09-03 04:16:21] [Rank 0] step:981/10000 train_time:71394ms step_avg:72.78ms +[2025-09-03 04:16:21] [Rank 0] step:981/10000 train_time:71394ms step_avg:72.78ms +[2025-09-03 04:16:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:16:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:16:34] [Rank 0] PRINT: step:1000/10000 val_loss:5.0227 svd_entropy: attn_qk:H=0.6806,top10E=0.38,eRank=102.5,q75/q25=35.42 attn_vo:H=0.7027,top10E=0.32,eRank=147.9,q75/q25=45.84 mlp_w1:H=0.7848,top10E=0.28,eRank=197.9,q75/q25=7.52 mlp_w2:H=0.9594,top10E=0.05,eRank=587.0,q75/q25=3.65 vo_prod:H=0.5785,top10E=0.49,eRank=53.9,q75/q25=1569.78 train_time:72931ms step_avg:72.93ms +[2025-09-03 04:16:34] [Rank 0] PRINT: step:1000/10000 val_loss:5.0227 svd_entropy: attn_qk:H=0.6806,top10E=0.38,eRank=102.5,q75/q25=35.42 attn_vo:H=0.7027,top10E=0.32,eRank=147.9,q75/q25=45.84 mlp_w1:H=0.7848,top10E=0.28,eRank=197.9,q75/q25=7.52 mlp_w2:H=0.9594,top10E=0.05,eRank=587.0,q75/q25=3.65 vo_prod:H=0.5785,top10E=0.49,eRank=53.9,q75/q25=1569.78 train_time:72931ms step_avg:72.93ms +[2025-09-03 04:16:35] [Rank 0] step:1001/10000 train_time:72946ms step_avg:72.87ms +[2025-09-03 04:16:35] [Rank 0] step:1001/10000 train_time:72946ms step_avg:72.87ms +[2025-09-03 04:16:36] [Rank 0] step:1021/10000 train_time:74346ms step_avg:72.82ms +[2025-09-03 04:16:36] [Rank 0] step:1021/10000 train_time:74346ms step_avg:72.82ms +[2025-09-03 04:16:37] [Rank 0] step:1041/10000 train_time:75806ms step_avg:72.82ms +[2025-09-03 04:16:37] [Rank 0] 
step:1041/10000 train_time:75806ms step_avg:72.82ms +[2025-09-03 04:16:39] [Rank 0] step:1061/10000 train_time:77267ms step_avg:72.82ms +[2025-09-03 04:16:39] [Rank 0] step:1061/10000 train_time:77267ms step_avg:72.82ms +[2025-09-03 04:16:40] [Rank 0] step:1081/10000 train_time:78728ms step_avg:72.83ms +[2025-09-03 04:16:40] [Rank 0] step:1081/10000 train_time:78728ms step_avg:72.83ms +[2025-09-03 04:16:42] [Rank 0] step:1101/10000 train_time:80188ms step_avg:72.83ms +[2025-09-03 04:16:42] [Rank 0] step:1101/10000 train_time:80188ms step_avg:72.83ms +[2025-09-03 04:16:43] [Rank 0] step:1121/10000 train_time:81650ms step_avg:72.84ms +[2025-09-03 04:16:43] [Rank 0] step:1121/10000 train_time:81650ms step_avg:72.84ms +[2025-09-03 04:16:45] [Rank 0] step:1141/10000 train_time:83111ms step_avg:72.84ms +[2025-09-03 04:16:45] [Rank 0] step:1141/10000 train_time:83111ms step_avg:72.84ms +[2025-09-03 04:16:46] [Rank 0] step:1161/10000 train_time:84573ms step_avg:72.84ms +[2025-09-03 04:16:46] [Rank 0] step:1161/10000 train_time:84573ms step_avg:72.84ms +[2025-09-03 04:16:48] [Rank 0] step:1181/10000 train_time:86035ms step_avg:72.85ms +[2025-09-03 04:16:48] [Rank 0] step:1181/10000 train_time:86035ms step_avg:72.85ms +[2025-09-03 04:16:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:16:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:17:01] [Rank 0] PRINT: step:1200/10000 val_loss:4.8485 svd_entropy: attn_qk:H=0.6940,top10E=0.35,eRank=110.4,q75/q25=42.70 attn_vo:H=0.7221,top10E=0.30,eRank=165.2,q75/q25=50.99 mlp_w1:H=0.8047,top10E=0.26,eRank=222.0,q75/q25=7.47 mlp_w2:H=0.9627,top10E=0.04,eRank=599.8,q75/q25=3.42 vo_prod:H=0.6011,top10E=0.44,eRank=62.7,q75/q25=2664.79 train_time:87574ms step_avg:72.98ms +[2025-09-03 04:17:01] [Rank 0] PRINT: step:1200/10000 val_loss:4.8485 svd_entropy: attn_qk:H=0.6940,top10E=0.35,eRank=110.4,q75/q25=42.70 attn_vo:H=0.7221,top10E=0.30,eRank=165.2,q75/q25=50.99 mlp_w1:H=0.8047,top10E=0.26,eRank=222.0,q75/q25=7.47 mlp_w2:H=0.9627,top10E=0.04,eRank=599.8,q75/q25=3.42 vo_prod:H=0.6011,top10E=0.44,eRank=62.7,q75/q25=2664.79 train_time:87574ms step_avg:72.98ms +[2025-09-03 04:17:01] [Rank 0] step:1201/10000 train_time:87588ms step_avg:72.93ms +[2025-09-03 04:17:01] [Rank 0] step:1201/10000 train_time:87588ms step_avg:72.93ms +[2025-09-03 04:17:02] [Rank 0] step:1221/10000 train_time:88978ms step_avg:72.87ms +[2025-09-03 04:17:02] [Rank 0] step:1221/10000 train_time:88978ms step_avg:72.87ms +[2025-09-03 04:17:04] [Rank 0] step:1241/10000 train_time:90440ms step_avg:72.88ms +[2025-09-03 04:17:04] [Rank 0] step:1241/10000 train_time:90440ms step_avg:72.88ms +[2025-09-03 04:17:05] [Rank 0] step:1261/10000 train_time:91900ms step_avg:72.88ms +[2025-09-03 04:17:05] [Rank 0] step:1261/10000 train_time:91900ms step_avg:72.88ms +[2025-09-03 04:17:06] [Rank 0] step:1281/10000 train_time:93361ms step_avg:72.88ms +[2025-09-03 04:17:06] [Rank 0] step:1281/10000 train_time:93361ms step_avg:72.88ms +[2025-09-03 04:17:08] [Rank 0] step:1301/10000 train_time:94823ms step_avg:72.88ms +[2025-09-03 04:17:08] [Rank 0] step:1301/10000 train_time:94823ms step_avg:72.88ms +[2025-09-03 04:17:09] [Rank 0] step:1321/10000 train_time:96285ms step_avg:72.89ms +[2025-09-03 04:17:09] [Rank 0] step:1321/10000 train_time:96285ms step_avg:72.89ms +[2025-09-03 04:17:11] [Rank 0] 
step:1341/10000 train_time:97755ms step_avg:72.90ms +[2025-09-03 04:17:11] [Rank 0] step:1341/10000 train_time:97755ms step_avg:72.90ms +[2025-09-03 04:17:12] [Rank 0] step:1361/10000 train_time:99217ms step_avg:72.90ms +[2025-09-03 04:17:12] [Rank 0] step:1361/10000 train_time:99217ms step_avg:72.90ms +[2025-09-03 04:17:14] [Rank 0] step:1381/10000 train_time:100679ms step_avg:72.90ms +[2025-09-03 04:17:14] [Rank 0] step:1381/10000 train_time:100679ms step_avg:72.90ms +[2025-09-03 04:17:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:17:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:17:27] [Rank 0] PRINT: step:1400/10000 val_loss:4.7233 svd_entropy: attn_qk:H=0.7041,top10E=0.34,eRank=116.9,q75/q25=48.23 attn_vo:H=0.7369,top10E=0.28,eRank=179.9,q75/q25=54.28 mlp_w1:H=0.8206,top10E=0.24,eRank=243.7,q75/q25=7.26 mlp_w2:H=0.9649,top10E=0.04,eRank=608.6,q75/q25=3.28 vo_prod:H=0.6173,top10E=0.42,eRank=69.9,q75/q25=3573.98 train_time:102217ms step_avg:73.01ms +[2025-09-03 04:17:27] [Rank 0] PRINT: step:1400/10000 val_loss:4.7233 svd_entropy: attn_qk:H=0.7041,top10E=0.34,eRank=116.9,q75/q25=48.23 attn_vo:H=0.7369,top10E=0.28,eRank=179.9,q75/q25=54.28 mlp_w1:H=0.8206,top10E=0.24,eRank=243.7,q75/q25=7.26 mlp_w2:H=0.9649,top10E=0.04,eRank=608.6,q75/q25=3.28 vo_prod:H=0.6173,top10E=0.42,eRank=69.9,q75/q25=3573.98 train_time:102217ms step_avg:73.01ms +[2025-09-03 04:17:27] [Rank 0] step:1401/10000 train_time:102230ms step_avg:72.97ms +[2025-09-03 04:17:27] [Rank 0] step:1401/10000 train_time:102230ms step_avg:72.97ms +[2025-09-03 04:17:28] [Rank 0] step:1421/10000 train_time:103629ms step_avg:72.93ms +[2025-09-03 04:17:28] [Rank 0] step:1421/10000 train_time:103629ms step_avg:72.93ms +[2025-09-03 04:17:30] [Rank 0] step:1441/10000 train_time:105091ms step_avg:72.93ms +[2025-09-03 
04:17:30] [Rank 0] step:1441/10000 train_time:105091ms step_avg:72.93ms +[2025-09-03 04:17:31] [Rank 0] step:1461/10000 train_time:106552ms step_avg:72.93ms +[2025-09-03 04:17:31] [Rank 0] step:1461/10000 train_time:106552ms step_avg:72.93ms +[2025-09-03 04:17:33] [Rank 0] step:1481/10000 train_time:108016ms step_avg:72.93ms +[2025-09-03 04:17:33] [Rank 0] step:1481/10000 train_time:108016ms step_avg:72.93ms +[2025-09-03 04:17:34] [Rank 0] step:1501/10000 train_time:109488ms step_avg:72.94ms +[2025-09-03 04:17:34] [Rank 0] step:1501/10000 train_time:109488ms step_avg:72.94ms +[2025-09-03 04:17:36] [Rank 0] step:1521/10000 train_time:110970ms step_avg:72.96ms +[2025-09-03 04:17:36] [Rank 0] step:1521/10000 train_time:110970ms step_avg:72.96ms +[2025-09-03 04:17:37] [Rank 0] step:1541/10000 train_time:112446ms step_avg:72.97ms +[2025-09-03 04:17:37] [Rank 0] step:1541/10000 train_time:112446ms step_avg:72.97ms +[2025-09-03 04:17:39] [Rank 0] step:1561/10000 train_time:113920ms step_avg:72.98ms +[2025-09-03 04:17:39] [Rank 0] step:1561/10000 train_time:113920ms step_avg:72.98ms +[2025-09-03 04:17:40] [Rank 0] step:1581/10000 train_time:115403ms step_avg:72.99ms +[2025-09-03 04:17:40] [Rank 0] step:1581/10000 train_time:115403ms step_avg:72.99ms +[2025-09-03 04:17:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:17:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:17:53] [Rank 0] PRINT: step:1600/10000 val_loss:4.5986 svd_entropy: attn_qk:H=0.7119,top10E=0.33,eRank=122.1,q75/q25=53.14 attn_vo:H=0.7613,top10E=0.24,eRank=197.7,q75/q25=56.70 mlp_w1:H=0.8332,top10E=0.22,eRank=262.8,q75/q25=7.01 mlp_w2:H=0.9665,top10E=0.04,eRank=615.1,q75/q25=3.17 vo_prod:H=0.6368,top10E=0.38,eRank=77.4,q75/q25=4000.98 train_time:116954ms step_avg:73.10ms +[2025-09-03 04:17:53] [Rank 0] PRINT: step:1600/10000 val_loss:4.5986 svd_entropy: attn_qk:H=0.7119,top10E=0.33,eRank=122.1,q75/q25=53.14 attn_vo:H=0.7613,top10E=0.24,eRank=197.7,q75/q25=56.70 mlp_w1:H=0.8332,top10E=0.22,eRank=262.8,q75/q25=7.01 mlp_w2:H=0.9665,top10E=0.04,eRank=615.1,q75/q25=3.17 vo_prod:H=0.6368,top10E=0.38,eRank=77.4,q75/q25=4000.98 train_time:116954ms step_avg:73.10ms +[2025-09-03 04:17:53] [Rank 0] step:1601/10000 train_time:116969ms step_avg:73.06ms +[2025-09-03 04:17:53] [Rank 0] step:1601/10000 train_time:116969ms step_avg:73.06ms +[2025-09-03 04:17:55] [Rank 0] step:1621/10000 train_time:118374ms step_avg:73.03ms +[2025-09-03 04:17:55] [Rank 0] step:1621/10000 train_time:118374ms step_avg:73.03ms +[2025-09-03 04:17:56] [Rank 0] step:1641/10000 train_time:119847ms step_avg:73.03ms +[2025-09-03 04:17:56] [Rank 0] step:1641/10000 train_time:119847ms step_avg:73.03ms +[2025-09-03 04:17:58] [Rank 0] step:1661/10000 train_time:121319ms step_avg:73.04ms +[2025-09-03 04:17:58] [Rank 0] step:1661/10000 train_time:121319ms step_avg:73.04ms +[2025-09-03 04:17:59] [Rank 0] step:1681/10000 train_time:122793ms step_avg:73.05ms +[2025-09-03 04:17:59] [Rank 0] step:1681/10000 train_time:122793ms step_avg:73.05ms +[2025-09-03 04:18:01] [Rank 0] step:1701/10000 train_time:124265ms step_avg:73.05ms +[2025-09-03 04:18:01] [Rank 0] step:1701/10000 train_time:124265ms step_avg:73.05ms +[2025-09-03 04:18:02] [Rank 0] step:1721/10000 train_time:125739ms step_avg:73.06ms +[2025-09-03 04:18:02] [Rank 0] step:1721/10000 train_time:125739ms step_avg:73.06ms +[2025-09-03 04:18:04] 
[Rank 0] step:1741/10000 train_time:127213ms step_avg:73.07ms +[2025-09-03 04:18:04] [Rank 0] step:1741/10000 train_time:127213ms step_avg:73.07ms +[2025-09-03 04:18:05] [Rank 0] step:1761/10000 train_time:128686ms step_avg:73.08ms +[2025-09-03 04:18:05] [Rank 0] step:1761/10000 train_time:128686ms step_avg:73.08ms +[2025-09-03 04:18:07] [Rank 0] step:1781/10000 train_time:130160ms step_avg:73.08ms +[2025-09-03 04:18:07] [Rank 0] step:1781/10000 train_time:130160ms step_avg:73.08ms +[2025-09-03 04:18:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:18:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:18:20] [Rank 0] PRINT: step:1800/10000 val_loss:4.4991 svd_entropy: attn_qk:H=0.7184,top10E=0.32,eRank=126.6,q75/q25=56.70 attn_vo:H=0.7728,top10E=0.22,eRank=209.7,q75/q25=58.00 mlp_w1:H=0.8434,top10E=0.21,eRank=279.5,q75/q25=6.75 mlp_w2:H=0.9677,top10E=0.04,eRank=619.7,q75/q25=3.10 vo_prod:H=0.6535,top10E=0.36,eRank=84.3,q75/q25=4093.30 train_time:131708ms step_avg:73.17ms +[2025-09-03 04:18:20] [Rank 0] PRINT: step:1800/10000 val_loss:4.4991 svd_entropy: attn_qk:H=0.7184,top10E=0.32,eRank=126.6,q75/q25=56.70 attn_vo:H=0.7728,top10E=0.22,eRank=209.7,q75/q25=58.00 mlp_w1:H=0.8434,top10E=0.21,eRank=279.5,q75/q25=6.75 mlp_w2:H=0.9677,top10E=0.04,eRank=619.7,q75/q25=3.10 vo_prod:H=0.6535,top10E=0.36,eRank=84.3,q75/q25=4093.30 train_time:131708ms step_avg:73.17ms +[2025-09-03 04:18:20] [Rank 0] step:1801/10000 train_time:131722ms step_avg:73.14ms +[2025-09-03 04:18:20] [Rank 0] step:1801/10000 train_time:131722ms step_avg:73.14ms +[2025-09-03 04:18:21] [Rank 0] step:1821/10000 train_time:133123ms step_avg:73.10ms +[2025-09-03 04:18:21] [Rank 0] step:1821/10000 train_time:133123ms step_avg:73.10ms +[2025-09-03 04:18:23] [Rank 0] step:1841/10000 train_time:134593ms step_avg:73.11ms 
+[2025-09-03 04:18:23] [Rank 0] step:1841/10000 train_time:134593ms step_avg:73.11ms +[2025-09-03 04:18:24] [Rank 0] step:1861/10000 train_time:136066ms step_avg:73.11ms +[2025-09-03 04:18:24] [Rank 0] step:1861/10000 train_time:136066ms step_avg:73.11ms +[2025-09-03 04:18:26] [Rank 0] step:1881/10000 train_time:137539ms step_avg:73.12ms +[2025-09-03 04:18:26] [Rank 0] step:1881/10000 train_time:137539ms step_avg:73.12ms +[2025-09-03 04:18:27] [Rank 0] step:1901/10000 train_time:139011ms step_avg:73.13ms +[2025-09-03 04:18:27] [Rank 0] step:1901/10000 train_time:139011ms step_avg:73.13ms +[2025-09-03 04:18:29] [Rank 0] step:1921/10000 train_time:140485ms step_avg:73.13ms +[2025-09-03 04:18:29] [Rank 0] step:1921/10000 train_time:140485ms step_avg:73.13ms +[2025-09-03 04:18:30] [Rank 0] step:1941/10000 train_time:141959ms step_avg:73.14ms +[2025-09-03 04:18:30] [Rank 0] step:1941/10000 train_time:141959ms step_avg:73.14ms +[2025-09-03 04:18:32] [Rank 0] step:1961/10000 train_time:143433ms step_avg:73.14ms +[2025-09-03 04:18:32] [Rank 0] step:1961/10000 train_time:143433ms step_avg:73.14ms +[2025-09-03 04:18:33] [Rank 0] step:1981/10000 train_time:144907ms step_avg:73.15ms +[2025-09-03 04:18:33] [Rank 0] step:1981/10000 train_time:144907ms step_avg:73.15ms +[2025-09-03 04:18:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:18:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:18:46] [Rank 0] PRINT: step:2000/10000 val_loss:4.4422 svd_entropy: attn_qk:H=0.7242,top10E=0.31,eRank=130.8,q75/q25=59.37 attn_vo:H=0.7810,top10E=0.21,eRank=218.9,q75/q25=58.25 mlp_w1:H=0.8516,top10E=0.20,eRank=293.9,q75/q25=6.53 mlp_w2:H=0.9685,top10E=0.04,eRank=623.0,q75/q25=3.05 vo_prod:H=0.6619,top10E=0.34,eRank=89.7,q75/q25=3986.72 train_time:146457ms step_avg:73.23ms +[2025-09-03 04:18:46] [Rank 0] PRINT: step:2000/10000 val_loss:4.4422 svd_entropy: attn_qk:H=0.7242,top10E=0.31,eRank=130.8,q75/q25=59.37 attn_vo:H=0.7810,top10E=0.21,eRank=218.9,q75/q25=58.25 mlp_w1:H=0.8516,top10E=0.20,eRank=293.9,q75/q25=6.53 mlp_w2:H=0.9685,top10E=0.04,eRank=623.0,q75/q25=3.05 vo_prod:H=0.6619,top10E=0.34,eRank=89.7,q75/q25=3986.72 train_time:146457ms step_avg:73.23ms +[2025-09-03 04:18:46] [Rank 0] step:2001/10000 train_time:146471ms step_avg:73.20ms +[2025-09-03 04:18:46] [Rank 0] step:2001/10000 train_time:146471ms step_avg:73.20ms +[2025-09-03 04:18:48] [Rank 0] step:2021/10000 train_time:147890ms step_avg:73.18ms +[2025-09-03 04:18:48] [Rank 0] step:2021/10000 train_time:147890ms step_avg:73.18ms +[2025-09-03 04:18:50] [Rank 0] step:2041/10000 train_time:149750ms step_avg:73.37ms +[2025-09-03 04:18:50] [Rank 0] step:2041/10000 train_time:149750ms step_avg:73.37ms +[2025-09-03 04:18:51] [Rank 0] step:2061/10000 train_time:151222ms step_avg:73.37ms +[2025-09-03 04:18:51] [Rank 0] step:2061/10000 train_time:151222ms step_avg:73.37ms +[2025-09-03 04:18:53] [Rank 0] step:2081/10000 train_time:152694ms step_avg:73.38ms +[2025-09-03 04:18:53] [Rank 0] step:2081/10000 train_time:152694ms step_avg:73.38ms +[2025-09-03 04:18:54] [Rank 0] step:2101/10000 train_time:154168ms step_avg:73.38ms +[2025-09-03 04:18:54] [Rank 0] step:2101/10000 train_time:154168ms step_avg:73.38ms +[2025-09-03 04:18:56] [Rank 0] step:2121/10000 train_time:155641ms step_avg:73.38ms +[2025-09-03 04:18:56] [Rank 0] step:2121/10000 train_time:155641ms step_avg:73.38ms +[2025-09-03 04:18:57] 
[Rank 0] step:2141/10000 train_time:157114ms step_avg:73.38ms +[2025-09-03 04:18:57] [Rank 0] step:2141/10000 train_time:157114ms step_avg:73.38ms +[2025-09-03 04:18:59] [Rank 0] step:2161/10000 train_time:158590ms step_avg:73.39ms +[2025-09-03 04:18:59] [Rank 0] step:2161/10000 train_time:158590ms step_avg:73.39ms +[2025-09-03 04:19:00] [Rank 0] step:2181/10000 train_time:160063ms step_avg:73.39ms +[2025-09-03 04:19:00] [Rank 0] step:2181/10000 train_time:160063ms step_avg:73.39ms +[2025-09-03 04:19:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:19:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:19:13] [Rank 0] PRINT: step:2200/10000 val_loss:4.3730 svd_entropy: attn_qk:H=0.7293,top10E=0.31,eRank=134.7,q75/q25=61.05 attn_vo:H=0.7880,top10E=0.20,eRank=227.0,q75/q25=57.66 mlp_w1:H=0.8582,top10E=0.19,eRank=306.0,q75/q25=6.28 mlp_w2:H=0.9691,top10E=0.04,eRank=625.7,q75/q25=3.01 vo_prod:H=0.6705,top10E=0.33,eRank=95.1,q75/q25=3754.18 train_time:161613ms step_avg:73.46ms +[2025-09-03 04:19:13] [Rank 0] PRINT: step:2200/10000 val_loss:4.3730 svd_entropy: attn_qk:H=0.7293,top10E=0.31,eRank=134.7,q75/q25=61.05 attn_vo:H=0.7880,top10E=0.20,eRank=227.0,q75/q25=57.66 mlp_w1:H=0.8582,top10E=0.19,eRank=306.0,q75/q25=6.28 mlp_w2:H=0.9691,top10E=0.04,eRank=625.7,q75/q25=3.01 vo_prod:H=0.6705,top10E=0.33,eRank=95.1,q75/q25=3754.18 train_time:161613ms step_avg:73.46ms +[2025-09-03 04:19:13] [Rank 0] step:2201/10000 train_time:161628ms step_avg:73.43ms +[2025-09-03 04:19:13] [Rank 0] step:2201/10000 train_time:161628ms step_avg:73.43ms +[2025-09-03 04:19:15] [Rank 0] step:2221/10000 train_time:163045ms step_avg:73.41ms +[2025-09-03 04:19:15] [Rank 0] step:2221/10000 train_time:163045ms step_avg:73.41ms +[2025-09-03 04:19:16] [Rank 0] step:2241/10000 train_time:164553ms step_avg:73.43ms 
+[2025-09-03 04:19:16] [Rank 0] step:2241/10000 train_time:164553ms step_avg:73.43ms +[2025-09-03 04:19:18] [Rank 0] step:2261/10000 train_time:166071ms step_avg:73.45ms +[2025-09-03 04:19:18] [Rank 0] step:2261/10000 train_time:166071ms step_avg:73.45ms +[2025-09-03 04:19:19] [Rank 0] step:2281/10000 train_time:167597ms step_avg:73.48ms +[2025-09-03 04:19:19] [Rank 0] step:2281/10000 train_time:167597ms step_avg:73.48ms +[2025-09-03 04:19:21] [Rank 0] step:2301/10000 train_time:169114ms step_avg:73.50ms +[2025-09-03 04:19:21] [Rank 0] step:2301/10000 train_time:169114ms step_avg:73.50ms +[2025-09-03 04:19:22] [Rank 0] step:2321/10000 train_time:170632ms step_avg:73.52ms +[2025-09-03 04:19:22] [Rank 0] step:2321/10000 train_time:170632ms step_avg:73.52ms +[2025-09-03 04:19:24] [Rank 0] step:2341/10000 train_time:172149ms step_avg:73.54ms +[2025-09-03 04:19:24] [Rank 0] step:2341/10000 train_time:172149ms step_avg:73.54ms +[2025-09-03 04:19:25] [Rank 0] step:2361/10000 train_time:173668ms step_avg:73.56ms +[2025-09-03 04:19:25] [Rank 0] step:2361/10000 train_time:173668ms step_avg:73.56ms +[2025-09-03 04:19:27] [Rank 0] step:2381/10000 train_time:175188ms step_avg:73.58ms +[2025-09-03 04:19:27] [Rank 0] step:2381/10000 train_time:175188ms step_avg:73.58ms +[2025-09-03 04:19:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:19:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:19:40] [Rank 0] PRINT: step:2400/10000 val_loss:4.2953 svd_entropy: attn_qk:H=0.7328,top10E=0.30,eRank=137.3,q75/q25=61.89 attn_vo:H=0.7941,top10E=0.20,eRank=234.2,q75/q25=57.31 mlp_w1:H=0.8640,top10E=0.19,eRank=317.4,q75/q25=6.07 mlp_w2:H=0.9696,top10E=0.04,eRank=627.5,q75/q25=2.99 vo_prod:H=0.6783,top10E=0.32,eRank=100.0,q75/q25=3496.28 train_time:176785ms step_avg:73.66ms +[2025-09-03 04:19:40] [Rank 0] PRINT: step:2400/10000 val_loss:4.2953 svd_entropy: attn_qk:H=0.7328,top10E=0.30,eRank=137.3,q75/q25=61.89 attn_vo:H=0.7941,top10E=0.20,eRank=234.2,q75/q25=57.31 mlp_w1:H=0.8640,top10E=0.19,eRank=317.4,q75/q25=6.07 mlp_w2:H=0.9696,top10E=0.04,eRank=627.5,q75/q25=2.99 vo_prod:H=0.6783,top10E=0.32,eRank=100.0,q75/q25=3496.28 train_time:176785ms step_avg:73.66ms +[2025-09-03 04:19:40] [Rank 0] step:2401/10000 train_time:176800ms step_avg:73.64ms +[2025-09-03 04:19:40] [Rank 0] step:2401/10000 train_time:176800ms step_avg:73.64ms +[2025-09-03 04:19:42] [Rank 0] step:2421/10000 train_time:178251ms step_avg:73.63ms +[2025-09-03 04:19:42] [Rank 0] step:2421/10000 train_time:178251ms step_avg:73.63ms +[2025-09-03 04:19:43] [Rank 0] step:2441/10000 train_time:179806ms step_avg:73.66ms +[2025-09-03 04:19:43] [Rank 0] step:2441/10000 train_time:179806ms step_avg:73.66ms +[2025-09-03 04:19:45] [Rank 0] step:2461/10000 train_time:181325ms step_avg:73.68ms +[2025-09-03 04:19:45] [Rank 0] step:2461/10000 train_time:181325ms step_avg:73.68ms +[2025-09-03 04:19:46] [Rank 0] step:2481/10000 train_time:182842ms step_avg:73.70ms +[2025-09-03 04:19:46] [Rank 0] step:2481/10000 train_time:182842ms step_avg:73.70ms +[2025-09-03 04:19:48] [Rank 0] step:2501/10000 train_time:184360ms step_avg:73.71ms +[2025-09-03 04:19:48] [Rank 0] step:2501/10000 train_time:184360ms step_avg:73.71ms +[2025-09-03 04:19:49] [Rank 0] step:2521/10000 train_time:185880ms step_avg:73.73ms +[2025-09-03 04:19:49] [Rank 0] step:2521/10000 train_time:185880ms step_avg:73.73ms +[2025-09-03 
04:19:51] [Rank 0] step:2541/10000 train_time:187398ms step_avg:73.75ms +[2025-09-03 04:19:51] [Rank 0] step:2541/10000 train_time:187398ms step_avg:73.75ms +[2025-09-03 04:19:52] [Rank 0] step:2561/10000 train_time:188920ms step_avg:73.77ms +[2025-09-03 04:19:52] [Rank 0] step:2561/10000 train_time:188920ms step_avg:73.77ms +[2025-09-03 04:19:54] [Rank 0] step:2581/10000 train_time:190441ms step_avg:73.79ms +[2025-09-03 04:19:54] [Rank 0] step:2581/10000 train_time:190441ms step_avg:73.79ms +[2025-09-03 04:19:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:19:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:20:07] [Rank 0] PRINT: step:2600/10000 val_loss:4.2486 svd_entropy: attn_qk:H=0.7365,top10E=0.30,eRank=140.4,q75/q25=62.64 attn_vo:H=0.7996,top10E=0.19,eRank=240.6,q75/q25=56.44 mlp_w1:H=0.8690,top10E=0.18,eRank=327.4,q75/q25=5.88 mlp_w2:H=0.9699,top10E=0.04,eRank=629.0,q75/q25=2.97 vo_prod:H=0.6852,top10E=0.31,eRank=104.4,q75/q25=3302.62 train_time:192037ms step_avg:73.86ms +[2025-09-03 04:20:07] [Rank 0] PRINT: step:2600/10000 val_loss:4.2486 svd_entropy: attn_qk:H=0.7365,top10E=0.30,eRank=140.4,q75/q25=62.64 attn_vo:H=0.7996,top10E=0.19,eRank=240.6,q75/q25=56.44 mlp_w1:H=0.8690,top10E=0.18,eRank=327.4,q75/q25=5.88 mlp_w2:H=0.9699,top10E=0.04,eRank=629.0,q75/q25=2.97 vo_prod:H=0.6852,top10E=0.31,eRank=104.4,q75/q25=3302.62 train_time:192037ms step_avg:73.86ms +[2025-09-03 04:20:07] [Rank 0] step:2601/10000 train_time:192051ms step_avg:73.84ms +[2025-09-03 04:20:07] [Rank 0] step:2601/10000 train_time:192051ms step_avg:73.84ms +[2025-09-03 04:20:09] [Rank 0] step:2621/10000 train_time:193509ms step_avg:73.83ms +[2025-09-03 04:20:09] [Rank 0] step:2621/10000 train_time:193509ms step_avg:73.83ms +[2025-09-03 04:20:10] [Rank 0] step:2641/10000 train_time:195025ms 
step_avg:73.85ms +[2025-09-03 04:20:10] [Rank 0] step:2641/10000 train_time:195025ms step_avg:73.85ms +[2025-09-03 04:20:12] [Rank 0] step:2661/10000 train_time:196543ms step_avg:73.86ms +[2025-09-03 04:20:12] [Rank 0] step:2661/10000 train_time:196543ms step_avg:73.86ms +[2025-09-03 04:20:13] [Rank 0] step:2681/10000 train_time:198060ms step_avg:73.88ms +[2025-09-03 04:20:13] [Rank 0] step:2681/10000 train_time:198060ms step_avg:73.88ms +[2025-09-03 04:20:15] [Rank 0] step:2701/10000 train_time:199577ms step_avg:73.89ms +[2025-09-03 04:20:15] [Rank 0] step:2701/10000 train_time:199577ms step_avg:73.89ms +[2025-09-03 04:20:16] [Rank 0] step:2721/10000 train_time:201094ms step_avg:73.90ms +[2025-09-03 04:20:16] [Rank 0] step:2721/10000 train_time:201094ms step_avg:73.90ms +[2025-09-03 04:20:18] [Rank 0] step:2741/10000 train_time:202611ms step_avg:73.92ms +[2025-09-03 04:20:18] [Rank 0] step:2741/10000 train_time:202611ms step_avg:73.92ms +[2025-09-03 04:20:19] [Rank 0] step:2761/10000 train_time:204130ms step_avg:73.93ms +[2025-09-03 04:20:19] [Rank 0] step:2761/10000 train_time:204130ms step_avg:73.93ms +[2025-09-03 04:20:21] [Rank 0] step:2781/10000 train_time:205659ms step_avg:73.95ms +[2025-09-03 04:20:21] [Rank 0] step:2781/10000 train_time:205659ms step_avg:73.95ms +[2025-09-03 04:20:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:20:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:20:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.2123 svd_entropy: attn_qk:H=0.7401,top10E=0.29,eRank=143.4,q75/q25=63.22 attn_vo:H=0.8045,top10E=0.18,eRank=246.5,q75/q25=55.46 mlp_w1:H=0.8735,top10E=0.18,eRank=336.6,q75/q25=5.71 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.96 vo_prod:H=0.6918,top10E=0.30,eRank=109.0,q75/q25=3000.12 train_time:207254ms step_avg:74.02ms +[2025-09-03 04:20:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.2123 svd_entropy: attn_qk:H=0.7401,top10E=0.29,eRank=143.4,q75/q25=63.22 attn_vo:H=0.8045,top10E=0.18,eRank=246.5,q75/q25=55.46 mlp_w1:H=0.8735,top10E=0.18,eRank=336.6,q75/q25=5.71 mlp_w2:H=0.9702,top10E=0.04,eRank=630.0,q75/q25=2.96 vo_prod:H=0.6918,top10E=0.30,eRank=109.0,q75/q25=3000.12 train_time:207254ms step_avg:74.02ms +[2025-09-03 04:20:34] [Rank 0] step:2801/10000 train_time:207268ms step_avg:74.00ms +[2025-09-03 04:20:34] [Rank 0] step:2801/10000 train_time:207268ms step_avg:74.00ms +[2025-09-03 04:20:36] [Rank 0] step:2821/10000 train_time:208712ms step_avg:73.99ms +[2025-09-03 04:20:36] [Rank 0] step:2821/10000 train_time:208712ms step_avg:73.99ms +[2025-09-03 04:20:37] [Rank 0] step:2841/10000 train_time:210228ms step_avg:74.00ms +[2025-09-03 04:20:37] [Rank 0] step:2841/10000 train_time:210228ms step_avg:74.00ms +[2025-09-03 04:20:39] [Rank 0] step:2861/10000 train_time:211746ms step_avg:74.01ms +[2025-09-03 04:20:39] [Rank 0] step:2861/10000 train_time:211746ms step_avg:74.01ms +[2025-09-03 04:20:40] [Rank 0] step:2881/10000 train_time:213263ms step_avg:74.02ms +[2025-09-03 04:20:40] [Rank 0] step:2881/10000 train_time:213263ms step_avg:74.02ms +[2025-09-03 04:20:42] [Rank 0] step:2901/10000 train_time:214781ms step_avg:74.04ms +[2025-09-03 04:20:42] [Rank 0] step:2901/10000 train_time:214781ms step_avg:74.04ms +[2025-09-03 04:20:43] [Rank 0] step:2921/10000 train_time:216298ms step_avg:74.05ms +[2025-09-03 04:20:43] [Rank 0] step:2921/10000 train_time:216298ms step_avg:74.05ms +[2025-09-03 
04:20:45] [Rank 0] step:2941/10000 train_time:217815ms step_avg:74.06ms +[2025-09-03 04:20:45] [Rank 0] step:2941/10000 train_time:217815ms step_avg:74.06ms +[2025-09-03 04:20:46] [Rank 0] step:2961/10000 train_time:219380ms step_avg:74.09ms +[2025-09-03 04:20:46] [Rank 0] step:2961/10000 train_time:219380ms step_avg:74.09ms +[2025-09-03 04:20:48] [Rank 0] step:2981/10000 train_time:220927ms step_avg:74.11ms +[2025-09-03 04:20:48] [Rank 0] step:2981/10000 train_time:220927ms step_avg:74.11ms +[2025-09-03 04:20:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:20:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:21:01] [Rank 0] PRINT: step:3000/10000 val_loss:4.1705 svd_entropy: attn_qk:H=0.7432,top10E=0.29,eRank=146.0,q75/q25=63.46 attn_vo:H=0.8089,top10E=0.18,eRank=252.0,q75/q25=53.25 mlp_w1:H=0.8772,top10E=0.17,eRank=344.6,q75/q25=5.58 mlp_w2:H=0.9704,top10E=0.04,eRank=630.9,q75/q25=2.94 vo_prod:H=0.6977,top10E=0.29,eRank=113.1,q75/q25=2750.71 train_time:222534ms step_avg:74.18ms +[2025-09-03 04:21:01] [Rank 0] PRINT: step:3000/10000 val_loss:4.1705 svd_entropy: attn_qk:H=0.7432,top10E=0.29,eRank=146.0,q75/q25=63.46 attn_vo:H=0.8089,top10E=0.18,eRank=252.0,q75/q25=53.25 mlp_w1:H=0.8772,top10E=0.17,eRank=344.6,q75/q25=5.58 mlp_w2:H=0.9704,top10E=0.04,eRank=630.9,q75/q25=2.94 vo_prod:H=0.6977,top10E=0.29,eRank=113.1,q75/q25=2750.71 train_time:222534ms step_avg:74.18ms +[2025-09-03 04:21:01] [Rank 0] step:3001/10000 train_time:222549ms step_avg:74.16ms +[2025-09-03 04:21:01] [Rank 0] step:3001/10000 train_time:222549ms step_avg:74.16ms +[2025-09-03 04:21:03] [Rank 0] step:3021/10000 train_time:223999ms step_avg:74.15ms +[2025-09-03 04:21:03] [Rank 0] step:3021/10000 train_time:223999ms step_avg:74.15ms +[2025-09-03 04:21:04] [Rank 0] step:3041/10000 train_time:225523ms 
step_avg:74.16ms +[2025-09-03 04:21:04] [Rank 0] step:3041/10000 train_time:225523ms step_avg:74.16ms +[2025-09-03 04:21:06] [Rank 0] step:3061/10000 train_time:227048ms step_avg:74.17ms +[2025-09-03 04:21:06] [Rank 0] step:3061/10000 train_time:227048ms step_avg:74.17ms +[2025-09-03 04:21:07] [Rank 0] step:3081/10000 train_time:228572ms step_avg:74.19ms +[2025-09-03 04:21:07] [Rank 0] step:3081/10000 train_time:228572ms step_avg:74.19ms +[2025-09-03 04:21:09] [Rank 0] step:3101/10000 train_time:230098ms step_avg:74.20ms +[2025-09-03 04:21:09] [Rank 0] step:3101/10000 train_time:230098ms step_avg:74.20ms +[2025-09-03 04:21:10] [Rank 0] step:3121/10000 train_time:231624ms step_avg:74.21ms +[2025-09-03 04:21:10] [Rank 0] step:3121/10000 train_time:231624ms step_avg:74.21ms +[2025-09-03 04:21:12] [Rank 0] step:3141/10000 train_time:233150ms step_avg:74.23ms +[2025-09-03 04:21:12] [Rank 0] step:3141/10000 train_time:233150ms step_avg:74.23ms +[2025-09-03 04:21:13] [Rank 0] step:3161/10000 train_time:234676ms step_avg:74.24ms +[2025-09-03 04:21:13] [Rank 0] step:3161/10000 train_time:234676ms step_avg:74.24ms +[2025-09-03 04:21:15] [Rank 0] step:3181/10000 train_time:236202ms step_avg:74.25ms +[2025-09-03 04:21:15] [Rank 0] step:3181/10000 train_time:236202ms step_avg:74.25ms +[2025-09-03 04:21:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:21:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:21:28] [Rank 0] PRINT: step:3200/10000 val_loss:4.1383 svd_entropy: attn_qk:H=0.7458,top10E=0.29,eRank=148.2,q75/q25=63.20 attn_vo:H=0.8157,top10E=0.17,eRank=260.3,q75/q25=53.06 mlp_w1:H=0.8806,top10E=0.17,eRank=352.1,q75/q25=5.43 mlp_w2:H=0.9705,top10E=0.04,eRank=631.5,q75/q25=2.93 vo_prod:H=0.7039,top10E=0.28,eRank=117.0,q75/q25=2671.19 train_time:237806ms step_avg:74.31ms +[2025-09-03 04:21:28] [Rank 0] PRINT: step:3200/10000 val_loss:4.1383 svd_entropy: attn_qk:H=0.7458,top10E=0.29,eRank=148.2,q75/q25=63.20 attn_vo:H=0.8157,top10E=0.17,eRank=260.3,q75/q25=53.06 mlp_w1:H=0.8806,top10E=0.17,eRank=352.1,q75/q25=5.43 mlp_w2:H=0.9705,top10E=0.04,eRank=631.5,q75/q25=2.93 vo_prod:H=0.7039,top10E=0.28,eRank=117.0,q75/q25=2671.19 train_time:237806ms step_avg:74.31ms +[2025-09-03 04:21:28] [Rank 0] step:3201/10000 train_time:237820ms step_avg:74.30ms +[2025-09-03 04:21:28] [Rank 0] step:3201/10000 train_time:237820ms step_avg:74.30ms +[2025-09-03 04:21:30] [Rank 0] step:3221/10000 train_time:239276ms step_avg:74.29ms +[2025-09-03 04:21:30] [Rank 0] step:3221/10000 train_time:239276ms step_avg:74.29ms +[2025-09-03 04:21:31] [Rank 0] step:3241/10000 train_time:240801ms step_avg:74.30ms +[2025-09-03 04:21:31] [Rank 0] step:3241/10000 train_time:240801ms step_avg:74.30ms +[2025-09-03 04:21:33] [Rank 0] step:3261/10000 train_time:242326ms step_avg:74.31ms +[2025-09-03 04:21:33] [Rank 0] step:3261/10000 train_time:242326ms step_avg:74.31ms +[2025-09-03 04:21:34] [Rank 0] step:3281/10000 train_time:243853ms step_avg:74.32ms +[2025-09-03 04:21:34] [Rank 0] step:3281/10000 train_time:243853ms step_avg:74.32ms +[2025-09-03 04:21:36] [Rank 0] step:3301/10000 train_time:245380ms step_avg:74.33ms +[2025-09-03 04:21:36] [Rank 0] step:3301/10000 train_time:245380ms step_avg:74.33ms +[2025-09-03 04:21:37] [Rank 0] step:3321/10000 train_time:246906ms step_avg:74.35ms +[2025-09-03 04:21:37] [Rank 0] step:3321/10000 train_time:246906ms step_avg:74.35ms +[2025-09-03 
04:21:39] [Rank 0] step:3341/10000 train_time:248434ms step_avg:74.36ms +[2025-09-03 04:21:39] [Rank 0] step:3341/10000 train_time:248434ms step_avg:74.36ms +[2025-09-03 04:21:40] [Rank 0] step:3361/10000 train_time:249962ms step_avg:74.37ms +[2025-09-03 04:21:40] [Rank 0] step:3361/10000 train_time:249962ms step_avg:74.37ms +[2025-09-03 04:21:42] [Rank 0] step:3381/10000 train_time:251489ms step_avg:74.38ms +[2025-09-03 04:21:42] [Rank 0] step:3381/10000 train_time:251489ms step_avg:74.38ms +[2025-09-03 04:21:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:21:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:21:55] [Rank 0] PRINT: step:3400/10000 val_loss:4.0992 svd_entropy: attn_qk:H=0.7486,top10E=0.28,eRank=150.7,q75/q25=63.40 attn_vo:H=0.8193,top10E=0.17,eRank=265.1,q75/q25=52.57 mlp_w1:H=0.8838,top10E=0.17,eRank=359.2,q75/q25=5.31 mlp_w2:H=0.9707,top10E=0.04,eRank=632.0,q75/q25=2.92 vo_prod:H=0.7084,top10E=0.28,eRank=120.7,q75/q25=2547.78 train_time:253096ms step_avg:74.44ms +[2025-09-03 04:21:55] [Rank 0] PRINT: step:3400/10000 val_loss:4.0992 svd_entropy: attn_qk:H=0.7486,top10E=0.28,eRank=150.7,q75/q25=63.40 attn_vo:H=0.8193,top10E=0.17,eRank=265.1,q75/q25=52.57 mlp_w1:H=0.8838,top10E=0.17,eRank=359.2,q75/q25=5.31 mlp_w2:H=0.9707,top10E=0.04,eRank=632.0,q75/q25=2.92 vo_prod:H=0.7084,top10E=0.28,eRank=120.7,q75/q25=2547.78 train_time:253096ms step_avg:74.44ms +[2025-09-03 04:21:55] [Rank 0] step:3401/10000 train_time:253110ms step_avg:74.42ms +[2025-09-03 04:21:55] [Rank 0] step:3401/10000 train_time:253110ms step_avg:74.42ms +[2025-09-03 04:21:57] [Rank 0] step:3421/10000 train_time:254565ms step_avg:74.41ms +[2025-09-03 04:21:57] [Rank 0] step:3421/10000 train_time:254565ms step_avg:74.41ms +[2025-09-03 04:21:58] [Rank 0] step:3441/10000 train_time:256089ms 
step_avg:74.42ms +[2025-09-03 04:21:58] [Rank 0] step:3441/10000 train_time:256089ms step_avg:74.42ms +[2025-09-03 04:22:00] [Rank 0] step:3461/10000 train_time:257614ms step_avg:74.43ms +[2025-09-03 04:22:00] [Rank 0] step:3461/10000 train_time:257614ms step_avg:74.43ms +[2025-09-03 04:22:01] [Rank 0] step:3481/10000 train_time:259139ms step_avg:74.44ms +[2025-09-03 04:22:01] [Rank 0] step:3481/10000 train_time:259139ms step_avg:74.44ms +[2025-09-03 04:22:03] [Rank 0] step:3501/10000 train_time:260667ms step_avg:74.45ms +[2025-09-03 04:22:03] [Rank 0] step:3501/10000 train_time:260667ms step_avg:74.45ms +[2025-09-03 04:22:04] [Rank 0] step:3521/10000 train_time:262195ms step_avg:74.47ms +[2025-09-03 04:22:04] [Rank 0] step:3521/10000 train_time:262195ms step_avg:74.47ms +[2025-09-03 04:22:06] [Rank 0] step:3541/10000 train_time:263720ms step_avg:74.48ms +[2025-09-03 04:22:06] [Rank 0] step:3541/10000 train_time:263720ms step_avg:74.48ms +[2025-09-03 04:22:07] [Rank 0] step:3561/10000 train_time:265246ms step_avg:74.49ms +[2025-09-03 04:22:07] [Rank 0] step:3561/10000 train_time:265246ms step_avg:74.49ms +[2025-09-03 04:22:09] [Rank 0] step:3581/10000 train_time:266773ms step_avg:74.50ms +[2025-09-03 04:22:09] [Rank 0] step:3581/10000 train_time:266773ms step_avg:74.50ms +[2025-09-03 04:22:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:22:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:22:22] [Rank 0] PRINT: step:3600/10000 val_loss:4.0849 svd_entropy: attn_qk:H=0.7513,top10E=0.28,eRank=153.1,q75/q25=63.29 attn_vo:H=0.8224,top10E=0.16,eRank=269.1,q75/q25=51.50 mlp_w1:H=0.8865,top10E=0.16,eRank=365.4,q75/q25=5.19 mlp_w2:H=0.9708,top10E=0.04,eRank=632.5,q75/q25=2.91 vo_prod:H=0.7125,top10E=0.27,eRank=123.8,q75/q25=2388.82 train_time:268380ms step_avg:74.55ms +[2025-09-03 04:22:22] [Rank 0] PRINT: step:3600/10000 val_loss:4.0849 svd_entropy: attn_qk:H=0.7513,top10E=0.28,eRank=153.1,q75/q25=63.29 attn_vo:H=0.8224,top10E=0.16,eRank=269.1,q75/q25=51.50 mlp_w1:H=0.8865,top10E=0.16,eRank=365.4,q75/q25=5.19 mlp_w2:H=0.9708,top10E=0.04,eRank=632.5,q75/q25=2.91 vo_prod:H=0.7125,top10E=0.27,eRank=123.8,q75/q25=2388.82 train_time:268380ms step_avg:74.55ms +[2025-09-03 04:22:22] [Rank 0] step:3601/10000 train_time:268395ms step_avg:74.53ms +[2025-09-03 04:22:22] [Rank 0] step:3601/10000 train_time:268395ms step_avg:74.53ms +[2025-09-03 04:22:24] [Rank 0] step:3621/10000 train_time:269849ms step_avg:74.52ms +[2025-09-03 04:22:24] [Rank 0] step:3621/10000 train_time:269849ms step_avg:74.52ms +[2025-09-03 04:22:25] [Rank 0] step:3641/10000 train_time:271374ms step_avg:74.53ms +[2025-09-03 04:22:25] [Rank 0] step:3641/10000 train_time:271374ms step_avg:74.53ms +[2025-09-03 04:22:27] [Rank 0] step:3661/10000 train_time:272899ms step_avg:74.54ms +[2025-09-03 04:22:27] [Rank 0] step:3661/10000 train_time:272899ms step_avg:74.54ms +[2025-09-03 04:22:28] [Rank 0] step:3681/10000 train_time:274425ms step_avg:74.55ms +[2025-09-03 04:22:28] [Rank 0] step:3681/10000 train_time:274425ms step_avg:74.55ms +[2025-09-03 04:22:30] [Rank 0] step:3701/10000 train_time:275951ms step_avg:74.56ms +[2025-09-03 04:22:30] [Rank 0] step:3701/10000 train_time:275951ms step_avg:74.56ms +[2025-09-03 04:22:31] [Rank 0] step:3721/10000 train_time:277505ms step_avg:74.58ms +[2025-09-03 04:22:31] [Rank 0] step:3721/10000 train_time:277505ms step_avg:74.58ms +[2025-09-03 
04:22:33] [Rank 0] step:3741/10000 train_time:279068ms step_avg:74.60ms +[2025-09-03 04:22:33] [Rank 0] step:3741/10000 train_time:279068ms step_avg:74.60ms +[2025-09-03 04:22:35] [Rank 0] step:3761/10000 train_time:280630ms step_avg:74.62ms +[2025-09-03 04:22:35] [Rank 0] step:3761/10000 train_time:280630ms step_avg:74.62ms +[2025-09-03 04:22:36] [Rank 0] step:3781/10000 train_time:282193ms step_avg:74.63ms +[2025-09-03 04:22:36] [Rank 0] step:3781/10000 train_time:282193ms step_avg:74.63ms +[2025-09-03 04:22:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:22:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:22:49] [Rank 0] PRINT: step:3800/10000 val_loss:4.0419 svd_entropy: attn_qk:H=0.7530,top10E=0.28,eRank=154.7,q75/q25=62.64 attn_vo:H=0.8251,top10E=0.16,eRank=272.8,q75/q25=50.18 mlp_w1:H=0.8890,top10E=0.16,eRank=371.3,q75/q25=5.10 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.91 vo_prod:H=0.7165,top10E=0.27,eRank=127.1,q75/q25=2255.73 train_time:283837ms step_avg:74.69ms +[2025-09-03 04:22:49] [Rank 0] PRINT: step:3800/10000 val_loss:4.0419 svd_entropy: attn_qk:H=0.7530,top10E=0.28,eRank=154.7,q75/q25=62.64 attn_vo:H=0.8251,top10E=0.16,eRank=272.8,q75/q25=50.18 mlp_w1:H=0.8890,top10E=0.16,eRank=371.3,q75/q25=5.10 mlp_w2:H=0.9708,top10E=0.04,eRank=632.8,q75/q25=2.91 vo_prod:H=0.7165,top10E=0.27,eRank=127.1,q75/q25=2255.73 train_time:283837ms step_avg:74.69ms +[2025-09-03 04:22:49] [Rank 0] step:3801/10000 train_time:283852ms step_avg:74.68ms +[2025-09-03 04:22:49] [Rank 0] step:3801/10000 train_time:283852ms step_avg:74.68ms +[2025-09-03 04:22:51] [Rank 0] step:3821/10000 train_time:285347ms step_avg:74.68ms +[2025-09-03 04:22:51] [Rank 0] step:3821/10000 train_time:285347ms step_avg:74.68ms +[2025-09-03 04:22:53] [Rank 0] step:3841/10000 train_time:286945ms 
step_avg:74.71ms +[2025-09-03 04:22:53] [Rank 0] step:3841/10000 train_time:286945ms step_avg:74.71ms +[2025-09-03 04:22:54] [Rank 0] step:3861/10000 train_time:288563ms step_avg:74.74ms +[2025-09-03 04:22:54] [Rank 0] step:3861/10000 train_time:288563ms step_avg:74.74ms +[2025-09-03 04:22:56] [Rank 0] step:3881/10000 train_time:290124ms step_avg:74.75ms +[2025-09-03 04:22:56] [Rank 0] step:3881/10000 train_time:290124ms step_avg:74.75ms +[2025-09-03 04:22:57] [Rank 0] step:3901/10000 train_time:291685ms step_avg:74.77ms +[2025-09-03 04:22:57] [Rank 0] step:3901/10000 train_time:291685ms step_avg:74.77ms +[2025-09-03 04:22:59] [Rank 0] step:3921/10000 train_time:293249ms step_avg:74.79ms +[2025-09-03 04:22:59] [Rank 0] step:3921/10000 train_time:293249ms step_avg:74.79ms +[2025-09-03 04:23:01] [Rank 0] step:3941/10000 train_time:294812ms step_avg:74.81ms +[2025-09-03 04:23:01] [Rank 0] step:3941/10000 train_time:294812ms step_avg:74.81ms +[2025-09-03 04:23:02] [Rank 0] step:3961/10000 train_time:296373ms step_avg:74.82ms +[2025-09-03 04:23:02] [Rank 0] step:3961/10000 train_time:296373ms step_avg:74.82ms +[2025-09-03 04:23:04] [Rank 0] step:3981/10000 train_time:297937ms step_avg:74.84ms +[2025-09-03 04:23:04] [Rank 0] step:3981/10000 train_time:297937ms step_avg:74.84ms +[2025-09-03 04:23:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:23:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:23:17] [Rank 0] PRINT: step:4000/10000 val_loss:4.0162 svd_entropy: attn_qk:H=0.7552,top10E=0.28,eRank=156.7,q75/q25=62.44 attn_vo:H=0.8276,top10E=0.16,eRank=276.1,q75/q25=49.15 mlp_w1:H=0.8914,top10E=0.16,eRank=377.0,q75/q25=5.01 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.90 vo_prod:H=0.7202,top10E=0.26,eRank=130.0,q75/q25=2141.17 train_time:299580ms step_avg:74.89ms +[2025-09-03 04:23:17] [Rank 0] PRINT: step:4000/10000 val_loss:4.0162 svd_entropy: attn_qk:H=0.7552,top10E=0.28,eRank=156.7,q75/q25=62.44 attn_vo:H=0.8276,top10E=0.16,eRank=276.1,q75/q25=49.15 mlp_w1:H=0.8914,top10E=0.16,eRank=377.0,q75/q25=5.01 mlp_w2:H=0.9709,top10E=0.04,eRank=633.1,q75/q25=2.90 vo_prod:H=0.7202,top10E=0.26,eRank=130.0,q75/q25=2141.17 train_time:299580ms step_avg:74.89ms +[2025-09-03 04:23:17] [Rank 0] step:4001/10000 train_time:299593ms step_avg:74.88ms +[2025-09-03 04:23:17] [Rank 0] step:4001/10000 train_time:299593ms step_avg:74.88ms +[2025-09-03 04:23:19] [Rank 0] step:4021/10000 train_time:301081ms step_avg:74.88ms +[2025-09-03 04:23:19] [Rank 0] step:4021/10000 train_time:301081ms step_avg:74.88ms +[2025-09-03 04:23:20] [Rank 0] step:4041/10000 train_time:302643ms step_avg:74.89ms +[2025-09-03 04:23:20] [Rank 0] step:4041/10000 train_time:302643ms step_avg:74.89ms +[2025-09-03 04:23:22] [Rank 0] step:4061/10000 train_time:304205ms step_avg:74.91ms +[2025-09-03 04:23:22] [Rank 0] step:4061/10000 train_time:304205ms step_avg:74.91ms +[2025-09-03 04:23:24] [Rank 0] step:4081/10000 train_time:306367ms step_avg:75.07ms +[2025-09-03 04:23:24] [Rank 0] step:4081/10000 train_time:306367ms step_avg:75.07ms +[2025-09-03 04:23:25] [Rank 0] step:4101/10000 train_time:307933ms step_avg:75.09ms +[2025-09-03 04:23:25] [Rank 0] step:4101/10000 train_time:307933ms step_avg:75.09ms +[2025-09-03 04:23:27] [Rank 0] step:4121/10000 train_time:309497ms step_avg:75.10ms +[2025-09-03 04:23:27] [Rank 0] step:4121/10000 train_time:309497ms step_avg:75.10ms +[2025-09-03 
04:23:29] [Rank 0] step:4141/10000 train_time:311060ms step_avg:75.12ms +[2025-09-03 04:23:29] [Rank 0] step:4141/10000 train_time:311060ms step_avg:75.12ms +[2025-09-03 04:23:30] [Rank 0] step:4161/10000 train_time:312622ms step_avg:75.13ms +[2025-09-03 04:23:30] [Rank 0] step:4161/10000 train_time:312622ms step_avg:75.13ms +[2025-09-03 04:23:32] [Rank 0] step:4181/10000 train_time:314186ms step_avg:75.15ms +[2025-09-03 04:23:32] [Rank 0] step:4181/10000 train_time:314186ms step_avg:75.15ms +[2025-09-03 04:23:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:23:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:23:45] [Rank 0] PRINT: step:4200/10000 val_loss:3.9965 svd_entropy: attn_qk:H=0.7572,top10E=0.27,eRank=158.5,q75/q25=62.03 attn_vo:H=0.8299,top10E=0.16,eRank=279.3,q75/q25=48.12 mlp_w1:H=0.8935,top10E=0.16,eRank=382.2,q75/q25=4.93 mlp_w2:H=0.9710,top10E=0.04,eRank=633.3,q75/q25=2.90 vo_prod:H=0.7235,top10E=0.26,eRank=132.9,q75/q25=2043.79 train_time:315830ms step_avg:75.20ms +[2025-09-03 04:23:45] [Rank 0] PRINT: step:4200/10000 val_loss:3.9965 svd_entropy: attn_qk:H=0.7572,top10E=0.27,eRank=158.5,q75/q25=62.03 attn_vo:H=0.8299,top10E=0.16,eRank=279.3,q75/q25=48.12 mlp_w1:H=0.8935,top10E=0.16,eRank=382.2,q75/q25=4.93 mlp_w2:H=0.9710,top10E=0.04,eRank=633.3,q75/q25=2.90 vo_prod:H=0.7235,top10E=0.26,eRank=132.9,q75/q25=2043.79 train_time:315830ms step_avg:75.20ms +[2025-09-03 04:23:45] [Rank 0] step:4201/10000 train_time:315845ms step_avg:75.18ms +[2025-09-03 04:23:45] [Rank 0] step:4201/10000 train_time:315845ms step_avg:75.18ms +[2025-09-03 04:23:47] [Rank 0] step:4221/10000 train_time:317337ms step_avg:75.18ms +[2025-09-03 04:23:47] [Rank 0] step:4221/10000 train_time:317337ms step_avg:75.18ms +[2025-09-03 04:23:48] [Rank 0] step:4241/10000 train_time:318900ms 
step_avg:75.19ms +[2025-09-03 04:23:48] [Rank 0] step:4241/10000 train_time:318900ms step_avg:75.19ms +[2025-09-03 04:23:50] [Rank 0] step:4261/10000 train_time:320463ms step_avg:75.21ms +[2025-09-03 04:23:50] [Rank 0] step:4261/10000 train_time:320463ms step_avg:75.21ms +[2025-09-03 04:23:51] [Rank 0] step:4281/10000 train_time:322026ms step_avg:75.22ms +[2025-09-03 04:23:51] [Rank 0] step:4281/10000 train_time:322026ms step_avg:75.22ms +[2025-09-03 04:23:53] [Rank 0] step:4301/10000 train_time:323589ms step_avg:75.24ms +[2025-09-03 04:23:53] [Rank 0] step:4301/10000 train_time:323589ms step_avg:75.24ms +[2025-09-03 04:23:54] [Rank 0] step:4321/10000 train_time:325155ms step_avg:75.25ms +[2025-09-03 04:23:54] [Rank 0] step:4321/10000 train_time:325155ms step_avg:75.25ms +[2025-09-03 04:23:56] [Rank 0] step:4341/10000 train_time:326756ms step_avg:75.27ms +[2025-09-03 04:23:56] [Rank 0] step:4341/10000 train_time:326756ms step_avg:75.27ms +[2025-09-03 04:23:58] [Rank 0] step:4361/10000 train_time:328376ms step_avg:75.30ms +[2025-09-03 04:23:58] [Rank 0] step:4361/10000 train_time:328376ms step_avg:75.30ms +[2025-09-03 04:23:59] [Rank 0] step:4381/10000 train_time:329938ms step_avg:75.31ms +[2025-09-03 04:23:59] [Rank 0] step:4381/10000 train_time:329938ms step_avg:75.31ms +[2025-09-03 04:24:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:24:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:24:12] [Rank 0] PRINT: step:4400/10000 val_loss:3.9747 svd_entropy: attn_qk:H=0.7592,top10E=0.27,eRank=160.5,q75/q25=61.35 attn_vo:H=0.8320,top10E=0.15,eRank=282.1,q75/q25=46.91 mlp_w1:H=0.8955,top10E=0.15,eRank=387.1,q75/q25=4.87 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.90 vo_prod:H=0.7264,top10E=0.26,eRank=135.3,q75/q25=1940.58 train_time:331580ms step_avg:75.36ms +[2025-09-03 04:24:12] [Rank 0] PRINT: step:4400/10000 val_loss:3.9747 svd_entropy: attn_qk:H=0.7592,top10E=0.27,eRank=160.5,q75/q25=61.35 attn_vo:H=0.8320,top10E=0.15,eRank=282.1,q75/q25=46.91 mlp_w1:H=0.8955,top10E=0.15,eRank=387.1,q75/q25=4.87 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.90 vo_prod:H=0.7264,top10E=0.26,eRank=135.3,q75/q25=1940.58 train_time:331580ms step_avg:75.36ms +[2025-09-03 04:24:13] [Rank 0] step:4401/10000 train_time:331595ms step_avg:75.35ms +[2025-09-03 04:24:13] [Rank 0] step:4401/10000 train_time:331595ms step_avg:75.35ms +[2025-09-03 04:24:14] [Rank 0] step:4421/10000 train_time:333076ms step_avg:75.34ms +[2025-09-03 04:24:14] [Rank 0] step:4421/10000 train_time:333076ms step_avg:75.34ms +[2025-09-03 04:24:16] [Rank 0] step:4441/10000 train_time:334635ms step_avg:75.35ms +[2025-09-03 04:24:16] [Rank 0] step:4441/10000 train_time:334635ms step_avg:75.35ms +[2025-09-03 04:24:17] [Rank 0] step:4461/10000 train_time:336200ms step_avg:75.36ms +[2025-09-03 04:24:17] [Rank 0] step:4461/10000 train_time:336200ms step_avg:75.36ms +[2025-09-03 04:24:19] [Rank 0] step:4481/10000 train_time:337770ms step_avg:75.38ms +[2025-09-03 04:24:19] [Rank 0] step:4481/10000 train_time:337770ms step_avg:75.38ms +[2025-09-03 04:24:20] [Rank 0] step:4501/10000 train_time:339337ms step_avg:75.39ms +[2025-09-03 04:24:20] [Rank 0] step:4501/10000 train_time:339337ms step_avg:75.39ms +[2025-09-03 04:24:22] [Rank 0] step:4521/10000 train_time:340903ms step_avg:75.40ms +[2025-09-03 04:24:22] [Rank 0] step:4521/10000 train_time:340903ms step_avg:75.40ms +[2025-09-03 
04:24:23] [Rank 0] step:4541/10000 train_time:342473ms step_avg:75.42ms +[2025-09-03 04:24:23] [Rank 0] step:4541/10000 train_time:342473ms step_avg:75.42ms +[2025-09-03 04:24:25] [Rank 0] step:4561/10000 train_time:344040ms step_avg:75.43ms +[2025-09-03 04:24:25] [Rank 0] step:4561/10000 train_time:344040ms step_avg:75.43ms +[2025-09-03 04:24:27] [Rank 0] step:4581/10000 train_time:345611ms step_avg:75.44ms +[2025-09-03 04:24:27] [Rank 0] step:4581/10000 train_time:345611ms step_avg:75.44ms +[2025-09-03 04:24:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:24:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:24:40] [Rank 0] PRINT: step:4600/10000 val_loss:3.9475 svd_entropy: attn_qk:H=0.7612,top10E=0.27,eRank=162.4,q75/q25=61.20 attn_vo:H=0.8340,top10E=0.15,eRank=284.9,q75/q25=45.84 mlp_w1:H=0.8974,top10E=0.15,eRank=391.7,q75/q25=4.79 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.89 vo_prod:H=0.7294,top10E=0.25,eRank=137.9,q75/q25=1838.59 train_time:347261ms step_avg:75.49ms +[2025-09-03 04:24:40] [Rank 0] PRINT: step:4600/10000 val_loss:3.9475 svd_entropy: attn_qk:H=0.7612,top10E=0.27,eRank=162.4,q75/q25=61.20 attn_vo:H=0.8340,top10E=0.15,eRank=284.9,q75/q25=45.84 mlp_w1:H=0.8974,top10E=0.15,eRank=391.7,q75/q25=4.79 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.89 vo_prod:H=0.7294,top10E=0.25,eRank=137.9,q75/q25=1838.59 train_time:347261ms step_avg:75.49ms +[2025-09-03 04:24:40] [Rank 0] step:4601/10000 train_time:347275ms step_avg:75.48ms +[2025-09-03 04:24:40] [Rank 0] step:4601/10000 train_time:347275ms step_avg:75.48ms +[2025-09-03 04:24:42] [Rank 0] step:4621/10000 train_time:348760ms step_avg:75.47ms +[2025-09-03 04:24:42] [Rank 0] step:4621/10000 train_time:348760ms step_avg:75.47ms +[2025-09-03 04:24:43] [Rank 0] step:4641/10000 train_time:350331ms 
step_avg:75.49ms +[2025-09-03 04:24:43] [Rank 0] step:4641/10000 train_time:350331ms step_avg:75.49ms +[2025-09-03 04:24:45] [Rank 0] step:4661/10000 train_time:351900ms step_avg:75.50ms +[2025-09-03 04:24:45] [Rank 0] step:4661/10000 train_time:351900ms step_avg:75.50ms +[2025-09-03 04:24:46] [Rank 0] step:4681/10000 train_time:353469ms step_avg:75.51ms +[2025-09-03 04:24:46] [Rank 0] step:4681/10000 train_time:353469ms step_avg:75.51ms +[2025-09-03 04:24:48] [Rank 0] step:4701/10000 train_time:355037ms step_avg:75.52ms +[2025-09-03 04:24:48] [Rank 0] step:4701/10000 train_time:355037ms step_avg:75.52ms +[2025-09-03 04:24:49] [Rank 0] step:4721/10000 train_time:356607ms step_avg:75.54ms +[2025-09-03 04:24:49] [Rank 0] step:4721/10000 train_time:356607ms step_avg:75.54ms +[2025-09-03 04:24:51] [Rank 0] step:4741/10000 train_time:358176ms step_avg:75.55ms +[2025-09-03 04:24:51] [Rank 0] step:4741/10000 train_time:358176ms step_avg:75.55ms +[2025-09-03 04:24:53] [Rank 0] step:4761/10000 train_time:359746ms step_avg:75.56ms +[2025-09-03 04:24:53] [Rank 0] step:4761/10000 train_time:359746ms step_avg:75.56ms +[2025-09-03 04:24:54] [Rank 0] step:4781/10000 train_time:361314ms step_avg:75.57ms +[2025-09-03 04:24:54] [Rank 0] step:4781/10000 train_time:361314ms step_avg:75.57ms +[2025-09-03 04:24:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:24:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:25:07] [Rank 0] PRINT: step:4800/10000 val_loss:3.9368 svd_entropy: attn_qk:H=0.7628,top10E=0.27,eRank=164.0,q75/q25=60.59 attn_vo:H=0.8359,top10E=0.15,eRank=287.6,q75/q25=45.10 mlp_w1:H=0.8991,top10E=0.15,eRank=396.1,q75/q25=4.72 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7324,top10E=0.25,eRank=140.4,q75/q25=1751.99 train_time:362966ms step_avg:75.62ms +[2025-09-03 04:25:07] [Rank 0] PRINT: step:4800/10000 val_loss:3.9368 svd_entropy: attn_qk:H=0.7628,top10E=0.27,eRank=164.0,q75/q25=60.59 attn_vo:H=0.8359,top10E=0.15,eRank=287.6,q75/q25=45.10 mlp_w1:H=0.8991,top10E=0.15,eRank=396.1,q75/q25=4.72 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7324,top10E=0.25,eRank=140.4,q75/q25=1751.99 train_time:362966ms step_avg:75.62ms +[2025-09-03 04:25:07] [Rank 0] step:4801/10000 train_time:362980ms step_avg:75.61ms +[2025-09-03 04:25:07] [Rank 0] step:4801/10000 train_time:362980ms step_avg:75.61ms +[2025-09-03 04:25:09] [Rank 0] step:4821/10000 train_time:364493ms step_avg:75.61ms +[2025-09-03 04:25:09] [Rank 0] step:4821/10000 train_time:364493ms step_avg:75.61ms +[2025-09-03 04:25:11] [Rank 0] step:4841/10000 train_time:366059ms step_avg:75.62ms +[2025-09-03 04:25:11] [Rank 0] step:4841/10000 train_time:366059ms step_avg:75.62ms +[2025-09-03 04:25:12] [Rank 0] step:4861/10000 train_time:367636ms step_avg:75.63ms +[2025-09-03 04:25:12] [Rank 0] step:4861/10000 train_time:367636ms step_avg:75.63ms +[2025-09-03 04:25:14] [Rank 0] step:4881/10000 train_time:369204ms step_avg:75.64ms +[2025-09-03 04:25:14] [Rank 0] step:4881/10000 train_time:369204ms step_avg:75.64ms +[2025-09-03 04:25:15] [Rank 0] step:4901/10000 train_time:370770ms step_avg:75.65ms +[2025-09-03 04:25:15] [Rank 0] step:4901/10000 train_time:370770ms step_avg:75.65ms +[2025-09-03 04:25:17] [Rank 0] step:4921/10000 train_time:372343ms step_avg:75.66ms +[2025-09-03 04:25:17] [Rank 0] step:4921/10000 train_time:372343ms step_avg:75.66ms +[2025-09-03 
04:25:18] [Rank 0] step:4941/10000 train_time:373911ms step_avg:75.68ms +[2025-09-03 04:25:18] [Rank 0] step:4941/10000 train_time:373911ms step_avg:75.68ms +[2025-09-03 04:25:20] [Rank 0] step:4961/10000 train_time:375482ms step_avg:75.69ms +[2025-09-03 04:25:20] [Rank 0] step:4961/10000 train_time:375482ms step_avg:75.69ms +[2025-09-03 04:25:22] [Rank 0] step:4981/10000 train_time:377053ms step_avg:75.70ms +[2025-09-03 04:25:22] [Rank 0] step:4981/10000 train_time:377053ms step_avg:75.70ms +[2025-09-03 04:25:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:25:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:25:35] [Rank 0] PRINT: step:5000/10000 val_loss:3.9162 svd_entropy: attn_qk:H=0.7645,top10E=0.27,eRank=165.7,q75/q25=60.49 attn_vo:H=0.8375,top10E=0.15,eRank=289.9,q75/q25=44.12 mlp_w1:H=0.9007,top10E=0.15,eRank=400.1,q75/q25=4.68 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7347,top10E=0.25,eRank=142.4,q75/q25=1711.40 train_time:378706ms step_avg:75.74ms +[2025-09-03 04:25:35] [Rank 0] PRINT: step:5000/10000 val_loss:3.9162 svd_entropy: attn_qk:H=0.7645,top10E=0.27,eRank=165.7,q75/q25=60.49 attn_vo:H=0.8375,top10E=0.15,eRank=289.9,q75/q25=44.12 mlp_w1:H=0.9007,top10E=0.15,eRank=400.1,q75/q25=4.68 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7347,top10E=0.25,eRank=142.4,q75/q25=1711.40 train_time:378706ms step_avg:75.74ms +[2025-09-03 04:25:35] [Rank 0] step:5001/10000 train_time:378720ms step_avg:75.73ms +[2025-09-03 04:25:35] [Rank 0] step:5001/10000 train_time:378720ms step_avg:75.73ms +[2025-09-03 04:25:37] [Rank 0] step:5021/10000 train_time:380222ms step_avg:75.73ms +[2025-09-03 04:25:37] [Rank 0] step:5021/10000 train_time:380222ms step_avg:75.73ms +[2025-09-03 04:25:38] [Rank 0] step:5041/10000 train_time:381790ms 
step_avg:75.74ms +[2025-09-03 04:25:38] [Rank 0] step:5041/10000 train_time:381790ms step_avg:75.74ms +[2025-09-03 04:25:40] [Rank 0] step:5061/10000 train_time:383357ms step_avg:75.75ms +[2025-09-03 04:25:40] [Rank 0] step:5061/10000 train_time:383357ms step_avg:75.75ms +[2025-09-03 04:25:41] [Rank 0] step:5081/10000 train_time:384927ms step_avg:75.76ms +[2025-09-03 04:25:41] [Rank 0] step:5081/10000 train_time:384927ms step_avg:75.76ms +[2025-09-03 04:25:43] [Rank 0] step:5101/10000 train_time:386498ms step_avg:75.77ms +[2025-09-03 04:25:43] [Rank 0] step:5101/10000 train_time:386498ms step_avg:75.77ms +[2025-09-03 04:25:44] [Rank 0] step:5121/10000 train_time:388065ms step_avg:75.78ms +[2025-09-03 04:25:44] [Rank 0] step:5121/10000 train_time:388065ms step_avg:75.78ms +[2025-09-03 04:25:46] [Rank 0] step:5141/10000 train_time:389638ms step_avg:75.79ms +[2025-09-03 04:25:46] [Rank 0] step:5141/10000 train_time:389638ms step_avg:75.79ms +[2025-09-03 04:25:48] [Rank 0] step:5161/10000 train_time:391205ms step_avg:75.80ms +[2025-09-03 04:25:48] [Rank 0] step:5161/10000 train_time:391205ms step_avg:75.80ms +[2025-09-03 04:25:49] [Rank 0] step:5181/10000 train_time:392778ms step_avg:75.81ms +[2025-09-03 04:25:49] [Rank 0] step:5181/10000 train_time:392778ms step_avg:75.81ms +[2025-09-03 04:25:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:25:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:26:02] [Rank 0] PRINT: step:5200/10000 val_loss:3.8970 svd_entropy: attn_qk:H=0.7660,top10E=0.26,eRank=167.2,q75/q25=59.98 attn_vo:H=0.8390,top10E=0.14,eRank=292.1,q75/q25=43.21 mlp_w1:H=0.9022,top10E=0.15,eRank=404.0,q75/q25=4.63 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7372,top10E=0.24,eRank=144.6,q75/q25=1642.78 train_time:394455ms step_avg:75.86ms +[2025-09-03 04:26:02] [Rank 0] PRINT: step:5200/10000 val_loss:3.8970 svd_entropy: attn_qk:H=0.7660,top10E=0.26,eRank=167.2,q75/q25=59.98 attn_vo:H=0.8390,top10E=0.14,eRank=292.1,q75/q25=43.21 mlp_w1:H=0.9022,top10E=0.15,eRank=404.0,q75/q25=4.63 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.89 vo_prod:H=0.7372,top10E=0.24,eRank=144.6,q75/q25=1642.78 train_time:394455ms step_avg:75.86ms +[2025-09-03 04:26:02] [Rank 0] step:5201/10000 train_time:394470ms step_avg:75.85ms +[2025-09-03 04:26:02] [Rank 0] step:5201/10000 train_time:394470ms step_avg:75.85ms +[2025-09-03 04:26:04] [Rank 0] step:5221/10000 train_time:395992ms step_avg:75.85ms +[2025-09-03 04:26:04] [Rank 0] step:5221/10000 train_time:395992ms step_avg:75.85ms +[2025-09-03 04:26:06] [Rank 0] step:5241/10000 train_time:397595ms step_avg:75.86ms +[2025-09-03 04:26:06] [Rank 0] step:5241/10000 train_time:397595ms step_avg:75.86ms +[2025-09-03 04:26:07] [Rank 0] step:5261/10000 train_time:399196ms step_avg:75.88ms +[2025-09-03 04:26:07] [Rank 0] step:5261/10000 train_time:399196ms step_avg:75.88ms +[2025-09-03 04:26:09] [Rank 0] step:5281/10000 train_time:400797ms step_avg:75.89ms +[2025-09-03 04:26:09] [Rank 0] step:5281/10000 train_time:400797ms step_avg:75.89ms +[2025-09-03 04:26:11] [Rank 0] step:5301/10000 train_time:402409ms step_avg:75.91ms +[2025-09-03 04:26:11] [Rank 0] step:5301/10000 train_time:402409ms step_avg:75.91ms +[2025-09-03 04:26:12] [Rank 0] step:5321/10000 train_time:404012ms step_avg:75.93ms +[2025-09-03 04:26:12] [Rank 0] step:5321/10000 train_time:404012ms step_avg:75.93ms +[2025-09-03 
04:26:14] [Rank 0] step:5341/10000 train_time:405614ms step_avg:75.94ms +[2025-09-03 04:26:14] [Rank 0] step:5341/10000 train_time:405614ms step_avg:75.94ms +[2025-09-03 04:26:15] [Rank 0] step:5361/10000 train_time:407218ms step_avg:75.96ms +[2025-09-03 04:26:15] [Rank 0] step:5361/10000 train_time:407218ms step_avg:75.96ms +[2025-09-03 04:26:17] [Rank 0] step:5381/10000 train_time:408824ms step_avg:75.98ms +[2025-09-03 04:26:17] [Rank 0] step:5381/10000 train_time:408824ms step_avg:75.98ms +[2025-09-03 04:26:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:26:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:26:30] [Rank 0] PRINT: step:5400/10000 val_loss:3.8802 svd_entropy: attn_qk:H=0.7674,top10E=0.26,eRank=168.6,q75/q25=59.32 attn_vo:H=0.8404,top10E=0.14,eRank=294.1,q75/q25=42.46 mlp_w1:H=0.9036,top10E=0.14,eRank=407.7,q75/q25=4.59 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.88 vo_prod:H=0.7394,top10E=0.24,eRank=146.6,q75/q25=1591.94 train_time:410505ms step_avg:76.02ms +[2025-09-03 04:26:30] [Rank 0] PRINT: step:5400/10000 val_loss:3.8802 svd_entropy: attn_qk:H=0.7674,top10E=0.26,eRank=168.6,q75/q25=59.32 attn_vo:H=0.8404,top10E=0.14,eRank=294.1,q75/q25=42.46 mlp_w1:H=0.9036,top10E=0.14,eRank=407.7,q75/q25=4.59 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.88 vo_prod:H=0.7394,top10E=0.24,eRank=146.6,q75/q25=1591.94 train_time:410505ms step_avg:76.02ms +[2025-09-03 04:26:30] [Rank 0] step:5401/10000 train_time:410521ms step_avg:76.01ms +[2025-09-03 04:26:30] [Rank 0] step:5401/10000 train_time:410521ms step_avg:76.01ms +[2025-09-03 04:26:32] [Rank 0] step:5421/10000 train_time:412047ms step_avg:76.01ms +[2025-09-03 04:26:32] [Rank 0] step:5421/10000 train_time:412047ms step_avg:76.01ms +[2025-09-03 04:26:34] [Rank 0] step:5441/10000 train_time:413641ms 
step_avg:76.02ms +[2025-09-03 04:26:34] [Rank 0] step:5441/10000 train_time:413641ms step_avg:76.02ms +[2025-09-03 04:26:35] [Rank 0] step:5461/10000 train_time:415246ms step_avg:76.04ms +[2025-09-03 04:26:35] [Rank 0] step:5461/10000 train_time:415246ms step_avg:76.04ms +[2025-09-03 04:26:37] [Rank 0] step:5481/10000 train_time:416849ms step_avg:76.05ms +[2025-09-03 04:26:37] [Rank 0] step:5481/10000 train_time:416849ms step_avg:76.05ms +[2025-09-03 04:26:38] [Rank 0] step:5501/10000 train_time:418456ms step_avg:76.07ms +[2025-09-03 04:26:38] [Rank 0] step:5501/10000 train_time:418456ms step_avg:76.07ms +[2025-09-03 04:26:40] [Rank 0] step:5521/10000 train_time:420063ms step_avg:76.08ms +[2025-09-03 04:26:40] [Rank 0] step:5521/10000 train_time:420063ms step_avg:76.08ms +[2025-09-03 04:26:42] [Rank 0] step:5541/10000 train_time:421665ms step_avg:76.10ms +[2025-09-03 04:26:42] [Rank 0] step:5541/10000 train_time:421665ms step_avg:76.10ms +[2025-09-03 04:26:43] [Rank 0] step:5561/10000 train_time:423270ms step_avg:76.11ms +[2025-09-03 04:26:43] [Rank 0] step:5561/10000 train_time:423270ms step_avg:76.11ms +[2025-09-03 04:26:45] [Rank 0] step:5581/10000 train_time:424871ms step_avg:76.13ms +[2025-09-03 04:26:45] [Rank 0] step:5581/10000 train_time:424871ms step_avg:76.13ms +[2025-09-03 04:26:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:26:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:26:58] [Rank 0] PRINT: step:5600/10000 val_loss:3.8670 svd_entropy: attn_qk:H=0.7688,top10E=0.26,eRank=170.0,q75/q25=58.92 attn_vo:H=0.8417,top10E=0.14,eRank=296.0,q75/q25=41.69 mlp_w1:H=0.9049,top10E=0.14,eRank=411.2,q75/q25=4.54 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.88 vo_prod:H=0.7413,top10E=0.24,eRank=148.3,q75/q25=1541.68 train_time:426556ms step_avg:76.17ms +[2025-09-03 04:26:58] [Rank 0] PRINT: step:5600/10000 val_loss:3.8670 svd_entropy: attn_qk:H=0.7688,top10E=0.26,eRank=170.0,q75/q25=58.92 attn_vo:H=0.8417,top10E=0.14,eRank=296.0,q75/q25=41.69 mlp_w1:H=0.9049,top10E=0.14,eRank=411.2,q75/q25=4.54 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.88 vo_prod:H=0.7413,top10E=0.24,eRank=148.3,q75/q25=1541.68 train_time:426556ms step_avg:76.17ms +[2025-09-03 04:26:58] [Rank 0] step:5601/10000 train_time:426572ms step_avg:76.16ms +[2025-09-03 04:26:58] [Rank 0] step:5601/10000 train_time:426572ms step_avg:76.16ms +[2025-09-03 04:27:00] [Rank 0] step:5621/10000 train_time:428104ms step_avg:76.16ms +[2025-09-03 04:27:00] [Rank 0] step:5621/10000 train_time:428104ms step_avg:76.16ms +[2025-09-03 04:27:01] [Rank 0] step:5641/10000 train_time:429704ms step_avg:76.18ms +[2025-09-03 04:27:01] [Rank 0] step:5641/10000 train_time:429704ms step_avg:76.18ms +[2025-09-03 04:27:03] [Rank 0] step:5661/10000 train_time:431304ms step_avg:76.19ms +[2025-09-03 04:27:03] [Rank 0] step:5661/10000 train_time:431304ms step_avg:76.19ms +[2025-09-03 04:27:05] [Rank 0] step:5681/10000 train_time:432908ms step_avg:76.20ms +[2025-09-03 04:27:05] [Rank 0] step:5681/10000 train_time:432908ms step_avg:76.20ms +[2025-09-03 04:27:06] [Rank 0] step:5701/10000 train_time:434556ms step_avg:76.22ms +[2025-09-03 04:27:06] [Rank 0] step:5701/10000 train_time:434556ms step_avg:76.22ms +[2025-09-03 04:27:08] [Rank 0] step:5721/10000 train_time:436160ms step_avg:76.24ms +[2025-09-03 04:27:08] [Rank 0] step:5721/10000 train_time:436160ms step_avg:76.24ms +[2025-09-03 
04:27:09] [Rank 0] step:5741/10000 train_time:437759ms step_avg:76.25ms +[2025-09-03 04:27:09] [Rank 0] step:5741/10000 train_time:437759ms step_avg:76.25ms +[2025-09-03 04:27:11] [Rank 0] step:5761/10000 train_time:439363ms step_avg:76.27ms +[2025-09-03 04:27:11] [Rank 0] step:5761/10000 train_time:439363ms step_avg:76.27ms +[2025-09-03 04:27:13] [Rank 0] step:5781/10000 train_time:440970ms step_avg:76.28ms +[2025-09-03 04:27:13] [Rank 0] step:5781/10000 train_time:440970ms step_avg:76.28ms +[2025-09-03 04:27:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:27:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:27:26] [Rank 0] PRINT: step:5800/10000 val_loss:3.8572 svd_entropy: attn_qk:H=0.7702,top10E=0.26,eRank=171.5,q75/q25=58.04 attn_vo:H=0.8429,top10E=0.14,eRank=297.8,q75/q25=41.27 mlp_w1:H=0.9061,top10E=0.14,eRank=414.4,q75/q25=4.50 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.88 vo_prod:H=0.7433,top10E=0.24,eRank=150.0,q75/q25=1495.61 train_time:442656ms step_avg:76.32ms +[2025-09-03 04:27:26] [Rank 0] PRINT: step:5800/10000 val_loss:3.8572 svd_entropy: attn_qk:H=0.7702,top10E=0.26,eRank=171.5,q75/q25=58.04 attn_vo:H=0.8429,top10E=0.14,eRank=297.8,q75/q25=41.27 mlp_w1:H=0.9061,top10E=0.14,eRank=414.4,q75/q25=4.50 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.88 vo_prod:H=0.7433,top10E=0.24,eRank=150.0,q75/q25=1495.61 train_time:442656ms step_avg:76.32ms +[2025-09-03 04:27:26] [Rank 0] step:5801/10000 train_time:442671ms step_avg:76.31ms +[2025-09-03 04:27:26] [Rank 0] step:5801/10000 train_time:442671ms step_avg:76.31ms +[2025-09-03 04:27:28] [Rank 0] step:5821/10000 train_time:444188ms step_avg:76.31ms +[2025-09-03 04:27:28] [Rank 0] step:5821/10000 train_time:444188ms step_avg:76.31ms +[2025-09-03 04:27:29] [Rank 0] step:5841/10000 train_time:445786ms 
step_avg:76.32ms +[2025-09-03 04:27:29] [Rank 0] step:5841/10000 train_time:445786ms step_avg:76.32ms +[2025-09-03 04:27:31] [Rank 0] step:5861/10000 train_time:447390ms step_avg:76.33ms +[2025-09-03 04:27:31] [Rank 0] step:5861/10000 train_time:447390ms step_avg:76.33ms +[2025-09-03 04:27:32] [Rank 0] step:5881/10000 train_time:448990ms step_avg:76.35ms +[2025-09-03 04:27:32] [Rank 0] step:5881/10000 train_time:448990ms step_avg:76.35ms +[2025-09-03 04:27:34] [Rank 0] step:5901/10000 train_time:450592ms step_avg:76.36ms +[2025-09-03 04:27:34] [Rank 0] step:5901/10000 train_time:450592ms step_avg:76.36ms +[2025-09-03 04:27:36] [Rank 0] step:5921/10000 train_time:452193ms step_avg:76.37ms +[2025-09-03 04:27:36] [Rank 0] step:5921/10000 train_time:452193ms step_avg:76.37ms +[2025-09-03 04:27:37] [Rank 0] step:5941/10000 train_time:453800ms step_avg:76.38ms +[2025-09-03 04:27:37] [Rank 0] step:5941/10000 train_time:453800ms step_avg:76.38ms +[2025-09-03 04:27:39] [Rank 0] step:5961/10000 train_time:455406ms step_avg:76.40ms +[2025-09-03 04:27:39] [Rank 0] step:5961/10000 train_time:455406ms step_avg:76.40ms +[2025-09-03 04:27:40] [Rank 0] step:5981/10000 train_time:457012ms step_avg:76.41ms +[2025-09-03 04:27:40] [Rank 0] step:5981/10000 train_time:457012ms step_avg:76.41ms +[2025-09-03 04:27:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:27:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:27:54] [Rank 0] PRINT: step:6000/10000 val_loss:3.8336 svd_entropy: attn_qk:H=0.7715,top10E=0.26,eRank=172.8,q75/q25=57.82 attn_vo:H=0.8440,top10E=0.14,eRank=299.5,q75/q25=40.63 mlp_w1:H=0.9073,top10E=0.14,eRank=417.6,q75/q25=4.46 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7450,top10E=0.23,eRank=151.5,q75/q25=1465.63 train_time:458695ms step_avg:76.45ms +[2025-09-03 04:27:54] [Rank 0] PRINT: step:6000/10000 val_loss:3.8336 svd_entropy: attn_qk:H=0.7715,top10E=0.26,eRank=172.8,q75/q25=57.82 attn_vo:H=0.8440,top10E=0.14,eRank=299.5,q75/q25=40.63 mlp_w1:H=0.9073,top10E=0.14,eRank=417.6,q75/q25=4.46 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7450,top10E=0.23,eRank=151.5,q75/q25=1465.63 train_time:458695ms step_avg:76.45ms +[2025-09-03 04:27:54] [Rank 0] step:6001/10000 train_time:458710ms step_avg:76.44ms +[2025-09-03 04:27:54] [Rank 0] step:6001/10000 train_time:458710ms step_avg:76.44ms +[2025-09-03 04:27:56] [Rank 0] step:6021/10000 train_time:460231ms step_avg:76.44ms +[2025-09-03 04:27:56] [Rank 0] step:6021/10000 train_time:460231ms step_avg:76.44ms +[2025-09-03 04:27:57] [Rank 0] step:6041/10000 train_time:461835ms step_avg:76.45ms +[2025-09-03 04:27:57] [Rank 0] step:6041/10000 train_time:461835ms step_avg:76.45ms +[2025-09-03 04:27:59] [Rank 0] step:6061/10000 train_time:463443ms step_avg:76.46ms +[2025-09-03 04:27:59] [Rank 0] step:6061/10000 train_time:463443ms step_avg:76.46ms +[2025-09-03 04:28:00] [Rank 0] step:6081/10000 train_time:465048ms step_avg:76.48ms +[2025-09-03 04:28:00] [Rank 0] step:6081/10000 train_time:465048ms step_avg:76.48ms +[2025-09-03 04:28:02] [Rank 0] step:6101/10000 train_time:466659ms step_avg:76.49ms +[2025-09-03 04:28:02] [Rank 0] step:6101/10000 train_time:466659ms step_avg:76.49ms +[2025-09-03 04:28:04] [Rank 0] step:6121/10000 train_time:469058ms step_avg:76.63ms +[2025-09-03 04:28:04] [Rank 0] step:6121/10000 train_time:469058ms step_avg:76.63ms +[2025-09-03 
04:28:06] [Rank 0] step:6141/10000 train_time:470673ms step_avg:76.64ms +[2025-09-03 04:28:06] [Rank 0] step:6141/10000 train_time:470673ms step_avg:76.64ms +[2025-09-03 04:28:08] [Rank 0] step:6161/10000 train_time:472325ms step_avg:76.66ms +[2025-09-03 04:28:08] [Rank 0] step:6161/10000 train_time:472325ms step_avg:76.66ms +[2025-09-03 04:28:09] [Rank 0] step:6181/10000 train_time:473928ms step_avg:76.68ms +[2025-09-03 04:28:09] [Rank 0] step:6181/10000 train_time:473928ms step_avg:76.68ms +[2025-09-03 04:28:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:28:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:28:23] [Rank 0] PRINT: step:6200/10000 val_loss:3.8187 svd_entropy: attn_qk:H=0.7727,top10E=0.26,eRank=174.1,q75/q25=57.39 attn_vo:H=0.8451,top10E=0.14,eRank=301.1,q75/q25=40.05 mlp_w1:H=0.9084,top10E=0.14,eRank=420.5,q75/q25=4.42 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.87 vo_prod:H=0.7468,top10E=0.23,eRank=153.1,q75/q25=1421.23 train_time:475667ms step_avg:76.72ms +[2025-09-03 04:28:23] [Rank 0] PRINT: step:6200/10000 val_loss:3.8187 svd_entropy: attn_qk:H=0.7727,top10E=0.26,eRank=174.1,q75/q25=57.39 attn_vo:H=0.8451,top10E=0.14,eRank=301.1,q75/q25=40.05 mlp_w1:H=0.9084,top10E=0.14,eRank=420.5,q75/q25=4.42 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.87 vo_prod:H=0.7468,top10E=0.23,eRank=153.1,q75/q25=1421.23 train_time:475667ms step_avg:76.72ms +[2025-09-03 04:28:23] [Rank 0] step:6201/10000 train_time:475682ms step_avg:76.71ms +[2025-09-03 04:28:23] [Rank 0] step:6201/10000 train_time:475682ms step_avg:76.71ms +[2025-09-03 04:28:24] [Rank 0] step:6221/10000 train_time:477221ms step_avg:76.71ms +[2025-09-03 04:28:24] [Rank 0] step:6221/10000 train_time:477221ms step_avg:76.71ms +[2025-09-03 04:28:26] [Rank 0] step:6241/10000 train_time:478824ms 
step_avg:76.72ms +[2025-09-03 04:28:26] [Rank 0] step:6241/10000 train_time:478824ms step_avg:76.72ms +[2025-09-03 04:28:28] [Rank 0] step:6261/10000 train_time:480431ms step_avg:76.73ms +[2025-09-03 04:28:28] [Rank 0] step:6261/10000 train_time:480431ms step_avg:76.73ms +[2025-09-03 04:28:29] [Rank 0] step:6281/10000 train_time:482042ms step_avg:76.75ms +[2025-09-03 04:28:29] [Rank 0] step:6281/10000 train_time:482042ms step_avg:76.75ms +[2025-09-03 04:28:31] [Rank 0] step:6301/10000 train_time:483649ms step_avg:76.76ms +[2025-09-03 04:28:31] [Rank 0] step:6301/10000 train_time:483649ms step_avg:76.76ms +[2025-09-03 04:28:32] [Rank 0] step:6321/10000 train_time:485258ms step_avg:76.77ms +[2025-09-03 04:28:32] [Rank 0] step:6321/10000 train_time:485258ms step_avg:76.77ms +[2025-09-03 04:28:34] [Rank 0] step:6341/10000 train_time:486868ms step_avg:76.78ms +[2025-09-03 04:28:34] [Rank 0] step:6341/10000 train_time:486868ms step_avg:76.78ms +[2025-09-03 04:28:36] [Rank 0] step:6361/10000 train_time:488483ms step_avg:76.79ms +[2025-09-03 04:28:36] [Rank 0] step:6361/10000 train_time:488483ms step_avg:76.79ms +[2025-09-03 04:28:37] [Rank 0] step:6381/10000 train_time:490094ms step_avg:76.81ms +[2025-09-03 04:28:37] [Rank 0] step:6381/10000 train_time:490094ms step_avg:76.81ms +[2025-09-03 04:28:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:28:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:28:51] [Rank 0] PRINT: step:6400/10000 val_loss:3.8018 svd_entropy: attn_qk:H=0.7738,top10E=0.26,eRank=175.3,q75/q25=57.22 attn_vo:H=0.8461,top10E=0.14,eRank=302.5,q75/q25=39.45 mlp_w1:H=0.9094,top10E=0.14,eRank=423.2,q75/q25=4.41 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.87 vo_prod:H=0.7483,top10E=0.23,eRank=154.5,q75/q25=1379.19 train_time:491787ms step_avg:76.84ms +[2025-09-03 04:28:51] [Rank 0] PRINT: step:6400/10000 val_loss:3.8018 svd_entropy: attn_qk:H=0.7738,top10E=0.26,eRank=175.3,q75/q25=57.22 attn_vo:H=0.8461,top10E=0.14,eRank=302.5,q75/q25=39.45 mlp_w1:H=0.9094,top10E=0.14,eRank=423.2,q75/q25=4.41 mlp_w2:H=0.9711,top10E=0.04,eRank=633.8,q75/q25=2.87 vo_prod:H=0.7483,top10E=0.23,eRank=154.5,q75/q25=1379.19 train_time:491787ms step_avg:76.84ms +[2025-09-03 04:28:51] [Rank 0] step:6401/10000 train_time:491802ms step_avg:76.83ms +[2025-09-03 04:28:51] [Rank 0] step:6401/10000 train_time:491802ms step_avg:76.83ms +[2025-09-03 04:28:52] [Rank 0] step:6421/10000 train_time:493326ms step_avg:76.83ms +[2025-09-03 04:28:52] [Rank 0] step:6421/10000 train_time:493326ms step_avg:76.83ms +[2025-09-03 04:28:54] [Rank 0] step:6441/10000 train_time:494934ms step_avg:76.84ms +[2025-09-03 04:28:54] [Rank 0] step:6441/10000 train_time:494934ms step_avg:76.84ms +[2025-09-03 04:28:55] [Rank 0] step:6461/10000 train_time:496545ms step_avg:76.85ms +[2025-09-03 04:28:55] [Rank 0] step:6461/10000 train_time:496545ms step_avg:76.85ms +[2025-09-03 04:28:57] [Rank 0] step:6481/10000 train_time:498161ms step_avg:76.86ms +[2025-09-03 04:28:57] [Rank 0] step:6481/10000 train_time:498161ms step_avg:76.86ms +[2025-09-03 04:28:59] [Rank 0] step:6501/10000 train_time:499767ms step_avg:76.88ms +[2025-09-03 04:28:59] [Rank 0] step:6501/10000 train_time:499767ms step_avg:76.88ms +[2025-09-03 04:29:00] [Rank 0] step:6521/10000 train_time:501371ms step_avg:76.89ms +[2025-09-03 04:29:00] [Rank 0] step:6521/10000 train_time:501371ms step_avg:76.89ms +[2025-09-03 
04:29:02] [Rank 0] step:6541/10000 train_time:502984ms step_avg:76.90ms +[2025-09-03 04:29:02] [Rank 0] step:6541/10000 train_time:502984ms step_avg:76.90ms +[2025-09-03 04:29:04] [Rank 0] step:6561/10000 train_time:504595ms step_avg:76.91ms +[2025-09-03 04:29:04] [Rank 0] step:6561/10000 train_time:504595ms step_avg:76.91ms +[2025-09-03 04:29:05] [Rank 0] step:6581/10000 train_time:506203ms step_avg:76.92ms +[2025-09-03 04:29:05] [Rank 0] step:6581/10000 train_time:506203ms step_avg:76.92ms +[2025-09-03 04:29:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:29:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:29:18] [Rank 0] PRINT: step:6600/10000 val_loss:3.7880 svd_entropy: attn_qk:H=0.7747,top10E=0.25,eRank=176.3,q75/q25=56.78 attn_vo:H=0.8470,top10E=0.14,eRank=303.8,q75/q25=39.07 mlp_w1:H=0.9102,top10E=0.14,eRank=425.5,q75/q25=4.37 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7497,top10E=0.23,eRank=155.7,q75/q25=1370.47 train_time:507896ms step_avg:76.95ms +[2025-09-03 04:29:18] [Rank 0] PRINT: step:6600/10000 val_loss:3.7880 svd_entropy: attn_qk:H=0.7747,top10E=0.25,eRank=176.3,q75/q25=56.78 attn_vo:H=0.8470,top10E=0.14,eRank=303.8,q75/q25=39.07 mlp_w1:H=0.9102,top10E=0.14,eRank=425.5,q75/q25=4.37 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7497,top10E=0.23,eRank=155.7,q75/q25=1370.47 train_time:507896ms step_avg:76.95ms +[2025-09-03 04:29:19] [Rank 0] step:6601/10000 train_time:507912ms step_avg:76.94ms +[2025-09-03 04:29:19] [Rank 0] step:6601/10000 train_time:507912ms step_avg:76.94ms +[2025-09-03 04:29:20] [Rank 0] step:6621/10000 train_time:509452ms step_avg:76.94ms +[2025-09-03 04:29:20] [Rank 0] step:6621/10000 train_time:509452ms step_avg:76.94ms +[2025-09-03 04:29:22] [Rank 0] step:6641/10000 train_time:511063ms 
step_avg:76.96ms +[2025-09-03 04:29:22] [Rank 0] step:6641/10000 train_time:511063ms step_avg:76.96ms +[2025-09-03 04:29:23] [Rank 0] step:6661/10000 train_time:512667ms step_avg:76.97ms +[2025-09-03 04:29:23] [Rank 0] step:6661/10000 train_time:512667ms step_avg:76.97ms +[2025-09-03 04:29:25] [Rank 0] step:6681/10000 train_time:514296ms step_avg:76.98ms +[2025-09-03 04:29:25] [Rank 0] step:6681/10000 train_time:514296ms step_avg:76.98ms +[2025-09-03 04:29:27] [Rank 0] step:6701/10000 train_time:515938ms step_avg:76.99ms +[2025-09-03 04:29:27] [Rank 0] step:6701/10000 train_time:515938ms step_avg:76.99ms +[2025-09-03 04:29:28] [Rank 0] step:6721/10000 train_time:517576ms step_avg:77.01ms +[2025-09-03 04:29:28] [Rank 0] step:6721/10000 train_time:517576ms step_avg:77.01ms +[2025-09-03 04:29:30] [Rank 0] step:6741/10000 train_time:519206ms step_avg:77.02ms +[2025-09-03 04:29:30] [Rank 0] step:6741/10000 train_time:519206ms step_avg:77.02ms +[2025-09-03 04:29:32] [Rank 0] step:6761/10000 train_time:520840ms step_avg:77.04ms +[2025-09-03 04:29:32] [Rank 0] step:6761/10000 train_time:520840ms step_avg:77.04ms +[2025-09-03 04:29:33] [Rank 0] step:6781/10000 train_time:522478ms step_avg:77.05ms +[2025-09-03 04:29:33] [Rank 0] step:6781/10000 train_time:522478ms step_avg:77.05ms +[2025-09-03 04:29:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:29:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:29:47] [Rank 0] PRINT: step:6800/10000 val_loss:3.7722 svd_entropy: attn_qk:H=0.7755,top10E=0.25,eRank=177.1,q75/q25=56.38 attn_vo:H=0.8478,top10E=0.14,eRank=305.0,q75/q25=38.68 mlp_w1:H=0.9110,top10E=0.14,eRank=427.7,q75/q25=4.36 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7510,top10E=0.23,eRank=156.9,q75/q25=1356.44 train_time:524202ms step_avg:77.09ms +[2025-09-03 04:29:47] [Rank 0] PRINT: step:6800/10000 val_loss:3.7722 svd_entropy: attn_qk:H=0.7755,top10E=0.25,eRank=177.1,q75/q25=56.38 attn_vo:H=0.8478,top10E=0.14,eRank=305.0,q75/q25=38.68 mlp_w1:H=0.9110,top10E=0.14,eRank=427.7,q75/q25=4.36 mlp_w2:H=0.9711,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7510,top10E=0.23,eRank=156.9,q75/q25=1356.44 train_time:524202ms step_avg:77.09ms +[2025-09-03 04:29:47] [Rank 0] step:6801/10000 train_time:524217ms step_avg:77.08ms +[2025-09-03 04:29:47] [Rank 0] step:6801/10000 train_time:524217ms step_avg:77.08ms +[2025-09-03 04:29:48] [Rank 0] step:6821/10000 train_time:525778ms step_avg:77.08ms +[2025-09-03 04:29:48] [Rank 0] step:6821/10000 train_time:525778ms step_avg:77.08ms +[2025-09-03 04:29:50] [Rank 0] step:6841/10000 train_time:527407ms step_avg:77.10ms +[2025-09-03 04:29:50] [Rank 0] step:6841/10000 train_time:527407ms step_avg:77.10ms +[2025-09-03 04:29:52] [Rank 0] step:6861/10000 train_time:529042ms step_avg:77.11ms +[2025-09-03 04:29:52] [Rank 0] step:6861/10000 train_time:529042ms step_avg:77.11ms +[2025-09-03 04:29:53] [Rank 0] step:6881/10000 train_time:530675ms step_avg:77.12ms +[2025-09-03 04:29:53] [Rank 0] step:6881/10000 train_time:530675ms step_avg:77.12ms +[2025-09-03 04:29:55] [Rank 0] step:6901/10000 train_time:532309ms step_avg:77.14ms +[2025-09-03 04:29:55] [Rank 0] step:6901/10000 train_time:532309ms step_avg:77.14ms +[2025-09-03 04:29:56] [Rank 0] step:6921/10000 train_time:533939ms step_avg:77.15ms +[2025-09-03 04:29:56] [Rank 0] step:6921/10000 train_time:533939ms step_avg:77.15ms +[2025-09-03 
04:29:58] [Rank 0] step:6941/10000 train_time:535578ms step_avg:77.16ms +[2025-09-03 04:29:58] [Rank 0] step:6941/10000 train_time:535578ms step_avg:77.16ms +[2025-09-03 04:30:00] [Rank 0] step:6961/10000 train_time:537227ms step_avg:77.18ms +[2025-09-03 04:30:00] [Rank 0] step:6961/10000 train_time:537227ms step_avg:77.18ms +[2025-09-03 04:30:01] [Rank 0] step:6981/10000 train_time:538864ms step_avg:77.19ms +[2025-09-03 04:30:01] [Rank 0] step:6981/10000 train_time:538864ms step_avg:77.19ms +[2025-09-03 04:30:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:30:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:30:15] [Rank 0] PRINT: step:7000/10000 val_loss:3.7551 svd_entropy: attn_qk:H=0.7763,top10E=0.25,eRank=177.9,q75/q25=56.03 attn_vo:H=0.8485,top10E=0.13,eRank=306.2,q75/q25=38.16 mlp_w1:H=0.9117,top10E=0.13,eRank=429.6,q75/q25=4.33 mlp_w2:H=0.9710,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7524,top10E=0.23,eRank=158.2,q75/q25=1346.57 train_time:540588ms step_avg:77.23ms +[2025-09-03 04:30:15] [Rank 0] PRINT: step:7000/10000 val_loss:3.7551 svd_entropy: attn_qk:H=0.7763,top10E=0.25,eRank=177.9,q75/q25=56.03 attn_vo:H=0.8485,top10E=0.13,eRank=306.2,q75/q25=38.16 mlp_w1:H=0.9117,top10E=0.13,eRank=429.6,q75/q25=4.33 mlp_w2:H=0.9710,top10E=0.04,eRank=633.7,q75/q25=2.87 vo_prod:H=0.7524,top10E=0.23,eRank=158.2,q75/q25=1346.57 train_time:540588ms step_avg:77.23ms +[2025-09-03 04:30:15] [Rank 0] step:7001/10000 train_time:540603ms step_avg:77.22ms +[2025-09-03 04:30:15] [Rank 0] step:7001/10000 train_time:540603ms step_avg:77.22ms +[2025-09-03 04:30:17] [Rank 0] step:7021/10000 train_time:542172ms step_avg:77.22ms +[2025-09-03 04:30:17] [Rank 0] step:7021/10000 train_time:542172ms step_avg:77.22ms +[2025-09-03 04:30:18] [Rank 0] step:7041/10000 train_time:543803ms 
step_avg:77.23ms +[2025-09-03 04:30:18] [Rank 0] step:7041/10000 train_time:543803ms step_avg:77.23ms +[2025-09-03 04:30:20] [Rank 0] step:7061/10000 train_time:545439ms step_avg:77.25ms +[2025-09-03 04:30:20] [Rank 0] step:7061/10000 train_time:545439ms step_avg:77.25ms +[2025-09-03 04:30:21] [Rank 0] step:7081/10000 train_time:547073ms step_avg:77.26ms +[2025-09-03 04:30:21] [Rank 0] step:7081/10000 train_time:547073ms step_avg:77.26ms +[2025-09-03 04:30:23] [Rank 0] step:7101/10000 train_time:548712ms step_avg:77.27ms +[2025-09-03 04:30:23] [Rank 0] step:7101/10000 train_time:548712ms step_avg:77.27ms +[2025-09-03 04:30:25] [Rank 0] step:7121/10000 train_time:550347ms step_avg:77.29ms +[2025-09-03 04:30:25] [Rank 0] step:7121/10000 train_time:550347ms step_avg:77.29ms +[2025-09-03 04:30:26] [Rank 0] step:7141/10000 train_time:551983ms step_avg:77.30ms +[2025-09-03 04:30:26] [Rank 0] step:7141/10000 train_time:551983ms step_avg:77.30ms +[2025-09-03 04:30:28] [Rank 0] step:7161/10000 train_time:553622ms step_avg:77.31ms +[2025-09-03 04:30:28] [Rank 0] step:7161/10000 train_time:553622ms step_avg:77.31ms +[2025-09-03 04:30:30] [Rank 0] step:7181/10000 train_time:555262ms step_avg:77.32ms +[2025-09-03 04:30:30] [Rank 0] step:7181/10000 train_time:555262ms step_avg:77.32ms +[2025-09-03 04:30:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:30:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:30:43] [Rank 0] PRINT: step:7200/10000 val_loss:3.7457 svd_entropy: attn_qk:H=0.7770,top10E=0.25,eRank=178.7,q75/q25=55.56 attn_vo:H=0.8492,top10E=0.13,eRank=307.3,q75/q25=37.80 mlp_w1:H=0.9123,top10E=0.13,eRank=431.4,q75/q25=4.31 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7536,top10E=0.23,eRank=159.4,q75/q25=1305.62 train_time:556983ms step_avg:77.36ms +[2025-09-03 04:30:43] [Rank 0] PRINT: step:7200/10000 val_loss:3.7457 svd_entropy: attn_qk:H=0.7770,top10E=0.25,eRank=178.7,q75/q25=55.56 attn_vo:H=0.8492,top10E=0.13,eRank=307.3,q75/q25=37.80 mlp_w1:H=0.9123,top10E=0.13,eRank=431.4,q75/q25=4.31 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7536,top10E=0.23,eRank=159.4,q75/q25=1305.62 train_time:556983ms step_avg:77.36ms +[2025-09-03 04:30:43] [Rank 0] step:7201/10000 train_time:556998ms step_avg:77.35ms +[2025-09-03 04:30:43] [Rank 0] step:7201/10000 train_time:556998ms step_avg:77.35ms +[2025-09-03 04:30:45] [Rank 0] step:7221/10000 train_time:558573ms step_avg:77.35ms +[2025-09-03 04:30:45] [Rank 0] step:7221/10000 train_time:558573ms step_avg:77.35ms +[2025-09-03 04:30:46] [Rank 0] step:7241/10000 train_time:560203ms step_avg:77.37ms +[2025-09-03 04:30:46] [Rank 0] step:7241/10000 train_time:560203ms step_avg:77.37ms +[2025-09-03 04:30:48] [Rank 0] step:7261/10000 train_time:561836ms step_avg:77.38ms +[2025-09-03 04:30:48] [Rank 0] step:7261/10000 train_time:561836ms step_avg:77.38ms +[2025-09-03 04:30:50] [Rank 0] step:7281/10000 train_time:563479ms step_avg:77.39ms +[2025-09-03 04:30:50] [Rank 0] step:7281/10000 train_time:563479ms step_avg:77.39ms +[2025-09-03 04:30:51] [Rank 0] step:7301/10000 train_time:565109ms step_avg:77.40ms +[2025-09-03 04:30:51] [Rank 0] step:7301/10000 train_time:565109ms step_avg:77.40ms +[2025-09-03 04:30:53] [Rank 0] step:7321/10000 train_time:566755ms step_avg:77.41ms +[2025-09-03 04:30:53] [Rank 0] step:7321/10000 train_time:566755ms step_avg:77.41ms +[2025-09-03 
04:30:55] [Rank 0] step:7341/10000 train_time:568391ms step_avg:77.43ms +[2025-09-03 04:30:55] [Rank 0] step:7341/10000 train_time:568391ms step_avg:77.43ms +[2025-09-03 04:30:56] [Rank 0] step:7361/10000 train_time:570035ms step_avg:77.44ms +[2025-09-03 04:30:56] [Rank 0] step:7361/10000 train_time:570035ms step_avg:77.44ms +[2025-09-03 04:30:58] [Rank 0] step:7381/10000 train_time:571677ms step_avg:77.45ms +[2025-09-03 04:30:58] [Rank 0] step:7381/10000 train_time:571677ms step_avg:77.45ms +[2025-09-03 04:30:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:30:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:31:11] [Rank 0] PRINT: step:7400/10000 val_loss:3.7269 svd_entropy: attn_qk:H=0.7776,top10E=0.25,eRank=179.4,q75/q25=55.18 attn_vo:H=0.8498,top10E=0.13,eRank=308.2,q75/q25=37.39 mlp_w1:H=0.9129,top10E=0.13,eRank=432.9,q75/q25=4.28 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7547,top10E=0.22,eRank=160.4,q75/q25=1285.58 train_time:573386ms step_avg:77.48ms +[2025-09-03 04:31:11] [Rank 0] PRINT: step:7400/10000 val_loss:3.7269 svd_entropy: attn_qk:H=0.7776,top10E=0.25,eRank=179.4,q75/q25=55.18 attn_vo:H=0.8498,top10E=0.13,eRank=308.2,q75/q25=37.39 mlp_w1:H=0.9129,top10E=0.13,eRank=432.9,q75/q25=4.28 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7547,top10E=0.22,eRank=160.4,q75/q25=1285.58 train_time:573386ms step_avg:77.48ms +[2025-09-03 04:31:11] [Rank 0] step:7401/10000 train_time:573401ms step_avg:77.48ms +[2025-09-03 04:31:11] [Rank 0] step:7401/10000 train_time:573401ms step_avg:77.48ms +[2025-09-03 04:31:13] [Rank 0] step:7421/10000 train_time:574976ms step_avg:77.48ms +[2025-09-03 04:31:13] [Rank 0] step:7421/10000 train_time:574976ms step_avg:77.48ms +[2025-09-03 04:31:15] [Rank 0] step:7441/10000 train_time:576613ms 
step_avg:77.49ms +[2025-09-03 04:31:15] [Rank 0] step:7441/10000 train_time:576613ms step_avg:77.49ms +[2025-09-03 04:31:16] [Rank 0] step:7461/10000 train_time:578250ms step_avg:77.50ms +[2025-09-03 04:31:16] [Rank 0] step:7461/10000 train_time:578250ms step_avg:77.50ms +[2025-09-03 04:31:18] [Rank 0] step:7481/10000 train_time:579895ms step_avg:77.52ms +[2025-09-03 04:31:18] [Rank 0] step:7481/10000 train_time:579895ms step_avg:77.52ms +[2025-09-03 04:31:20] [Rank 0] step:7501/10000 train_time:581575ms step_avg:77.53ms +[2025-09-03 04:31:20] [Rank 0] step:7501/10000 train_time:581575ms step_avg:77.53ms +[2025-09-03 04:31:21] [Rank 0] step:7521/10000 train_time:583217ms step_avg:77.55ms +[2025-09-03 04:31:21] [Rank 0] step:7521/10000 train_time:583217ms step_avg:77.55ms +[2025-09-03 04:31:23] [Rank 0] step:7541/10000 train_time:584872ms step_avg:77.56ms +[2025-09-03 04:31:23] [Rank 0] step:7541/10000 train_time:584872ms step_avg:77.56ms +[2025-09-03 04:31:25] [Rank 0] step:7561/10000 train_time:586506ms step_avg:77.57ms +[2025-09-03 04:31:25] [Rank 0] step:7561/10000 train_time:586506ms step_avg:77.57ms +[2025-09-03 04:31:26] [Rank 0] step:7581/10000 train_time:588156ms step_avg:77.58ms +[2025-09-03 04:31:26] [Rank 0] step:7581/10000 train_time:588156ms step_avg:77.58ms +[2025-09-03 04:31:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:31:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:31:40] [Rank 0] PRINT: step:7600/10000 val_loss:3.7209 svd_entropy: attn_qk:H=0.7783,top10E=0.25,eRank=180.1,q75/q25=54.88 attn_vo:H=0.8503,top10E=0.13,eRank=309.0,q75/q25=37.02 mlp_w1:H=0.9134,top10E=0.13,eRank=434.3,q75/q25=4.27 mlp_w2:H=0.9710,top10E=0.04,eRank=633.7,q75/q25=2.86 vo_prod:H=0.7556,top10E=0.22,eRank=161.3,q75/q25=1274.07 train_time:589891ms step_avg:77.62ms +[2025-09-03 04:31:40] [Rank 0] PRINT: step:7600/10000 val_loss:3.7209 svd_entropy: attn_qk:H=0.7783,top10E=0.25,eRank=180.1,q75/q25=54.88 attn_vo:H=0.8503,top10E=0.13,eRank=309.0,q75/q25=37.02 mlp_w1:H=0.9134,top10E=0.13,eRank=434.3,q75/q25=4.27 mlp_w2:H=0.9710,top10E=0.04,eRank=633.7,q75/q25=2.86 vo_prod:H=0.7556,top10E=0.22,eRank=161.3,q75/q25=1274.07 train_time:589891ms step_avg:77.62ms +[2025-09-03 04:31:40] [Rank 0] step:7601/10000 train_time:589907ms step_avg:77.61ms +[2025-09-03 04:31:40] [Rank 0] step:7601/10000 train_time:589907ms step_avg:77.61ms +[2025-09-03 04:31:41] [Rank 0] step:7621/10000 train_time:591469ms step_avg:77.61ms +[2025-09-03 04:31:41] [Rank 0] step:7621/10000 train_time:591469ms step_avg:77.61ms +[2025-09-03 04:31:43] [Rank 0] step:7641/10000 train_time:593105ms step_avg:77.62ms +[2025-09-03 04:31:43] [Rank 0] step:7641/10000 train_time:593105ms step_avg:77.62ms +[2025-09-03 04:31:45] [Rank 0] step:7661/10000 train_time:594743ms step_avg:77.63ms +[2025-09-03 04:31:45] [Rank 0] step:7661/10000 train_time:594743ms step_avg:77.63ms +[2025-09-03 04:31:46] [Rank 0] step:7681/10000 train_time:596375ms step_avg:77.64ms +[2025-09-03 04:31:46] [Rank 0] step:7681/10000 train_time:596375ms step_avg:77.64ms +[2025-09-03 04:31:48] [Rank 0] step:7701/10000 train_time:598012ms step_avg:77.65ms +[2025-09-03 04:31:48] [Rank 0] step:7701/10000 train_time:598012ms step_avg:77.65ms +[2025-09-03 04:31:50] [Rank 0] step:7721/10000 train_time:599664ms step_avg:77.67ms +[2025-09-03 04:31:50] [Rank 0] step:7721/10000 train_time:599664ms step_avg:77.67ms +[2025-09-03 
04:31:51] [Rank 0] step:7741/10000 train_time:601306ms step_avg:77.68ms +[2025-09-03 04:31:51] [Rank 0] step:7741/10000 train_time:601306ms step_avg:77.68ms +[2025-09-03 04:31:53] [Rank 0] step:7761/10000 train_time:602954ms step_avg:77.69ms +[2025-09-03 04:31:53] [Rank 0] step:7761/10000 train_time:602954ms step_avg:77.69ms +[2025-09-03 04:31:54] [Rank 0] step:7781/10000 train_time:604596ms step_avg:77.70ms +[2025-09-03 04:31:54] [Rank 0] step:7781/10000 train_time:604596ms step_avg:77.70ms +[2025-09-03 04:31:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:31:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:32:08] [Rank 0] PRINT: step:7800/10000 val_loss:3.7065 svd_entropy: attn_qk:H=0.7788,top10E=0.25,eRank=180.6,q75/q25=54.51 attn_vo:H=0.8509,top10E=0.13,eRank=309.8,q75/q25=36.77 mlp_w1:H=0.9139,top10E=0.13,eRank=435.7,q75/q25=4.24 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.86 vo_prod:H=0.7565,top10E=0.22,eRank=162.3,q75/q25=1261.25 train_time:606331ms step_avg:77.73ms +[2025-09-03 04:32:08] [Rank 0] PRINT: step:7800/10000 val_loss:3.7065 svd_entropy: attn_qk:H=0.7788,top10E=0.25,eRank=180.6,q75/q25=54.51 attn_vo:H=0.8509,top10E=0.13,eRank=309.8,q75/q25=36.77 mlp_w1:H=0.9139,top10E=0.13,eRank=435.7,q75/q25=4.24 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.86 vo_prod:H=0.7565,top10E=0.22,eRank=162.3,q75/q25=1261.25 train_time:606331ms step_avg:77.73ms +[2025-09-03 04:32:08] [Rank 0] step:7801/10000 train_time:606347ms step_avg:77.73ms +[2025-09-03 04:32:08] [Rank 0] step:7801/10000 train_time:606347ms step_avg:77.73ms +[2025-09-03 04:32:10] [Rank 0] step:7821/10000 train_time:607905ms step_avg:77.73ms +[2025-09-03 04:32:10] [Rank 0] step:7821/10000 train_time:607905ms step_avg:77.73ms +[2025-09-03 04:32:11] [Rank 0] step:7841/10000 train_time:609540ms 
step_avg:77.74ms +[2025-09-03 04:32:11] [Rank 0] step:7841/10000 train_time:609540ms step_avg:77.74ms +[2025-09-03 04:32:13] [Rank 0] step:7861/10000 train_time:611185ms step_avg:77.75ms +[2025-09-03 04:32:13] [Rank 0] step:7861/10000 train_time:611185ms step_avg:77.75ms +[2025-09-03 04:32:15] [Rank 0] step:7881/10000 train_time:612828ms step_avg:77.76ms +[2025-09-03 04:32:15] [Rank 0] step:7881/10000 train_time:612828ms step_avg:77.76ms +[2025-09-03 04:32:16] [Rank 0] step:7901/10000 train_time:614466ms step_avg:77.77ms +[2025-09-03 04:32:16] [Rank 0] step:7901/10000 train_time:614466ms step_avg:77.77ms +[2025-09-03 04:32:18] [Rank 0] step:7921/10000 train_time:616108ms step_avg:77.78ms +[2025-09-03 04:32:18] [Rank 0] step:7921/10000 train_time:616108ms step_avg:77.78ms +[2025-09-03 04:32:19] [Rank 0] step:7941/10000 train_time:617755ms step_avg:77.79ms +[2025-09-03 04:32:19] [Rank 0] step:7941/10000 train_time:617755ms step_avg:77.79ms +[2025-09-03 04:32:21] [Rank 0] step:7961/10000 train_time:619399ms step_avg:77.80ms +[2025-09-03 04:32:21] [Rank 0] step:7961/10000 train_time:619399ms step_avg:77.80ms +[2025-09-03 04:32:23] [Rank 0] step:7981/10000 train_time:621034ms step_avg:77.81ms +[2025-09-03 04:32:23] [Rank 0] step:7981/10000 train_time:621034ms step_avg:77.81ms +[2025-09-03 04:32:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:32:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:32:36] [Rank 0] PRINT: step:8000/10000 val_loss:3.6901 svd_entropy: attn_qk:H=0.7793,top10E=0.25,eRank=181.1,q75/q25=54.24 attn_vo:H=0.8513,top10E=0.13,eRank=310.6,q75/q25=36.47 mlp_w1:H=0.9143,top10E=0.13,eRank=436.8,q75/q25=4.23 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.86 vo_prod:H=0.7574,top10E=0.22,eRank=163.1,q75/q25=1253.49 train_time:622762ms step_avg:77.85ms +[2025-09-03 04:32:36] [Rank 0] PRINT: step:8000/10000 val_loss:3.6901 svd_entropy: attn_qk:H=0.7793,top10E=0.25,eRank=181.1,q75/q25=54.24 attn_vo:H=0.8513,top10E=0.13,eRank=310.6,q75/q25=36.47 mlp_w1:H=0.9143,top10E=0.13,eRank=436.8,q75/q25=4.23 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.86 vo_prod:H=0.7574,top10E=0.22,eRank=163.1,q75/q25=1253.49 train_time:622762ms step_avg:77.85ms +[2025-09-03 04:32:36] [Rank 0] step:8001/10000 train_time:622777ms step_avg:77.84ms +[2025-09-03 04:32:36] [Rank 0] step:8001/10000 train_time:622777ms step_avg:77.84ms +[2025-09-03 04:32:38] [Rank 0] step:8021/10000 train_time:624341ms step_avg:77.84ms +[2025-09-03 04:32:38] [Rank 0] step:8021/10000 train_time:624341ms step_avg:77.84ms +[2025-09-03 04:32:40] [Rank 0] step:8041/10000 train_time:625991ms step_avg:77.85ms +[2025-09-03 04:32:40] [Rank 0] step:8041/10000 train_time:625991ms step_avg:77.85ms +[2025-09-03 04:32:41] [Rank 0] step:8061/10000 train_time:627634ms step_avg:77.86ms +[2025-09-03 04:32:41] [Rank 0] step:8061/10000 train_time:627634ms step_avg:77.86ms +[2025-09-03 04:32:43] [Rank 0] step:8081/10000 train_time:629261ms step_avg:77.87ms +[2025-09-03 04:32:43] [Rank 0] step:8081/10000 train_time:629261ms step_avg:77.87ms +[2025-09-03 04:32:44] [Rank 0] step:8101/10000 train_time:630909ms step_avg:77.88ms +[2025-09-03 04:32:44] [Rank 0] step:8101/10000 train_time:630909ms step_avg:77.88ms +[2025-09-03 04:32:46] [Rank 0] step:8121/10000 train_time:632548ms step_avg:77.89ms +[2025-09-03 04:32:46] [Rank 0] step:8121/10000 train_time:632548ms step_avg:77.89ms +[2025-09-03 
04:32:48] [Rank 0] step:8141/10000 train_time:634774ms step_avg:77.97ms +[2025-09-03 04:32:48] [Rank 0] step:8141/10000 train_time:634774ms step_avg:77.97ms +[2025-09-03 04:32:50] [Rank 0] step:8161/10000 train_time:636431ms step_avg:77.98ms +[2025-09-03 04:32:50] [Rank 0] step:8161/10000 train_time:636431ms step_avg:77.98ms +[2025-09-03 04:32:52] [Rank 0] step:8181/10000 train_time:638102ms step_avg:78.00ms +[2025-09-03 04:32:52] [Rank 0] step:8181/10000 train_time:638102ms step_avg:78.00ms +[2025-09-03 04:32:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:32:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:33:05] [Rank 0] PRINT: step:8200/10000 val_loss:3.6804 svd_entropy: attn_qk:H=0.7796,top10E=0.25,eRank=181.6,q75/q25=53.98 attn_vo:H=0.8517,top10E=0.13,eRank=311.2,q75/q25=36.20 mlp_w1:H=0.9147,top10E=0.13,eRank=437.9,q75/q25=4.21 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7582,top10E=0.22,eRank=163.9,q75/q25=1226.83 train_time:639878ms step_avg:78.03ms +[2025-09-03 04:33:05] [Rank 0] PRINT: step:8200/10000 val_loss:3.6804 svd_entropy: attn_qk:H=0.7796,top10E=0.25,eRank=181.6,q75/q25=53.98 attn_vo:H=0.8517,top10E=0.13,eRank=311.2,q75/q25=36.20 mlp_w1:H=0.9147,top10E=0.13,eRank=437.9,q75/q25=4.21 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7582,top10E=0.22,eRank=163.9,q75/q25=1226.83 train_time:639878ms step_avg:78.03ms +[2025-09-03 04:33:05] [Rank 0] step:8201/10000 train_time:639893ms step_avg:78.03ms +[2025-09-03 04:33:05] [Rank 0] step:8201/10000 train_time:639893ms step_avg:78.03ms +[2025-09-03 04:33:07] [Rank 0] step:8221/10000 train_time:641493ms step_avg:78.03ms +[2025-09-03 04:33:07] [Rank 0] step:8221/10000 train_time:641493ms step_avg:78.03ms +[2025-09-03 04:33:08] [Rank 0] step:8241/10000 train_time:643170ms 
step_avg:78.05ms +[2025-09-03 04:33:08] [Rank 0] step:8241/10000 train_time:643170ms step_avg:78.05ms +[2025-09-03 04:33:10] [Rank 0] step:8261/10000 train_time:644842ms step_avg:78.06ms +[2025-09-03 04:33:10] [Rank 0] step:8261/10000 train_time:644842ms step_avg:78.06ms +[2025-09-03 04:33:12] [Rank 0] step:8281/10000 train_time:646511ms step_avg:78.07ms +[2025-09-03 04:33:12] [Rank 0] step:8281/10000 train_time:646511ms step_avg:78.07ms +[2025-09-03 04:33:13] [Rank 0] step:8301/10000 train_time:648179ms step_avg:78.08ms +[2025-09-03 04:33:13] [Rank 0] step:8301/10000 train_time:648179ms step_avg:78.08ms +[2025-09-03 04:33:15] [Rank 0] step:8321/10000 train_time:649839ms step_avg:78.10ms +[2025-09-03 04:33:15] [Rank 0] step:8321/10000 train_time:649839ms step_avg:78.10ms +[2025-09-03 04:33:17] [Rank 0] step:8341/10000 train_time:651511ms step_avg:78.11ms +[2025-09-03 04:33:17] [Rank 0] step:8341/10000 train_time:651511ms step_avg:78.11ms +[2025-09-03 04:33:18] [Rank 0] step:8361/10000 train_time:653182ms step_avg:78.12ms +[2025-09-03 04:33:18] [Rank 0] step:8361/10000 train_time:653182ms step_avg:78.12ms +[2025-09-03 04:33:20] [Rank 0] step:8381/10000 train_time:654849ms step_avg:78.13ms +[2025-09-03 04:33:20] [Rank 0] step:8381/10000 train_time:654849ms step_avg:78.13ms +[2025-09-03 04:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:33:33] [Rank 0] PRINT: step:8400/10000 val_loss:3.6689 svd_entropy: attn_qk:H=0.7799,top10E=0.25,eRank=181.9,q75/q25=53.80 attn_vo:H=0.8521,top10E=0.13,eRank=311.8,q75/q25=36.06 mlp_w1:H=0.9150,top10E=0.13,eRank=438.9,q75/q25=4.19 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7589,top10E=0.22,eRank=164.6,q75/q25=1234.77 train_time:656600ms step_avg:78.17ms +[2025-09-03 04:33:33] [Rank 0] PRINT: step:8400/10000 val_loss:3.6689 svd_entropy: attn_qk:H=0.7799,top10E=0.25,eRank=181.9,q75/q25=53.80 attn_vo:H=0.8521,top10E=0.13,eRank=311.8,q75/q25=36.06 mlp_w1:H=0.9150,top10E=0.13,eRank=438.9,q75/q25=4.19 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7589,top10E=0.22,eRank=164.6,q75/q25=1234.77 train_time:656600ms step_avg:78.17ms +[2025-09-03 04:33:33] [Rank 0] step:8401/10000 train_time:656616ms step_avg:78.16ms +[2025-09-03 04:33:33] [Rank 0] step:8401/10000 train_time:656616ms step_avg:78.16ms +[2025-09-03 04:33:35] [Rank 0] step:8421/10000 train_time:658207ms step_avg:78.16ms +[2025-09-03 04:33:35] [Rank 0] step:8421/10000 train_time:658207ms step_avg:78.16ms +[2025-09-03 04:33:37] [Rank 0] step:8441/10000 train_time:659870ms step_avg:78.17ms +[2025-09-03 04:33:37] [Rank 0] step:8441/10000 train_time:659870ms step_avg:78.17ms +[2025-09-03 04:33:39] [Rank 0] step:8461/10000 train_time:661534ms step_avg:78.19ms +[2025-09-03 04:33:39] [Rank 0] step:8461/10000 train_time:661534ms step_avg:78.19ms +[2025-09-03 04:33:40] [Rank 0] step:8481/10000 train_time:663205ms step_avg:78.20ms +[2025-09-03 04:33:40] [Rank 0] step:8481/10000 train_time:663205ms step_avg:78.20ms +[2025-09-03 04:33:42] [Rank 0] step:8501/10000 train_time:664896ms step_avg:78.21ms +[2025-09-03 04:33:42] [Rank 0] step:8501/10000 train_time:664896ms step_avg:78.21ms +[2025-09-03 04:33:44] [Rank 0] step:8521/10000 train_time:666571ms step_avg:78.23ms +[2025-09-03 04:33:44] [Rank 0] step:8521/10000 train_time:666571ms step_avg:78.23ms +[2025-09-03 
04:33:45] [Rank 0] step:8541/10000 train_time:668256ms step_avg:78.24ms +[2025-09-03 04:33:45] [Rank 0] step:8541/10000 train_time:668256ms step_avg:78.24ms +[2025-09-03 04:33:47] [Rank 0] step:8561/10000 train_time:669927ms step_avg:78.25ms +[2025-09-03 04:33:47] [Rank 0] step:8561/10000 train_time:669927ms step_avg:78.25ms +[2025-09-03 04:33:49] [Rank 0] step:8581/10000 train_time:671602ms step_avg:78.27ms +[2025-09-03 04:33:49] [Rank 0] step:8581/10000 train_time:671602ms step_avg:78.27ms +[2025-09-03 04:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:34:02] [Rank 0] PRINT: step:8600/10000 val_loss:3.6587 svd_entropy: attn_qk:H=0.7803,top10E=0.25,eRank=182.2,q75/q25=53.73 attn_vo:H=0.8524,top10E=0.13,eRank=312.4,q75/q25=35.80 mlp_w1:H=0.9153,top10E=0.13,eRank=439.7,q75/q25=4.18 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.86 vo_prod:H=0.7595,top10E=0.22,eRank=165.3,q75/q25=1222.15 train_time:673352ms step_avg:78.30ms +[2025-09-03 04:34:02] [Rank 0] PRINT: step:8600/10000 val_loss:3.6587 svd_entropy: attn_qk:H=0.7803,top10E=0.25,eRank=182.2,q75/q25=53.73 attn_vo:H=0.8524,top10E=0.13,eRank=312.4,q75/q25=35.80 mlp_w1:H=0.9153,top10E=0.13,eRank=439.7,q75/q25=4.18 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.86 vo_prod:H=0.7595,top10E=0.22,eRank=165.3,q75/q25=1222.15 train_time:673352ms step_avg:78.30ms +[2025-09-03 04:34:02] [Rank 0] step:8601/10000 train_time:673367ms step_avg:78.29ms +[2025-09-03 04:34:02] [Rank 0] step:8601/10000 train_time:673367ms step_avg:78.29ms +[2025-09-03 04:34:04] [Rank 0] step:8621/10000 train_time:674952ms step_avg:78.29ms +[2025-09-03 04:34:04] [Rank 0] step:8621/10000 train_time:674952ms step_avg:78.29ms +[2025-09-03 04:34:05] [Rank 0] step:8641/10000 train_time:676620ms 
step_avg:78.30ms +[2025-09-03 04:34:05] [Rank 0] step:8641/10000 train_time:676620ms step_avg:78.30ms +[2025-09-03 04:34:07] [Rank 0] step:8661/10000 train_time:678291ms step_avg:78.32ms +[2025-09-03 04:34:07] [Rank 0] step:8661/10000 train_time:678291ms step_avg:78.32ms +[2025-09-03 04:34:09] [Rank 0] step:8681/10000 train_time:679963ms step_avg:78.33ms +[2025-09-03 04:34:09] [Rank 0] step:8681/10000 train_time:679963ms step_avg:78.33ms +[2025-09-03 04:34:10] [Rank 0] step:8701/10000 train_time:681628ms step_avg:78.34ms +[2025-09-03 04:34:10] [Rank 0] step:8701/10000 train_time:681628ms step_avg:78.34ms +[2025-09-03 04:34:12] [Rank 0] step:8721/10000 train_time:683302ms step_avg:78.35ms +[2025-09-03 04:34:12] [Rank 0] step:8721/10000 train_time:683302ms step_avg:78.35ms +[2025-09-03 04:34:14] [Rank 0] step:8741/10000 train_time:684960ms step_avg:78.36ms +[2025-09-03 04:34:14] [Rank 0] step:8741/10000 train_time:684960ms step_avg:78.36ms +[2025-09-03 04:34:15] [Rank 0] step:8761/10000 train_time:686629ms step_avg:78.37ms +[2025-09-03 04:34:15] [Rank 0] step:8761/10000 train_time:686629ms step_avg:78.37ms +[2025-09-03 04:34:17] [Rank 0] step:8781/10000 train_time:688302ms step_avg:78.39ms +[2025-09-03 04:34:17] [Rank 0] step:8781/10000 train_time:688302ms step_avg:78.39ms +[2025-09-03 04:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:34:31] [Rank 0] PRINT: step:8800/10000 val_loss:3.6492 svd_entropy: attn_qk:H=0.7805,top10E=0.25,eRank=182.5,q75/q25=53.50 attn_vo:H=0.8528,top10E=0.13,eRank=312.9,q75/q25=35.64 mlp_w1:H=0.9156,top10E=0.13,eRank=440.5,q75/q25=4.18 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7602,top10E=0.22,eRank=165.9,q75/q25=1210.52 train_time:690058ms step_avg:78.42ms +[2025-09-03 04:34:31] [Rank 0] PRINT: step:8800/10000 val_loss:3.6492 svd_entropy: attn_qk:H=0.7805,top10E=0.25,eRank=182.5,q75/q25=53.50 attn_vo:H=0.8528,top10E=0.13,eRank=312.9,q75/q25=35.64 mlp_w1:H=0.9156,top10E=0.13,eRank=440.5,q75/q25=4.18 mlp_w2:H=0.9710,top10E=0.04,eRank=633.6,q75/q25=2.87 vo_prod:H=0.7602,top10E=0.22,eRank=165.9,q75/q25=1210.52 train_time:690058ms step_avg:78.42ms +[2025-09-03 04:34:31] [Rank 0] step:8801/10000 train_time:690074ms step_avg:78.41ms +[2025-09-03 04:34:31] [Rank 0] step:8801/10000 train_time:690074ms step_avg:78.41ms +[2025-09-03 04:34:32] [Rank 0] step:8821/10000 train_time:691663ms step_avg:78.41ms +[2025-09-03 04:34:32] [Rank 0] step:8821/10000 train_time:691663ms step_avg:78.41ms +[2025-09-03 04:34:34] [Rank 0] step:8841/10000 train_time:693349ms step_avg:78.42ms +[2025-09-03 04:34:34] [Rank 0] step:8841/10000 train_time:693349ms step_avg:78.42ms +[2025-09-03 04:34:36] [Rank 0] step:8861/10000 train_time:695014ms step_avg:78.44ms +[2025-09-03 04:34:36] [Rank 0] step:8861/10000 train_time:695014ms step_avg:78.44ms +[2025-09-03 04:34:37] [Rank 0] step:8881/10000 train_time:696682ms step_avg:78.45ms +[2025-09-03 04:34:37] [Rank 0] step:8881/10000 train_time:696682ms step_avg:78.45ms +[2025-09-03 04:34:39] [Rank 0] step:8901/10000 train_time:698354ms step_avg:78.46ms +[2025-09-03 04:34:39] [Rank 0] step:8901/10000 train_time:698354ms step_avg:78.46ms +[2025-09-03 04:34:41] [Rank 0] step:8921/10000 train_time:700038ms step_avg:78.47ms +[2025-09-03 04:34:41] [Rank 0] step:8921/10000 train_time:700038ms step_avg:78.47ms +[2025-09-03 
04:34:42] [Rank 0] step:8941/10000 train_time:701718ms step_avg:78.48ms +[2025-09-03 04:34:42] [Rank 0] step:8941/10000 train_time:701718ms step_avg:78.48ms +[2025-09-03 04:34:44] [Rank 0] step:8961/10000 train_time:703385ms step_avg:78.49ms +[2025-09-03 04:34:44] [Rank 0] step:8961/10000 train_time:703385ms step_avg:78.49ms +[2025-09-03 04:34:46] [Rank 0] step:8981/10000 train_time:705051ms step_avg:78.50ms +[2025-09-03 04:34:46] [Rank 0] step:8981/10000 train_time:705051ms step_avg:78.50ms +[2025-09-03 04:34:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:34:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:34:59] [Rank 0] PRINT: step:9000/10000 val_loss:3.6400 svd_entropy: attn_qk:H=0.7808,top10E=0.25,eRank=182.8,q75/q25=53.31 attn_vo:H=0.8530,top10E=0.13,eRank=313.3,q75/q25=35.53 mlp_w1:H=0.9158,top10E=0.13,eRank=441.1,q75/q25=4.17 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.86 vo_prod:H=0.7607,top10E=0.22,eRank=166.5,q75/q25=1213.70 train_time:706803ms step_avg:78.53ms +[2025-09-03 04:34:59] [Rank 0] PRINT: step:9000/10000 val_loss:3.6400 svd_entropy: attn_qk:H=0.7808,top10E=0.25,eRank=182.8,q75/q25=53.31 attn_vo:H=0.8530,top10E=0.13,eRank=313.3,q75/q25=35.53 mlp_w1:H=0.9158,top10E=0.13,eRank=441.1,q75/q25=4.17 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.86 vo_prod:H=0.7607,top10E=0.22,eRank=166.5,q75/q25=1213.70 train_time:706803ms step_avg:78.53ms +[2025-09-03 04:34:59] [Rank 0] step:9001/10000 train_time:706818ms step_avg:78.53ms +[2025-09-03 04:34:59] [Rank 0] step:9001/10000 train_time:706818ms step_avg:78.53ms +[2025-09-03 04:35:01] [Rank 0] step:9021/10000 train_time:708421ms step_avg:78.53ms +[2025-09-03 04:35:01] [Rank 0] step:9021/10000 train_time:708421ms step_avg:78.53ms +[2025-09-03 04:35:03] [Rank 0] step:9041/10000 train_time:710094ms 
step_avg:78.54ms +[2025-09-03 04:35:03] [Rank 0] step:9041/10000 train_time:710094ms step_avg:78.54ms +[2025-09-03 04:35:04] [Rank 0] step:9061/10000 train_time:711770ms step_avg:78.55ms +[2025-09-03 04:35:04] [Rank 0] step:9061/10000 train_time:711770ms step_avg:78.55ms +[2025-09-03 04:35:06] [Rank 0] step:9081/10000 train_time:713447ms step_avg:78.56ms +[2025-09-03 04:35:06] [Rank 0] step:9081/10000 train_time:713447ms step_avg:78.56ms +[2025-09-03 04:35:08] [Rank 0] step:9101/10000 train_time:715137ms step_avg:78.58ms +[2025-09-03 04:35:08] [Rank 0] step:9101/10000 train_time:715137ms step_avg:78.58ms +[2025-09-03 04:35:09] [Rank 0] step:9121/10000 train_time:716811ms step_avg:78.59ms +[2025-09-03 04:35:09] [Rank 0] step:9121/10000 train_time:716811ms step_avg:78.59ms +[2025-09-03 04:35:11] [Rank 0] step:9141/10000 train_time:718471ms step_avg:78.60ms +[2025-09-03 04:35:11] [Rank 0] step:9141/10000 train_time:718471ms step_avg:78.60ms +[2025-09-03 04:35:13] [Rank 0] step:9161/10000 train_time:720137ms step_avg:78.61ms +[2025-09-03 04:35:13] [Rank 0] step:9161/10000 train_time:720137ms step_avg:78.61ms +[2025-09-03 04:35:14] [Rank 0] step:9181/10000 train_time:721842ms step_avg:78.62ms +[2025-09-03 04:35:14] [Rank 0] step:9181/10000 train_time:721842ms step_avg:78.62ms +[2025-09-03 04:35:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:35:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:35:28] [Rank 0] PRINT: step:9200/10000 val_loss:3.6323 svd_entropy: attn_qk:H=0.7810,top10E=0.25,eRank=183.0,q75/q25=53.10 attn_vo:H=0.8533,top10E=0.13,eRank=313.7,q75/q25=35.31 mlp_w1:H=0.9160,top10E=0.13,eRank=441.7,q75/q25=4.15 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.86 vo_prod:H=0.7612,top10E=0.22,eRank=167.0,q75/q25=1202.33 train_time:723594ms step_avg:78.65ms +[2025-09-03 04:35:28] [Rank 0] PRINT: step:9200/10000 val_loss:3.6323 svd_entropy: attn_qk:H=0.7810,top10E=0.25,eRank=183.0,q75/q25=53.10 attn_vo:H=0.8533,top10E=0.13,eRank=313.7,q75/q25=35.31 mlp_w1:H=0.9160,top10E=0.13,eRank=441.7,q75/q25=4.15 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.86 vo_prod:H=0.7612,top10E=0.22,eRank=167.0,q75/q25=1202.33 train_time:723594ms step_avg:78.65ms +[2025-09-03 04:35:28] [Rank 0] step:9201/10000 train_time:723609ms step_avg:78.64ms +[2025-09-03 04:35:28] [Rank 0] step:9201/10000 train_time:723609ms step_avg:78.64ms +[2025-09-03 04:35:30] [Rank 0] step:9221/10000 train_time:725206ms step_avg:78.65ms +[2025-09-03 04:35:30] [Rank 0] step:9221/10000 train_time:725206ms step_avg:78.65ms +[2025-09-03 04:35:31] [Rank 0] step:9241/10000 train_time:726926ms step_avg:78.66ms +[2025-09-03 04:35:31] [Rank 0] step:9241/10000 train_time:726926ms step_avg:78.66ms +[2025-09-03 04:35:33] [Rank 0] step:9261/10000 train_time:728651ms step_avg:78.68ms +[2025-09-03 04:35:33] [Rank 0] step:9261/10000 train_time:728651ms step_avg:78.68ms +[2025-09-03 04:35:35] [Rank 0] step:9281/10000 train_time:730310ms step_avg:78.69ms +[2025-09-03 04:35:35] [Rank 0] step:9281/10000 train_time:730310ms step_avg:78.69ms +[2025-09-03 04:35:36] [Rank 0] step:9301/10000 train_time:731981ms step_avg:78.70ms +[2025-09-03 04:35:36] [Rank 0] step:9301/10000 train_time:731981ms step_avg:78.70ms +[2025-09-03 04:35:38] [Rank 0] step:9321/10000 train_time:733658ms step_avg:78.71ms +[2025-09-03 04:35:38] [Rank 0] step:9321/10000 train_time:733658ms step_avg:78.71ms +[2025-09-03 
04:35:40] [Rank 0] step:9341/10000 train_time:735330ms step_avg:78.72ms +[2025-09-03 04:35:40] [Rank 0] step:9341/10000 train_time:735330ms step_avg:78.72ms +[2025-09-03 04:35:41] [Rank 0] step:9361/10000 train_time:737009ms step_avg:78.73ms +[2025-09-03 04:35:41] [Rank 0] step:9361/10000 train_time:737009ms step_avg:78.73ms +[2025-09-03 04:35:43] [Rank 0] step:9381/10000 train_time:738695ms step_avg:78.74ms +[2025-09-03 04:35:43] [Rank 0] step:9381/10000 train_time:738695ms step_avg:78.74ms +[2025-09-03 04:35:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:35:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:35:56] [Rank 0] PRINT: step:9400/10000 val_loss:3.6241 svd_entropy: attn_qk:H=0.7811,top10E=0.25,eRank=183.2,q75/q25=52.82 attn_vo:H=0.8535,top10E=0.13,eRank=314.0,q75/q25=35.13 mlp_w1:H=0.9162,top10E=0.13,eRank=442.2,q75/q25=4.14 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.87 vo_prod:H=0.7616,top10E=0.22,eRank=167.4,q75/q25=1194.72 train_time:740459ms step_avg:78.77ms +[2025-09-03 04:35:56] [Rank 0] PRINT: step:9400/10000 val_loss:3.6241 svd_entropy: attn_qk:H=0.7811,top10E=0.25,eRank=183.2,q75/q25=52.82 attn_vo:H=0.8535,top10E=0.13,eRank=314.0,q75/q25=35.13 mlp_w1:H=0.9162,top10E=0.13,eRank=442.2,q75/q25=4.14 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.87 vo_prod:H=0.7616,top10E=0.22,eRank=167.4,q75/q25=1194.72 train_time:740459ms step_avg:78.77ms +[2025-09-03 04:35:57] [Rank 0] step:9401/10000 train_time:740475ms step_avg:78.77ms +[2025-09-03 04:35:57] [Rank 0] step:9401/10000 train_time:740475ms step_avg:78.77ms +[2025-09-03 04:35:58] [Rank 0] step:9421/10000 train_time:742062ms step_avg:78.77ms +[2025-09-03 04:35:58] [Rank 0] step:9421/10000 train_time:742062ms step_avg:78.77ms +[2025-09-03 04:36:00] [Rank 0] step:9441/10000 train_time:743737ms 
step_avg:78.78ms +[2025-09-03 04:36:00] [Rank 0] step:9441/10000 train_time:743737ms step_avg:78.78ms +[2025-09-03 04:36:02] [Rank 0] step:9461/10000 train_time:745414ms step_avg:78.79ms +[2025-09-03 04:36:02] [Rank 0] step:9461/10000 train_time:745414ms step_avg:78.79ms +[2025-09-03 04:36:03] [Rank 0] step:9481/10000 train_time:747091ms step_avg:78.80ms +[2025-09-03 04:36:03] [Rank 0] step:9481/10000 train_time:747091ms step_avg:78.80ms +[2025-09-03 04:36:05] [Rank 0] step:9501/10000 train_time:748778ms step_avg:78.81ms +[2025-09-03 04:36:05] [Rank 0] step:9501/10000 train_time:748778ms step_avg:78.81ms +[2025-09-03 04:36:07] [Rank 0] step:9521/10000 train_time:750442ms step_avg:78.82ms +[2025-09-03 04:36:07] [Rank 0] step:9521/10000 train_time:750442ms step_avg:78.82ms +[2025-09-03 04:36:08] [Rank 0] step:9541/10000 train_time:752115ms step_avg:78.83ms +[2025-09-03 04:36:08] [Rank 0] step:9541/10000 train_time:752115ms step_avg:78.83ms +[2025-09-03 04:36:10] [Rank 0] step:9561/10000 train_time:753781ms step_avg:78.84ms +[2025-09-03 04:36:10] [Rank 0] step:9561/10000 train_time:753781ms step_avg:78.84ms +[2025-09-03 04:36:12] [Rank 0] step:9581/10000 train_time:755454ms step_avg:78.85ms +[2025-09-03 04:36:12] [Rank 0] step:9581/10000 train_time:755454ms step_avg:78.85ms +[2025-09-03 04:36:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:36:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:36:25] [Rank 0] PRINT: step:9600/10000 val_loss:3.6178 svd_entropy: attn_qk:H=0.7813,top10E=0.25,eRank=183.3,q75/q25=52.84 attn_vo:H=0.8537,top10E=0.13,eRank=314.3,q75/q25=35.05 mlp_w1:H=0.9163,top10E=0.13,eRank=442.6,q75/q25=4.14 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.87 vo_prod:H=0.7620,top10E=0.22,eRank=167.8,q75/q25=1191.61 train_time:757225ms step_avg:78.88ms +[2025-09-03 04:36:25] [Rank 0] PRINT: step:9600/10000 val_loss:3.6178 svd_entropy: attn_qk:H=0.7813,top10E=0.25,eRank=183.3,q75/q25=52.84 attn_vo:H=0.8537,top10E=0.13,eRank=314.3,q75/q25=35.05 mlp_w1:H=0.9163,top10E=0.13,eRank=442.6,q75/q25=4.14 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.87 vo_prod:H=0.7620,top10E=0.22,eRank=167.8,q75/q25=1191.61 train_time:757225ms step_avg:78.88ms +[2025-09-03 04:36:25] [Rank 0] step:9601/10000 train_time:757241ms step_avg:78.87ms +[2025-09-03 04:36:25] [Rank 0] step:9601/10000 train_time:757241ms step_avg:78.87ms +[2025-09-03 04:36:27] [Rank 0] step:9621/10000 train_time:758828ms step_avg:78.87ms +[2025-09-03 04:36:27] [Rank 0] step:9621/10000 train_time:758828ms step_avg:78.87ms +[2025-09-03 04:36:29] [Rank 0] step:9641/10000 train_time:760507ms step_avg:78.88ms +[2025-09-03 04:36:29] [Rank 0] step:9641/10000 train_time:760507ms step_avg:78.88ms +[2025-09-03 04:36:30] [Rank 0] step:9661/10000 train_time:762210ms step_avg:78.90ms +[2025-09-03 04:36:30] [Rank 0] step:9661/10000 train_time:762210ms step_avg:78.90ms +[2025-09-03 04:36:32] [Rank 0] step:9681/10000 train_time:763903ms step_avg:78.91ms +[2025-09-03 04:36:32] [Rank 0] step:9681/10000 train_time:763903ms step_avg:78.91ms +[2025-09-03 04:36:34] [Rank 0] step:9701/10000 train_time:765615ms step_avg:78.92ms +[2025-09-03 04:36:34] [Rank 0] step:9701/10000 train_time:765615ms step_avg:78.92ms +[2025-09-03 04:36:35] [Rank 0] step:9721/10000 train_time:767306ms step_avg:78.93ms +[2025-09-03 04:36:35] [Rank 0] step:9721/10000 train_time:767306ms step_avg:78.93ms +[2025-09-03 
04:36:37] [Rank 0] step:9741/10000 train_time:769026ms step_avg:78.95ms +[2025-09-03 04:36:37] [Rank 0] step:9741/10000 train_time:769026ms step_avg:78.95ms +[2025-09-03 04:36:39] [Rank 0] step:9761/10000 train_time:770732ms step_avg:78.96ms +[2025-09-03 04:36:39] [Rank 0] step:9761/10000 train_time:770732ms step_avg:78.96ms +[2025-09-03 04:36:40] [Rank 0] step:9781/10000 train_time:772440ms step_avg:78.97ms +[2025-09-03 04:36:40] [Rank 0] step:9781/10000 train_time:772440ms step_avg:78.97ms +[2025-09-03 04:36:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:36:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:36:54] [Rank 0] PRINT: step:9800/10000 val_loss:3.6104 svd_entropy: attn_qk:H=0.7814,top10E=0.25,eRank=183.4,q75/q25=52.82 attn_vo:H=0.8538,top10E=0.13,eRank=314.5,q75/q25=35.03 mlp_w1:H=0.9164,top10E=0.13,eRank=442.9,q75/q25=4.13 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.87 vo_prod:H=0.7623,top10E=0.22,eRank=168.1,q75/q25=1191.00 train_time:774245ms step_avg:79.00ms +[2025-09-03 04:36:54] [Rank 0] PRINT: step:9800/10000 val_loss:3.6104 svd_entropy: attn_qk:H=0.7814,top10E=0.25,eRank=183.4,q75/q25=52.82 attn_vo:H=0.8538,top10E=0.13,eRank=314.5,q75/q25=35.03 mlp_w1:H=0.9164,top10E=0.13,eRank=442.9,q75/q25=4.13 mlp_w2:H=0.9710,top10E=0.04,eRank=633.5,q75/q25=2.87 vo_prod:H=0.7623,top10E=0.22,eRank=168.1,q75/q25=1191.00 train_time:774245ms step_avg:79.00ms +[2025-09-03 04:36:54] [Rank 0] step:9801/10000 train_time:774260ms step_avg:79.00ms +[2025-09-03 04:36:54] [Rank 0] step:9801/10000 train_time:774260ms step_avg:79.00ms +[2025-09-03 04:36:56] [Rank 0] step:9821/10000 train_time:775869ms step_avg:79.00ms +[2025-09-03 04:36:56] [Rank 0] step:9821/10000 train_time:775869ms step_avg:79.00ms +[2025-09-03 04:36:57] [Rank 0] step:9841/10000 train_time:777579ms 
step_avg:79.01ms +[2025-09-03 04:36:57] [Rank 0] step:9841/10000 train_time:777579ms step_avg:79.01ms +[2025-09-03 04:36:59] [Rank 0] step:9861/10000 train_time:779265ms step_avg:79.02ms +[2025-09-03 04:36:59] [Rank 0] step:9861/10000 train_time:779265ms step_avg:79.02ms +[2025-09-03 04:37:01] [Rank 0] step:9881/10000 train_time:780953ms step_avg:79.04ms +[2025-09-03 04:37:01] [Rank 0] step:9881/10000 train_time:780953ms step_avg:79.04ms +[2025-09-03 04:37:03] [Rank 0] step:9901/10000 train_time:782656ms step_avg:79.05ms +[2025-09-03 04:37:03] [Rank 0] step:9901/10000 train_time:782656ms step_avg:79.05ms +[2025-09-03 04:37:04] [Rank 0] step:9921/10000 train_time:784354ms step_avg:79.06ms +[2025-09-03 04:37:04] [Rank 0] step:9921/10000 train_time:784354ms step_avg:79.06ms +[2025-09-03 04:37:06] [Rank 0] step:9941/10000 train_time:786057ms step_avg:79.07ms +[2025-09-03 04:37:06] [Rank 0] step:9941/10000 train_time:786057ms step_avg:79.07ms +[2025-09-03 04:37:08] [Rank 0] step:9961/10000 train_time:787757ms step_avg:79.08ms +[2025-09-03 04:37:08] [Rank 0] step:9961/10000 train_time:787757ms step_avg:79.08ms +[2025-09-03 04:37:09] [Rank 0] step:9981/10000 train_time:789455ms step_avg:79.10ms +[2025-09-03 04:37:09] [Rank 0] step:9981/10000 train_time:789455ms step_avg:79.10ms +[2025-09-03 04:37:11] [Rank 0] step:10000/10000 train_time:791094ms step_avg:79.11ms +[2025-09-03 04:37:11] [Rank 0] step:10000/10000 train_time:791094ms step_avg:79.11ms +[2025-09-03 04:37:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:37:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:37:23] [Rank 0] PRINT: step:10000/10000 val_loss:3.6051 svd_entropy: attn_qk:H=0.7814,top10E=0.25,eRank=183.5,q75/q25=52.72 attn_vo:H=0.8539,top10E=0.13,eRank=314.7,q75/q25=34.95 mlp_w1:H=0.9165,top10E=0.13,eRank=443.1,q75/q25=4.13 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.87 vo_prod:H=0.7626,top10E=0.22,eRank=168.4,q75/q25=1183.41 train_time:791276ms step_avg:79.13ms +[2025-09-03 04:37:23] [Rank 0] PRINT: step:10000/10000 val_loss:3.6051 svd_entropy: attn_qk:H=0.7814,top10E=0.25,eRank=183.5,q75/q25=52.72 attn_vo:H=0.8539,top10E=0.13,eRank=314.7,q75/q25=34.95 mlp_w1:H=0.9165,top10E=0.13,eRank=443.1,q75/q25=4.13 mlp_w2:H=0.9710,top10E=0.04,eRank=633.4,q75/q25=2.87 vo_prod:H=0.7626,top10E=0.22,eRank=168.4,q75/q25=1183.41 train_time:791276ms step_avg:79.13ms +[2025-09-03 04:37:23] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 04:37:23 2025 --- +[2025-09-03 04:37:23] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 04:37:23 2025 --- +[2025-09-03 04:37:23] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15078 MiB +[2025-09-03 04:37:23] [Rank 0] PRINT: Peak memory allocated: 10086 MiB reserved: 15078 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_41/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_41/config.json new file mode 100644 index 0000000000000000000000000000000000000000..eb768da3819c891a2e660c734e6a1991f4bc6630 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_41/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 41, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "73cd31ae-a08f-45a6-97bf-66b4a336b1c9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_41/training_log_73cd31ae-a08f-45a6-97bf-66b4a336b1c9.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_41/training_log_73cd31ae-a08f-45a6-97bf-66b4a336b1c9.txt new file mode 100644 index 0000000000000000000000000000000000000000..e4aa9d6fe5f8f8d9db0db44161a63bb0fb117292 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_41/training_log_73cd31ae-a08f-45a6-97bf-66b4a336b1c9.txt @@ -0,0 +1,2984 @@ +[2025-09-02 04:43:59] [Rank 0] PRINT: --- Script Start: Tue Sep 2 04:43:59 2025 --- +[2025-09-02 04:43:59] [Rank 0] PRINT: --- Script Start: Tue Sep 2 04:43:59 2025 --- +[2025-09-02 04:43:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 04:43:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 04:43:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 04:43:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 04:43:59] [Rank 0] PRINT: Using fixed seed: 41 +[2025-09-02 04:43:59] [Rank 0] PRINT: Using fixed seed: 41 +[2025-09-02 04:43:59] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_41 +[2025-09-02 04:43:59] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_41 +[2025-09-02 04:43:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 04:43:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 04:43:59] [Rank 0] PRINT: Constructing model... +[2025-09-02 04:43:59] [Rank 0] PRINT: Constructing model... +[2025-09-02 04:44:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 04:44:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 04:44:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 04:44:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 04:44:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 04:44:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 04:44:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 04:44:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 04:44:01] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 04:44:01] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 04:44:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 04:44:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 04:44:01] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 04:44:01] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 04:44:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 04:44:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 04:44:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 04:44:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 04:44:01] [Rank 0] PRINT: Starting warmup... +[2025-09-02 04:44:01] [Rank 0] PRINT: Starting warmup... +[2025-09-02 04:48:01] [Rank 0] PRINT: Warmup complete. +[2025-09-02 04:48:01] [Rank 0] PRINT: Warmup complete. +[2025-09-02 04:48:01] [Rank 0] PRINT: Starting training... +[2025-09-02 04:48:01] [Rank 0] PRINT: Starting training... 
+[2025-09-02 04:48:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:48:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:49:27] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 04:49:27] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 04:49:29] [Rank 0] step:21/10000 train_time:1310ms step_avg:62.40ms +[2025-09-02 04:49:29] [Rank 0] step:21/10000 train_time:1310ms step_avg:62.40ms +[2025-09-02 04:49:30] [Rank 0] step:41/10000 train_time:2714ms step_avg:66.20ms +[2025-09-02 04:49:30] [Rank 0] step:41/10000 train_time:2714ms step_avg:66.20ms +[2025-09-02 04:49:31] [Rank 0] step:61/10000 train_time:4124ms step_avg:67.61ms +[2025-09-02 04:49:31] [Rank 0] step:61/10000 train_time:4124ms step_avg:67.61ms +[2025-09-02 04:49:33] [Rank 0] step:81/10000 train_time:5537ms step_avg:68.36ms +[2025-09-02 04:49:33] [Rank 0] step:81/10000 train_time:5537ms step_avg:68.36ms +[2025-09-02 04:49:34] [Rank 0] step:101/10000 train_time:6952ms step_avg:68.83ms +[2025-09-02 04:49:34] [Rank 0] step:101/10000 train_time:6952ms step_avg:68.83ms +[2025-09-02 04:49:36] [Rank 0] step:121/10000 train_time:8367ms step_avg:69.15ms +[2025-09-02 04:49:36] [Rank 0] step:121/10000 
train_time:8367ms step_avg:69.15ms +[2025-09-02 04:49:37] [Rank 0] step:141/10000 train_time:9782ms step_avg:69.38ms +[2025-09-02 04:49:37] [Rank 0] step:141/10000 train_time:9782ms step_avg:69.38ms +[2025-09-02 04:49:39] [Rank 0] step:161/10000 train_time:11198ms step_avg:69.55ms +[2025-09-02 04:49:39] [Rank 0] step:161/10000 train_time:11198ms step_avg:69.55ms +[2025-09-02 04:49:40] [Rank 0] step:181/10000 train_time:12613ms step_avg:69.69ms +[2025-09-02 04:49:40] [Rank 0] step:181/10000 train_time:12613ms step_avg:69.69ms +[2025-09-02 04:49:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:49:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:49:53] [Rank 0] PRINT: step:200/10000 val_loss:6.6141 svd_entropy: attn_qk:H=0.5004,top10E=0.72,eRank=74.9,q75/q25=12.16 attn_vo:H=0.4419,top10E=0.66,eRank=61.9,q75/q25=inf mlp_w1:H=0.4300,top10E=0.77,eRank=21.2,q75/q25=2.65 mlp_w2:H=0.1776,top10E=0.97,eRank=4.2,q75/q25=94.53 vo_prod:H=0.1920,top10E=0.87,eRank=6.7,q75/q25=inf train_time:14173ms step_avg:70.86ms +[2025-09-02 04:49:53] [Rank 0] PRINT: step:200/10000 val_loss:6.6141 svd_entropy: attn_qk:H=0.5004,top10E=0.72,eRank=74.9,q75/q25=12.16 attn_vo:H=0.4419,top10E=0.66,eRank=61.9,q75/q25=inf mlp_w1:H=0.4300,top10E=0.77,eRank=21.2,q75/q25=2.65 mlp_w2:H=0.1776,top10E=0.97,eRank=4.2,q75/q25=94.53 vo_prod:H=0.1920,top10E=0.87,eRank=6.7,q75/q25=inf train_time:14173ms step_avg:70.86ms +[2025-09-02 04:49:53] [Rank 0] step:201/10000 train_time:14184ms step_avg:70.57ms +[2025-09-02 04:49:53] [Rank 0] step:201/10000 train_time:14184ms step_avg:70.57ms +[2025-09-02 04:49:55] [Rank 0] step:221/10000 train_time:15483ms step_avg:70.06ms +[2025-09-02 04:49:55] [Rank 0] step:221/10000 train_time:15483ms step_avg:70.06ms +[2025-09-02 04:49:56] [Rank 0] step:241/10000 train_time:16899ms 
step_avg:70.12ms +[2025-09-02 04:49:56] [Rank 0] step:241/10000 train_time:16899ms step_avg:70.12ms +[2025-09-02 04:49:58] [Rank 0] step:261/10000 train_time:18315ms step_avg:70.17ms +[2025-09-02 04:49:58] [Rank 0] step:261/10000 train_time:18315ms step_avg:70.17ms +[2025-09-02 04:49:59] [Rank 0] step:281/10000 train_time:19731ms step_avg:70.22ms +[2025-09-02 04:49:59] [Rank 0] step:281/10000 train_time:19731ms step_avg:70.22ms +[2025-09-02 04:50:00] [Rank 0] step:301/10000 train_time:21149ms step_avg:70.26ms +[2025-09-02 04:50:00] [Rank 0] step:301/10000 train_time:21149ms step_avg:70.26ms +[2025-09-02 04:50:02] [Rank 0] step:321/10000 train_time:22568ms step_avg:70.30ms +[2025-09-02 04:50:02] [Rank 0] step:321/10000 train_time:22568ms step_avg:70.30ms +[2025-09-02 04:50:03] [Rank 0] step:341/10000 train_time:23986ms step_avg:70.34ms +[2025-09-02 04:50:03] [Rank 0] step:341/10000 train_time:23986ms step_avg:70.34ms +[2025-09-02 04:50:05] [Rank 0] step:361/10000 train_time:25405ms step_avg:70.37ms +[2025-09-02 04:50:05] [Rank 0] step:361/10000 train_time:25405ms step_avg:70.37ms +[2025-09-02 04:50:06] [Rank 0] step:381/10000 train_time:26824ms step_avg:70.40ms +[2025-09-02 04:50:06] [Rank 0] step:381/10000 train_time:26824ms step_avg:70.40ms +[2025-09-02 04:50:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:50:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:50:19] [Rank 0] PRINT: step:400/10000 val_loss:6.0493 svd_entropy: attn_qk:H=0.5481,top10E=0.63,eRank=83.0,q75/q25=13.56 attn_vo:H=0.5155,top10E=0.54,eRank=76.7,q75/q25=inf mlp_w1:H=0.4603,top10E=0.70,eRank=33.7,q75/q25=3.29 mlp_w2:H=0.5496,top10E=0.60,eRank=39.1,q75/q25=13.21 vo_prod:H=0.3380,top10E=0.79,eRank=14.6,q75/q25=inf train_time:28384ms step_avg:70.96ms +[2025-09-02 04:50:19] [Rank 0] PRINT: step:400/10000 val_loss:6.0493 svd_entropy: attn_qk:H=0.5481,top10E=0.63,eRank=83.0,q75/q25=13.56 attn_vo:H=0.5155,top10E=0.54,eRank=76.7,q75/q25=inf mlp_w1:H=0.4603,top10E=0.70,eRank=33.7,q75/q25=3.29 mlp_w2:H=0.5496,top10E=0.60,eRank=39.1,q75/q25=13.21 vo_prod:H=0.3380,top10E=0.79,eRank=14.6,q75/q25=inf train_time:28384ms step_avg:70.96ms +[2025-09-02 04:50:19] [Rank 0] step:401/10000 train_time:28395ms step_avg:70.81ms +[2025-09-02 04:50:19] [Rank 0] step:401/10000 train_time:28395ms step_avg:70.81ms +[2025-09-02 04:50:21] [Rank 0] step:421/10000 train_time:29685ms step_avg:70.51ms +[2025-09-02 04:50:21] [Rank 0] step:421/10000 train_time:29685ms step_avg:70.51ms +[2025-09-02 04:50:22] [Rank 0] step:441/10000 train_time:31103ms step_avg:70.53ms +[2025-09-02 04:50:22] [Rank 0] step:441/10000 train_time:31103ms step_avg:70.53ms +[2025-09-02 04:50:24] [Rank 0] step:461/10000 train_time:32523ms step_avg:70.55ms +[2025-09-02 04:50:24] [Rank 0] step:461/10000 train_time:32523ms step_avg:70.55ms +[2025-09-02 04:50:25] [Rank 0] step:481/10000 train_time:33943ms step_avg:70.57ms +[2025-09-02 04:50:25] [Rank 0] step:481/10000 train_time:33943ms step_avg:70.57ms +[2025-09-02 04:50:27] [Rank 0] step:501/10000 train_time:35361ms step_avg:70.58ms +[2025-09-02 04:50:27] [Rank 0] step:501/10000 train_time:35361ms step_avg:70.58ms +[2025-09-02 04:50:28] [Rank 0] step:521/10000 train_time:36780ms step_avg:70.59ms +[2025-09-02 04:50:28] [Rank 0] step:521/10000 train_time:36780ms step_avg:70.59ms +[2025-09-02 04:50:29] [Rank 0] step:541/10000 train_time:38200ms 
step_avg:70.61ms +[2025-09-02 04:50:29] [Rank 0] step:541/10000 train_time:38200ms step_avg:70.61ms +[2025-09-02 04:50:31] [Rank 0] step:561/10000 train_time:39621ms step_avg:70.63ms +[2025-09-02 04:50:31] [Rank 0] step:561/10000 train_time:39621ms step_avg:70.63ms +[2025-09-02 04:50:32] [Rank 0] step:581/10000 train_time:41041ms step_avg:70.64ms +[2025-09-02 04:50:32] [Rank 0] step:581/10000 train_time:41041ms step_avg:70.64ms +[2025-09-02 04:50:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:50:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:50:45] [Rank 0] PRINT: step:600/10000 val_loss:5.7270 svd_entropy: attn_qk:H=0.5818,top10E=0.56,eRank=90.4,q75/q25=15.19 attn_vo:H=0.5596,top10E=0.47,eRank=90.5,q75/q25=inf mlp_w1:H=0.5105,top10E=0.62,eRank=47.1,q75/q25=3.86 mlp_w2:H=0.6395,top10E=0.44,eRank=70.6,q75/q25=9.36 vo_prod:H=0.4024,top10E=0.70,eRank=21.2,q75/q25=inf train_time:42605ms step_avg:71.01ms +[2025-09-02 04:50:45] [Rank 0] PRINT: step:600/10000 val_loss:5.7270 svd_entropy: attn_qk:H=0.5818,top10E=0.56,eRank=90.4,q75/q25=15.19 attn_vo:H=0.5596,top10E=0.47,eRank=90.5,q75/q25=inf mlp_w1:H=0.5105,top10E=0.62,eRank=47.1,q75/q25=3.86 mlp_w2:H=0.6395,top10E=0.44,eRank=70.6,q75/q25=9.36 vo_prod:H=0.4024,top10E=0.70,eRank=21.2,q75/q25=inf train_time:42605ms step_avg:71.01ms +[2025-09-02 04:50:45] [Rank 0] step:601/10000 train_time:42616ms step_avg:70.91ms +[2025-09-02 04:50:45] [Rank 0] step:601/10000 train_time:42616ms step_avg:70.91ms +[2025-09-02 04:50:47] [Rank 0] step:621/10000 train_time:43905ms step_avg:70.70ms +[2025-09-02 04:50:47] [Rank 0] step:621/10000 train_time:43905ms step_avg:70.70ms +[2025-09-02 04:50:48] [Rank 0] step:641/10000 train_time:45323ms step_avg:70.71ms +[2025-09-02 04:50:48] [Rank 0] step:641/10000 train_time:45323ms step_avg:70.71ms 
+[2025-09-02 04:50:50] [Rank 0] step:661/10000 train_time:46741ms step_avg:70.71ms +[2025-09-02 04:50:50] [Rank 0] step:661/10000 train_time:46741ms step_avg:70.71ms +[2025-09-02 04:50:51] [Rank 0] step:681/10000 train_time:48161ms step_avg:70.72ms +[2025-09-02 04:50:51] [Rank 0] step:681/10000 train_time:48161ms step_avg:70.72ms +[2025-09-02 04:50:53] [Rank 0] step:701/10000 train_time:49580ms step_avg:70.73ms +[2025-09-02 04:50:53] [Rank 0] step:701/10000 train_time:49580ms step_avg:70.73ms +[2025-09-02 04:50:54] [Rank 0] step:721/10000 train_time:50999ms step_avg:70.73ms +[2025-09-02 04:50:54] [Rank 0] step:721/10000 train_time:50999ms step_avg:70.73ms +[2025-09-02 04:50:55] [Rank 0] step:741/10000 train_time:52420ms step_avg:70.74ms +[2025-09-02 04:50:55] [Rank 0] step:741/10000 train_time:52420ms step_avg:70.74ms +[2025-09-02 04:50:57] [Rank 0] step:761/10000 train_time:53850ms step_avg:70.76ms +[2025-09-02 04:50:57] [Rank 0] step:761/10000 train_time:53850ms step_avg:70.76ms +[2025-09-02 04:50:58] [Rank 0] step:781/10000 train_time:55363ms step_avg:70.89ms +[2025-09-02 04:50:58] [Rank 0] step:781/10000 train_time:55363ms step_avg:70.89ms +[2025-09-02 04:51:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:51:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:51:12] [Rank 0] PRINT: step:800/10000 val_loss:5.4880 svd_entropy: attn_qk:H=0.5736,top10E=0.56,eRank=57.9,q75/q25=17.28 attn_vo:H=0.6164,top10E=0.44,eRank=96.8,q75/q25=32.74 mlp_w1:H=0.5512,top10E=0.57,eRank=58.6,q75/q25=4.32 mlp_w2:H=0.6845,top10E=0.36,eRank=94.9,q75/q25=9.13 vo_prod:H=0.4972,top10E=0.67,eRank=31.9,q75/q25=323.49 train_time:56940ms step_avg:71.17ms +[2025-09-02 04:51:12] [Rank 0] PRINT: step:800/10000 val_loss:5.4880 svd_entropy: attn_qk:H=0.5736,top10E=0.56,eRank=57.9,q75/q25=17.28 attn_vo:H=0.6164,top10E=0.44,eRank=96.8,q75/q25=32.74 mlp_w1:H=0.5512,top10E=0.57,eRank=58.6,q75/q25=4.32 mlp_w2:H=0.6845,top10E=0.36,eRank=94.9,q75/q25=9.13 vo_prod:H=0.4972,top10E=0.67,eRank=31.9,q75/q25=323.49 train_time:56940ms step_avg:71.17ms +[2025-09-02 04:51:12] [Rank 0] step:801/10000 train_time:56951ms step_avg:71.10ms +[2025-09-02 04:51:12] [Rank 0] step:801/10000 train_time:56951ms step_avg:71.10ms +[2025-09-02 04:51:13] [Rank 0] step:821/10000 train_time:58255ms step_avg:70.96ms +[2025-09-02 04:51:13] [Rank 0] step:821/10000 train_time:58255ms step_avg:70.96ms +[2025-09-02 04:51:15] [Rank 0] step:841/10000 train_time:59685ms step_avg:70.97ms +[2025-09-02 04:51:15] [Rank 0] step:841/10000 train_time:59685ms step_avg:70.97ms +[2025-09-02 04:51:16] [Rank 0] step:861/10000 train_time:61117ms step_avg:70.98ms +[2025-09-02 04:51:16] [Rank 0] step:861/10000 train_time:61117ms step_avg:70.98ms +[2025-09-02 04:51:17] [Rank 0] step:881/10000 train_time:62549ms step_avg:71.00ms +[2025-09-02 04:51:17] [Rank 0] step:881/10000 train_time:62549ms step_avg:71.00ms +[2025-09-02 04:51:19] [Rank 0] step:901/10000 train_time:63980ms step_avg:71.01ms +[2025-09-02 04:51:19] [Rank 0] step:901/10000 train_time:63980ms step_avg:71.01ms +[2025-09-02 04:51:20] [Rank 0] step:921/10000 train_time:65413ms step_avg:71.02ms +[2025-09-02 04:51:20] [Rank 0] step:921/10000 train_time:65413ms step_avg:71.02ms +[2025-09-02 04:51:22] [Rank 0] step:941/10000 train_time:66847ms 
step_avg:71.04ms +[2025-09-02 04:51:22] [Rank 0] step:941/10000 train_time:66847ms step_avg:71.04ms +[2025-09-02 04:51:23] [Rank 0] step:961/10000 train_time:68281ms step_avg:71.05ms +[2025-09-02 04:51:23] [Rank 0] step:961/10000 train_time:68281ms step_avg:71.05ms +[2025-09-02 04:51:25] [Rank 0] step:981/10000 train_time:69715ms step_avg:71.06ms +[2025-09-02 04:51:25] [Rank 0] step:981/10000 train_time:69715ms step_avg:71.06ms +[2025-09-02 04:51:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:51:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:51:38] [Rank 0] PRINT: step:1000/10000 val_loss:5.3188 svd_entropy: attn_qk:H=0.5940,top10E=0.52,eRank=63.7,q75/q25=19.92 attn_vo:H=0.6368,top10E=0.41,eRank=107.0,q75/q25=37.27 mlp_w1:H=0.5821,top10E=0.53,eRank=68.4,q75/q25=4.81 mlp_w2:H=0.7140,top10E=0.31,eRank=115.2,q75/q25=10.03 vo_prod:H=0.5212,top10E=0.62,eRank=36.1,q75/q25=480.41 train_time:71291ms step_avg:71.29ms +[2025-09-02 04:51:38] [Rank 0] PRINT: step:1000/10000 val_loss:5.3188 svd_entropy: attn_qk:H=0.5940,top10E=0.52,eRank=63.7,q75/q25=19.92 attn_vo:H=0.6368,top10E=0.41,eRank=107.0,q75/q25=37.27 mlp_w1:H=0.5821,top10E=0.53,eRank=68.4,q75/q25=4.81 mlp_w2:H=0.7140,top10E=0.31,eRank=115.2,q75/q25=10.03 vo_prod:H=0.5212,top10E=0.62,eRank=36.1,q75/q25=480.41 train_time:71291ms step_avg:71.29ms +[2025-09-02 04:51:38] [Rank 0] step:1001/10000 train_time:71302ms step_avg:71.23ms +[2025-09-02 04:51:38] [Rank 0] step:1001/10000 train_time:71302ms step_avg:71.23ms +[2025-09-02 04:51:39] [Rank 0] step:1021/10000 train_time:72599ms step_avg:71.11ms +[2025-09-02 04:51:39] [Rank 0] step:1021/10000 train_time:72599ms step_avg:71.11ms +[2025-09-02 04:51:41] [Rank 0] step:1041/10000 train_time:74031ms step_avg:71.11ms +[2025-09-02 04:51:41] [Rank 0] step:1041/10000 train_time:74031ms 
step_avg:71.11ms +[2025-09-02 04:51:42] [Rank 0] step:1061/10000 train_time:75465ms step_avg:71.13ms +[2025-09-02 04:51:42] [Rank 0] step:1061/10000 train_time:75465ms step_avg:71.13ms +[2025-09-02 04:51:44] [Rank 0] step:1081/10000 train_time:76898ms step_avg:71.14ms +[2025-09-02 04:51:44] [Rank 0] step:1081/10000 train_time:76898ms step_avg:71.14ms +[2025-09-02 04:51:45] [Rank 0] step:1101/10000 train_time:78331ms step_avg:71.15ms +[2025-09-02 04:51:45] [Rank 0] step:1101/10000 train_time:78331ms step_avg:71.15ms +[2025-09-02 04:51:47] [Rank 0] step:1121/10000 train_time:79765ms step_avg:71.16ms +[2025-09-02 04:51:47] [Rank 0] step:1121/10000 train_time:79765ms step_avg:71.16ms +[2025-09-02 04:51:48] [Rank 0] step:1141/10000 train_time:81199ms step_avg:71.17ms +[2025-09-02 04:51:48] [Rank 0] step:1141/10000 train_time:81199ms step_avg:71.17ms +[2025-09-02 04:51:49] [Rank 0] step:1161/10000 train_time:82634ms step_avg:71.17ms +[2025-09-02 04:51:49] [Rank 0] step:1161/10000 train_time:82634ms step_avg:71.17ms +[2025-09-02 04:51:51] [Rank 0] step:1181/10000 train_time:84070ms step_avg:71.19ms +[2025-09-02 04:51:51] [Rank 0] step:1181/10000 train_time:84070ms step_avg:71.19ms +[2025-09-02 04:51:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:51:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:52:04] [Rank 0] PRINT: step:1200/10000 val_loss:5.1516 svd_entropy: attn_qk:H=0.6097,top10E=0.49,eRank=69.1,q75/q25=23.28 attn_vo:H=0.6561,top10E=0.39,eRank=118.8,q75/q25=41.46 mlp_w1:H=0.6076,top10E=0.50,eRank=77.5,q75/q25=5.34 mlp_w2:H=0.7384,top10E=0.27,eRank=135.5,q75/q25=11.42 vo_prod:H=0.5429,top10E=0.57,eRank=41.0,q75/q25=671.70 train_time:85647ms step_avg:71.37ms +[2025-09-02 04:52:04] [Rank 0] PRINT: step:1200/10000 val_loss:5.1516 svd_entropy: attn_qk:H=0.6097,top10E=0.49,eRank=69.1,q75/q25=23.28 attn_vo:H=0.6561,top10E=0.39,eRank=118.8,q75/q25=41.46 mlp_w1:H=0.6076,top10E=0.50,eRank=77.5,q75/q25=5.34 mlp_w2:H=0.7384,top10E=0.27,eRank=135.5,q75/q25=11.42 vo_prod:H=0.5429,top10E=0.57,eRank=41.0,q75/q25=671.70 train_time:85647ms step_avg:71.37ms +[2025-09-02 04:52:04] [Rank 0] step:1201/10000 train_time:85658ms step_avg:71.32ms +[2025-09-02 04:52:04] [Rank 0] step:1201/10000 train_time:85658ms step_avg:71.32ms +[2025-09-02 04:52:06] [Rank 0] step:1221/10000 train_time:86963ms step_avg:71.22ms +[2025-09-02 04:52:06] [Rank 0] step:1221/10000 train_time:86963ms step_avg:71.22ms +[2025-09-02 04:52:07] [Rank 0] step:1241/10000 train_time:88394ms step_avg:71.23ms +[2025-09-02 04:52:07] [Rank 0] step:1241/10000 train_time:88394ms step_avg:71.23ms +[2025-09-02 04:52:09] [Rank 0] step:1261/10000 train_time:89827ms step_avg:71.23ms +[2025-09-02 04:52:09] [Rank 0] step:1261/10000 train_time:89827ms step_avg:71.23ms +[2025-09-02 04:52:10] [Rank 0] step:1281/10000 train_time:91261ms step_avg:71.24ms +[2025-09-02 04:52:10] [Rank 0] step:1281/10000 train_time:91261ms step_avg:71.24ms +[2025-09-02 04:52:11] [Rank 0] step:1301/10000 train_time:92695ms step_avg:71.25ms +[2025-09-02 04:52:11] [Rank 0] step:1301/10000 train_time:92695ms step_avg:71.25ms +[2025-09-02 04:52:13] [Rank 0] step:1321/10000 train_time:94128ms step_avg:71.26ms +[2025-09-02 04:52:13] [Rank 0] step:1321/10000 train_time:94128ms step_avg:71.26ms +[2025-09-02 04:52:14] [Rank 0] 
step:1341/10000 train_time:95562ms step_avg:71.26ms +[2025-09-02 04:52:14] [Rank 0] step:1341/10000 train_time:95562ms step_avg:71.26ms +[2025-09-02 04:52:16] [Rank 0] step:1361/10000 train_time:96997ms step_avg:71.27ms +[2025-09-02 04:52:16] [Rank 0] step:1361/10000 train_time:96997ms step_avg:71.27ms +[2025-09-02 04:52:17] [Rank 0] step:1381/10000 train_time:98433ms step_avg:71.28ms +[2025-09-02 04:52:17] [Rank 0] step:1381/10000 train_time:98433ms step_avg:71.28ms +[2025-09-02 04:52:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:52:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:52:30] [Rank 0] PRINT: step:1400/10000 val_loss:5.0152 svd_entropy: attn_qk:H=0.6234,top10E=0.46,eRank=74.4,q75/q25=27.65 attn_vo:H=0.6730,top10E=0.36,eRank=130.4,q75/q25=47.23 mlp_w1:H=0.6287,top10E=0.47,eRank=86.0,q75/q25=5.94 mlp_w2:H=0.7579,top10E=0.24,eRank=154.3,q75/q25=13.03 vo_prod:H=0.5610,top10E=0.54,eRank=45.7,q75/q25=943.97 train_time:100012ms step_avg:71.44ms +[2025-09-02 04:52:30] [Rank 0] PRINT: step:1400/10000 val_loss:5.0152 svd_entropy: attn_qk:H=0.6234,top10E=0.46,eRank=74.4,q75/q25=27.65 attn_vo:H=0.6730,top10E=0.36,eRank=130.4,q75/q25=47.23 mlp_w1:H=0.6287,top10E=0.47,eRank=86.0,q75/q25=5.94 mlp_w2:H=0.7579,top10E=0.24,eRank=154.3,q75/q25=13.03 vo_prod:H=0.5610,top10E=0.54,eRank=45.7,q75/q25=943.97 train_time:100012ms step_avg:71.44ms +[2025-09-02 04:52:30] [Rank 0] step:1401/10000 train_time:100022ms step_avg:71.39ms +[2025-09-02 04:52:30] [Rank 0] step:1401/10000 train_time:100022ms step_avg:71.39ms +[2025-09-02 04:52:32] [Rank 0] step:1421/10000 train_time:101324ms step_avg:71.30ms +[2025-09-02 04:52:32] [Rank 0] step:1421/10000 train_time:101324ms step_avg:71.30ms +[2025-09-02 04:52:33] [Rank 0] step:1441/10000 train_time:102755ms step_avg:71.31ms +[2025-09-02 
04:52:33] [Rank 0] step:1441/10000 train_time:102755ms step_avg:71.31ms +[2025-09-02 04:52:35] [Rank 0] step:1461/10000 train_time:104188ms step_avg:71.31ms +[2025-09-02 04:52:35] [Rank 0] step:1461/10000 train_time:104188ms step_avg:71.31ms +[2025-09-02 04:52:36] [Rank 0] step:1481/10000 train_time:105621ms step_avg:71.32ms +[2025-09-02 04:52:36] [Rank 0] step:1481/10000 train_time:105621ms step_avg:71.32ms +[2025-09-02 04:52:38] [Rank 0] step:1501/10000 train_time:107063ms step_avg:71.33ms +[2025-09-02 04:52:38] [Rank 0] step:1501/10000 train_time:107063ms step_avg:71.33ms +[2025-09-02 04:52:39] [Rank 0] step:1521/10000 train_time:108506ms step_avg:71.34ms +[2025-09-02 04:52:39] [Rank 0] step:1521/10000 train_time:108506ms step_avg:71.34ms +[2025-09-02 04:52:41] [Rank 0] step:1541/10000 train_time:109952ms step_avg:71.35ms +[2025-09-02 04:52:41] [Rank 0] step:1541/10000 train_time:109952ms step_avg:71.35ms +[2025-09-02 04:52:42] [Rank 0] step:1561/10000 train_time:111397ms step_avg:71.36ms +[2025-09-02 04:52:42] [Rank 0] step:1561/10000 train_time:111397ms step_avg:71.36ms +[2025-09-02 04:52:43] [Rank 0] step:1581/10000 train_time:112844ms step_avg:71.37ms +[2025-09-02 04:52:43] [Rank 0] step:1581/10000 train_time:112844ms step_avg:71.37ms +[2025-09-02 04:52:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:52:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:52:57] [Rank 0] PRINT: step:1600/10000 val_loss:4.8701 svd_entropy: attn_qk:H=0.6349,top10E=0.44,eRank=78.9,q75/q25=33.17 attn_vo:H=0.6883,top10E=0.34,eRank=141.6,q75/q25=53.86 mlp_w1:H=0.6472,top10E=0.45,eRank=94.3,q75/q25=6.56 mlp_w2:H=0.7735,top10E=0.22,eRank=171.2,q75/q25=14.67 vo_prod:H=0.5779,top10E=0.50,eRank=50.7,q75/q25=1368.77 train_time:114434ms step_avg:71.52ms +[2025-09-02 04:52:57] [Rank 0] PRINT: step:1600/10000 val_loss:4.8701 svd_entropy: attn_qk:H=0.6349,top10E=0.44,eRank=78.9,q75/q25=33.17 attn_vo:H=0.6883,top10E=0.34,eRank=141.6,q75/q25=53.86 mlp_w1:H=0.6472,top10E=0.45,eRank=94.3,q75/q25=6.56 mlp_w2:H=0.7735,top10E=0.22,eRank=171.2,q75/q25=14.67 vo_prod:H=0.5779,top10E=0.50,eRank=50.7,q75/q25=1368.77 train_time:114434ms step_avg:71.52ms +[2025-09-02 04:52:57] [Rank 0] step:1601/10000 train_time:114445ms step_avg:71.48ms +[2025-09-02 04:52:57] [Rank 0] step:1601/10000 train_time:114445ms step_avg:71.48ms +[2025-09-02 04:52:58] [Rank 0] step:1621/10000 train_time:115769ms step_avg:71.42ms +[2025-09-02 04:52:58] [Rank 0] step:1621/10000 train_time:115769ms step_avg:71.42ms +[2025-09-02 04:53:00] [Rank 0] step:1641/10000 train_time:117210ms step_avg:71.43ms +[2025-09-02 04:53:00] [Rank 0] step:1641/10000 train_time:117210ms step_avg:71.43ms +[2025-09-02 04:53:01] [Rank 0] step:1661/10000 train_time:118656ms step_avg:71.44ms +[2025-09-02 04:53:01] [Rank 0] step:1661/10000 train_time:118656ms step_avg:71.44ms +[2025-09-02 04:53:03] [Rank 0] step:1681/10000 train_time:120098ms step_avg:71.44ms +[2025-09-02 04:53:03] [Rank 0] step:1681/10000 train_time:120098ms step_avg:71.44ms +[2025-09-02 04:53:04] [Rank 0] step:1701/10000 train_time:121541ms step_avg:71.45ms +[2025-09-02 04:53:04] [Rank 0] step:1701/10000 train_time:121541ms step_avg:71.45ms +[2025-09-02 04:53:05] [Rank 0] step:1721/10000 train_time:122984ms step_avg:71.46ms +[2025-09-02 04:53:05] [Rank 0] step:1721/10000 train_time:122984ms step_avg:71.46ms +[2025-09-02 04:53:07] 
[Rank 0] step:1741/10000 train_time:124428ms step_avg:71.47ms +[2025-09-02 04:53:07] [Rank 0] step:1741/10000 train_time:124428ms step_avg:71.47ms +[2025-09-02 04:53:08] [Rank 0] step:1761/10000 train_time:125874ms step_avg:71.48ms +[2025-09-02 04:53:08] [Rank 0] step:1761/10000 train_time:125874ms step_avg:71.48ms +[2025-09-02 04:53:10] [Rank 0] step:1781/10000 train_time:127318ms step_avg:71.49ms +[2025-09-02 04:53:10] [Rank 0] step:1781/10000 train_time:127318ms step_avg:71.49ms +[2025-09-02 04:53:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:53:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:53:23] [Rank 0] PRINT: step:1800/10000 val_loss:4.7634 svd_entropy: attn_qk:H=0.6448,top10E=0.43,eRank=83.2,q75/q25=38.62 attn_vo:H=0.7014,top10E=0.33,eRank=151.3,q75/q25=61.61 mlp_w1:H=0.6635,top10E=0.42,eRank=102.2,q75/q25=7.23 mlp_w2:H=0.7854,top10E=0.21,eRank=185.5,q75/q25=15.95 vo_prod:H=0.5918,top10E=0.47,eRank=55.3,q75/q25=2068.99 train_time:128908ms step_avg:71.62ms +[2025-09-02 04:53:23] [Rank 0] PRINT: step:1800/10000 val_loss:4.7634 svd_entropy: attn_qk:H=0.6448,top10E=0.43,eRank=83.2,q75/q25=38.62 attn_vo:H=0.7014,top10E=0.33,eRank=151.3,q75/q25=61.61 mlp_w1:H=0.6635,top10E=0.42,eRank=102.2,q75/q25=7.23 mlp_w2:H=0.7854,top10E=0.21,eRank=185.5,q75/q25=15.95 vo_prod:H=0.5918,top10E=0.47,eRank=55.3,q75/q25=2068.99 train_time:128908ms step_avg:71.62ms +[2025-09-02 04:53:23] [Rank 0] step:1801/10000 train_time:128919ms step_avg:71.58ms +[2025-09-02 04:53:23] [Rank 0] step:1801/10000 train_time:128919ms step_avg:71.58ms +[2025-09-02 04:53:25] [Rank 0] step:1821/10000 train_time:130233ms step_avg:71.52ms +[2025-09-02 04:53:25] [Rank 0] step:1821/10000 train_time:130233ms step_avg:71.52ms +[2025-09-02 04:53:26] [Rank 0] step:1841/10000 train_time:131674ms step_avg:71.52ms 
+[2025-09-02 04:53:26] [Rank 0] step:1841/10000 train_time:131674ms step_avg:71.52ms +[2025-09-02 04:53:27] [Rank 0] step:1861/10000 train_time:133117ms step_avg:71.53ms +[2025-09-02 04:53:27] [Rank 0] step:1861/10000 train_time:133117ms step_avg:71.53ms +[2025-09-02 04:53:29] [Rank 0] step:1881/10000 train_time:134560ms step_avg:71.54ms +[2025-09-02 04:53:29] [Rank 0] step:1881/10000 train_time:134560ms step_avg:71.54ms +[2025-09-02 04:53:30] [Rank 0] step:1901/10000 train_time:136003ms step_avg:71.54ms +[2025-09-02 04:53:30] [Rank 0] step:1901/10000 train_time:136003ms step_avg:71.54ms +[2025-09-02 04:53:32] [Rank 0] step:1921/10000 train_time:137447ms step_avg:71.55ms +[2025-09-02 04:53:32] [Rank 0] step:1921/10000 train_time:137447ms step_avg:71.55ms +[2025-09-02 04:53:33] [Rank 0] step:1941/10000 train_time:138892ms step_avg:71.56ms +[2025-09-02 04:53:33] [Rank 0] step:1941/10000 train_time:138892ms step_avg:71.56ms +[2025-09-02 04:53:35] [Rank 0] step:1961/10000 train_time:140336ms step_avg:71.56ms +[2025-09-02 04:53:35] [Rank 0] step:1961/10000 train_time:140336ms step_avg:71.56ms +[2025-09-02 04:53:36] [Rank 0] step:1981/10000 train_time:141781ms step_avg:71.57ms +[2025-09-02 04:53:36] [Rank 0] step:1981/10000 train_time:141781ms step_avg:71.57ms +[2025-09-02 04:53:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:53:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:53:49] [Rank 0] PRINT: step:2000/10000 val_loss:4.6854 svd_entropy: attn_qk:H=0.6535,top10E=0.41,eRank=87.1,q75/q25=44.16 attn_vo:H=0.7123,top10E=0.31,eRank=159.5,q75/q25=69.42 mlp_w1:H=0.6773,top10E=0.41,eRank=109.6,q75/q25=7.91 mlp_w2:H=0.7951,top10E=0.20,eRank=198.1,q75/q25=17.17 vo_prod:H=0.6036,top10E=0.45,eRank=59.6,q75/q25=3072.40 train_time:143370ms step_avg:71.69ms +[2025-09-02 04:53:49] [Rank 0] PRINT: step:2000/10000 val_loss:4.6854 svd_entropy: attn_qk:H=0.6535,top10E=0.41,eRank=87.1,q75/q25=44.16 attn_vo:H=0.7123,top10E=0.31,eRank=159.5,q75/q25=69.42 mlp_w1:H=0.6773,top10E=0.41,eRank=109.6,q75/q25=7.91 mlp_w2:H=0.7951,top10E=0.20,eRank=198.1,q75/q25=17.17 vo_prod:H=0.6036,top10E=0.45,eRank=59.6,q75/q25=3072.40 train_time:143370ms step_avg:71.69ms +[2025-09-02 04:53:49] [Rank 0] step:2001/10000 train_time:143381ms step_avg:71.65ms +[2025-09-02 04:53:49] [Rank 0] step:2001/10000 train_time:143381ms step_avg:71.65ms +[2025-09-02 04:53:51] [Rank 0] step:2021/10000 train_time:144689ms step_avg:71.59ms +[2025-09-02 04:53:51] [Rank 0] step:2021/10000 train_time:144689ms step_avg:71.59ms +[2025-09-02 04:53:52] [Rank 0] step:2041/10000 train_time:146245ms step_avg:71.65ms +[2025-09-02 04:53:52] [Rank 0] step:2041/10000 train_time:146245ms step_avg:71.65ms +[2025-09-02 04:53:54] [Rank 0] step:2061/10000 train_time:147689ms step_avg:71.66ms +[2025-09-02 04:53:54] [Rank 0] step:2061/10000 train_time:147689ms step_avg:71.66ms +[2025-09-02 04:53:55] [Rank 0] step:2081/10000 train_time:149133ms step_avg:71.66ms +[2025-09-02 04:53:55] [Rank 0] step:2081/10000 train_time:149133ms step_avg:71.66ms +[2025-09-02 04:53:57] [Rank 0] step:2101/10000 train_time:150578ms step_avg:71.67ms +[2025-09-02 04:53:57] [Rank 0] step:2101/10000 train_time:150578ms step_avg:71.67ms +[2025-09-02 04:53:58] [Rank 0] step:2121/10000 train_time:152022ms step_avg:71.67ms +[2025-09-02 04:53:58] [Rank 0] step:2121/10000 train_time:152022ms step_avg:71.67ms +[2025-09-02 04:54:00] 
[Rank 0] step:2141/10000 train_time:153467ms step_avg:71.68ms +[2025-09-02 04:54:00] [Rank 0] step:2141/10000 train_time:153467ms step_avg:71.68ms +[2025-09-02 04:54:01] [Rank 0] step:2161/10000 train_time:154911ms step_avg:71.69ms +[2025-09-02 04:54:01] [Rank 0] step:2161/10000 train_time:154911ms step_avg:71.69ms +[2025-09-02 04:54:03] [Rank 0] step:2181/10000 train_time:156357ms step_avg:71.69ms +[2025-09-02 04:54:03] [Rank 0] step:2181/10000 train_time:156357ms step_avg:71.69ms +[2025-09-02 04:54:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:54:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:54:15] [Rank 0] PRINT: step:2200/10000 val_loss:4.6014 svd_entropy: attn_qk:H=0.6606,top10E=0.40,eRank=90.7,q75/q25=49.28 attn_vo:H=0.7215,top10E=0.30,eRank=166.5,q75/q25=75.66 mlp_w1:H=0.6898,top10E=0.39,eRank=116.9,q75/q25=8.49 mlp_w2:H=0.8035,top10E=0.19,eRank=209.5,q75/q25=17.76 vo_prod:H=0.6134,top10E=0.43,eRank=63.3,q75/q25=4193.99 train_time:157948ms step_avg:71.79ms +[2025-09-02 04:54:15] [Rank 0] PRINT: step:2200/10000 val_loss:4.6014 svd_entropy: attn_qk:H=0.6606,top10E=0.40,eRank=90.7,q75/q25=49.28 attn_vo:H=0.7215,top10E=0.30,eRank=166.5,q75/q25=75.66 mlp_w1:H=0.6898,top10E=0.39,eRank=116.9,q75/q25=8.49 mlp_w2:H=0.8035,top10E=0.19,eRank=209.5,q75/q25=17.76 vo_prod:H=0.6134,top10E=0.43,eRank=63.3,q75/q25=4193.99 train_time:157948ms step_avg:71.79ms +[2025-09-02 04:54:15] [Rank 0] step:2201/10000 train_time:157959ms step_avg:71.77ms +[2025-09-02 04:54:15] [Rank 0] step:2201/10000 train_time:157959ms step_avg:71.77ms +[2025-09-02 04:54:17] [Rank 0] step:2221/10000 train_time:159266ms step_avg:71.71ms +[2025-09-02 04:54:17] [Rank 0] step:2221/10000 train_time:159266ms step_avg:71.71ms +[2025-09-02 04:54:18] [Rank 0] step:2241/10000 train_time:160743ms step_avg:71.73ms 
+[2025-09-02 04:54:18] [Rank 0] step:2241/10000 train_time:160743ms step_avg:71.73ms +[2025-09-02 04:54:20] [Rank 0] step:2261/10000 train_time:162230ms step_avg:71.75ms +[2025-09-02 04:54:20] [Rank 0] step:2261/10000 train_time:162230ms step_avg:71.75ms +[2025-09-02 04:54:21] [Rank 0] step:2281/10000 train_time:163720ms step_avg:71.78ms +[2025-09-02 04:54:21] [Rank 0] step:2281/10000 train_time:163720ms step_avg:71.78ms +[2025-09-02 04:54:23] [Rank 0] step:2301/10000 train_time:165209ms step_avg:71.80ms +[2025-09-02 04:54:23] [Rank 0] step:2301/10000 train_time:165209ms step_avg:71.80ms +[2025-09-02 04:54:24] [Rank 0] step:2321/10000 train_time:166700ms step_avg:71.82ms +[2025-09-02 04:54:24] [Rank 0] step:2321/10000 train_time:166700ms step_avg:71.82ms +[2025-09-02 04:54:26] [Rank 0] step:2341/10000 train_time:168190ms step_avg:71.85ms +[2025-09-02 04:54:26] [Rank 0] step:2341/10000 train_time:168190ms step_avg:71.85ms +[2025-09-02 04:54:27] [Rank 0] step:2361/10000 train_time:169681ms step_avg:71.87ms +[2025-09-02 04:54:27] [Rank 0] step:2361/10000 train_time:169681ms step_avg:71.87ms +[2025-09-02 04:54:29] [Rank 0] step:2381/10000 train_time:171173ms step_avg:71.89ms +[2025-09-02 04:54:29] [Rank 0] step:2381/10000 train_time:171173ms step_avg:71.89ms +[2025-09-02 04:54:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:54:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:54:42] [Rank 0] PRINT: step:2400/10000 val_loss:4.5238 svd_entropy: attn_qk:H=0.6667,top10E=0.39,eRank=93.7,q75/q25=54.92 attn_vo:H=0.7297,top10E=0.29,eRank=172.9,q75/q25=80.99 mlp_w1:H=0.7006,top10E=0.38,eRank=123.8,q75/q25=9.05 mlp_w2:H=0.8110,top10E=0.18,eRank=220.3,q75/q25=18.23 vo_prod:H=0.6226,top10E=0.42,eRank=67.1,q75/q25=5710.64 train_time:172815ms step_avg:72.01ms +[2025-09-02 04:54:42] [Rank 0] PRINT: step:2400/10000 val_loss:4.5238 svd_entropy: attn_qk:H=0.6667,top10E=0.39,eRank=93.7,q75/q25=54.92 attn_vo:H=0.7297,top10E=0.29,eRank=172.9,q75/q25=80.99 mlp_w1:H=0.7006,top10E=0.38,eRank=123.8,q75/q25=9.05 mlp_w2:H=0.8110,top10E=0.18,eRank=220.3,q75/q25=18.23 vo_prod:H=0.6226,top10E=0.42,eRank=67.1,q75/q25=5710.64 train_time:172815ms step_avg:72.01ms +[2025-09-02 04:54:42] [Rank 0] step:2401/10000 train_time:172826ms step_avg:71.98ms +[2025-09-02 04:54:42] [Rank 0] step:2401/10000 train_time:172826ms step_avg:71.98ms +[2025-09-02 04:54:44] [Rank 0] step:2421/10000 train_time:174187ms step_avg:71.95ms +[2025-09-02 04:54:44] [Rank 0] step:2421/10000 train_time:174187ms step_avg:71.95ms +[2025-09-02 04:54:45] [Rank 0] step:2441/10000 train_time:175674ms step_avg:71.97ms +[2025-09-02 04:54:45] [Rank 0] step:2441/10000 train_time:175674ms step_avg:71.97ms +[2025-09-02 04:54:46] [Rank 0] step:2461/10000 train_time:177162ms step_avg:71.99ms +[2025-09-02 04:54:46] [Rank 0] step:2461/10000 train_time:177162ms step_avg:71.99ms +[2025-09-02 04:54:48] [Rank 0] step:2481/10000 train_time:178651ms step_avg:72.01ms +[2025-09-02 04:54:48] [Rank 0] step:2481/10000 train_time:178651ms step_avg:72.01ms +[2025-09-02 04:54:49] [Rank 0] step:2501/10000 train_time:180139ms step_avg:72.03ms +[2025-09-02 04:54:49] [Rank 0] step:2501/10000 train_time:180139ms step_avg:72.03ms +[2025-09-02 04:54:51] [Rank 0] step:2521/10000 train_time:181629ms step_avg:72.05ms +[2025-09-02 04:54:51] [Rank 0] step:2521/10000 train_time:181629ms step_avg:72.05ms +[2025-09-02 04:54:52] 
[Rank 0] step:2541/10000 train_time:183119ms step_avg:72.07ms +[2025-09-02 04:54:52] [Rank 0] step:2541/10000 train_time:183119ms step_avg:72.07ms +[2025-09-02 04:54:54] [Rank 0] step:2561/10000 train_time:184609ms step_avg:72.08ms +[2025-09-02 04:54:54] [Rank 0] step:2561/10000 train_time:184609ms step_avg:72.08ms +[2025-09-02 04:54:55] [Rank 0] step:2581/10000 train_time:186099ms step_avg:72.10ms +[2025-09-02 04:54:55] [Rank 0] step:2581/10000 train_time:186099ms step_avg:72.10ms +[2025-09-02 04:54:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:54:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:55:08] [Rank 0] PRINT: step:2600/10000 val_loss:4.4649 svd_entropy: attn_qk:H=0.6726,top10E=0.38,eRank=96.8,q75/q25=60.06 attn_vo:H=0.7370,top10E=0.27,eRank=178.7,q75/q25=85.74 mlp_w1:H=0.7107,top10E=0.36,eRank=130.7,q75/q25=9.46 mlp_w2:H=0.8179,top10E=0.17,eRank=230.8,q75/q25=18.23 vo_prod:H=0.6311,top10E=0.40,eRank=70.8,q75/q25=7119.16 train_time:187739ms step_avg:72.21ms +[2025-09-02 04:55:08] [Rank 0] PRINT: step:2600/10000 val_loss:4.4649 svd_entropy: attn_qk:H=0.6726,top10E=0.38,eRank=96.8,q75/q25=60.06 attn_vo:H=0.7370,top10E=0.27,eRank=178.7,q75/q25=85.74 mlp_w1:H=0.7107,top10E=0.36,eRank=130.7,q75/q25=9.46 mlp_w2:H=0.8179,top10E=0.17,eRank=230.8,q75/q25=18.23 vo_prod:H=0.6311,top10E=0.40,eRank=70.8,q75/q25=7119.16 train_time:187739ms step_avg:72.21ms +[2025-09-02 04:55:09] [Rank 0] step:2601/10000 train_time:187749ms step_avg:72.18ms +[2025-09-02 04:55:09] [Rank 0] step:2601/10000 train_time:187749ms step_avg:72.18ms +[2025-09-02 04:55:10] [Rank 0] step:2621/10000 train_time:189092ms step_avg:72.15ms +[2025-09-02 04:55:10] [Rank 0] step:2621/10000 train_time:189092ms step_avg:72.15ms +[2025-09-02 04:55:12] [Rank 0] step:2641/10000 train_time:190625ms step_avg:72.18ms 
+[2025-09-02 04:55:12] [Rank 0] step:2641/10000 train_time:190625ms step_avg:72.18ms +[2025-09-02 04:55:13] [Rank 0] step:2661/10000 train_time:192113ms step_avg:72.20ms +[2025-09-02 04:55:13] [Rank 0] step:2661/10000 train_time:192113ms step_avg:72.20ms +[2025-09-02 04:55:15] [Rank 0] step:2681/10000 train_time:193600ms step_avg:72.21ms +[2025-09-02 04:55:15] [Rank 0] step:2681/10000 train_time:193600ms step_avg:72.21ms +[2025-09-02 04:55:16] [Rank 0] step:2701/10000 train_time:195089ms step_avg:72.23ms +[2025-09-02 04:55:16] [Rank 0] step:2701/10000 train_time:195089ms step_avg:72.23ms +[2025-09-02 04:55:18] [Rank 0] step:2721/10000 train_time:196578ms step_avg:72.24ms +[2025-09-02 04:55:18] [Rank 0] step:2721/10000 train_time:196578ms step_avg:72.24ms +[2025-09-02 04:55:19] [Rank 0] step:2741/10000 train_time:198068ms step_avg:72.26ms +[2025-09-02 04:55:19] [Rank 0] step:2741/10000 train_time:198068ms step_avg:72.26ms +[2025-09-02 04:55:21] [Rank 0] step:2761/10000 train_time:199556ms step_avg:72.28ms +[2025-09-02 04:55:21] [Rank 0] step:2761/10000 train_time:199556ms step_avg:72.28ms +[2025-09-02 04:55:22] [Rank 0] step:2781/10000 train_time:201045ms step_avg:72.29ms +[2025-09-02 04:55:22] [Rank 0] step:2781/10000 train_time:201045ms step_avg:72.29ms +[2025-09-02 04:55:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:55:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:55:35] [Rank 0] PRINT: step:2800/10000 val_loss:4.4199 svd_entropy: attn_qk:H=0.6782,top10E=0.37,eRank=99.9,q75/q25=65.09 attn_vo:H=0.7436,top10E=0.27,eRank=184.1,q75/q25=89.59 mlp_w1:H=0.7195,top10E=0.35,eRank=137.3,q75/q25=9.90 mlp_w2:H=0.8239,top10E=0.16,eRank=240.3,q75/q25=18.16 vo_prod:H=0.6387,top10E=0.39,eRank=74.3,q75/q25=8667.14 train_time:202684ms step_avg:72.39ms +[2025-09-02 04:55:35] [Rank 0] PRINT: step:2800/10000 val_loss:4.4199 svd_entropy: attn_qk:H=0.6782,top10E=0.37,eRank=99.9,q75/q25=65.09 attn_vo:H=0.7436,top10E=0.27,eRank=184.1,q75/q25=89.59 mlp_w1:H=0.7195,top10E=0.35,eRank=137.3,q75/q25=9.90 mlp_w2:H=0.8239,top10E=0.16,eRank=240.3,q75/q25=18.16 vo_prod:H=0.6387,top10E=0.39,eRank=74.3,q75/q25=8667.14 train_time:202684ms step_avg:72.39ms +[2025-09-02 04:55:35] [Rank 0] step:2801/10000 train_time:202695ms step_avg:72.37ms +[2025-09-02 04:55:35] [Rank 0] step:2801/10000 train_time:202695ms step_avg:72.37ms +[2025-09-02 04:55:37] [Rank 0] step:2821/10000 train_time:204049ms step_avg:72.33ms +[2025-09-02 04:55:37] [Rank 0] step:2821/10000 train_time:204049ms step_avg:72.33ms +[2025-09-02 04:55:38] [Rank 0] step:2841/10000 train_time:205538ms step_avg:72.35ms +[2025-09-02 04:55:38] [Rank 0] step:2841/10000 train_time:205538ms step_avg:72.35ms +[2025-09-02 04:55:40] [Rank 0] step:2861/10000 train_time:207026ms step_avg:72.36ms +[2025-09-02 04:55:40] [Rank 0] step:2861/10000 train_time:207026ms step_avg:72.36ms +[2025-09-02 04:55:41] [Rank 0] step:2881/10000 train_time:208514ms step_avg:72.38ms +[2025-09-02 04:55:41] [Rank 0] step:2881/10000 train_time:208514ms step_avg:72.38ms +[2025-09-02 04:55:43] [Rank 0] step:2901/10000 train_time:210003ms step_avg:72.39ms +[2025-09-02 04:55:43] [Rank 0] step:2901/10000 train_time:210003ms step_avg:72.39ms +[2025-09-02 04:55:44] [Rank 0] step:2921/10000 train_time:211492ms step_avg:72.40ms +[2025-09-02 04:55:44] [Rank 0] step:2921/10000 train_time:211492ms step_avg:72.40ms +[2025-09-02 04:55:46] 
[Rank 0] step:2941/10000 train_time:212981ms step_avg:72.42ms +[2025-09-02 04:55:46] [Rank 0] step:2941/10000 train_time:212981ms step_avg:72.42ms +[2025-09-02 04:55:47] [Rank 0] step:2961/10000 train_time:214470ms step_avg:72.43ms +[2025-09-02 04:55:47] [Rank 0] step:2961/10000 train_time:214470ms step_avg:72.43ms +[2025-09-02 04:55:49] [Rank 0] step:2981/10000 train_time:215966ms step_avg:72.45ms +[2025-09-02 04:55:49] [Rank 0] step:2981/10000 train_time:215966ms step_avg:72.45ms +[2025-09-02 04:55:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:55:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:56:02] [Rank 0] PRINT: step:3000/10000 val_loss:4.3750 svd_entropy: attn_qk:H=0.6832,top10E=0.36,eRank=102.8,q75/q25=69.49 attn_vo:H=0.7495,top10E=0.26,eRank=189.2,q75/q25=92.00 mlp_w1:H=0.7275,top10E=0.34,eRank=143.5,q75/q25=10.27 mlp_w2:H=0.8290,top10E=0.16,eRank=248.5,q75/q25=18.01 vo_prod:H=0.6453,top10E=0.37,eRank=77.6,q75/q25=10224.10 train_time:217615ms step_avg:72.54ms +[2025-09-02 04:56:02] [Rank 0] PRINT: step:3000/10000 val_loss:4.3750 svd_entropy: attn_qk:H=0.6832,top10E=0.36,eRank=102.8,q75/q25=69.49 attn_vo:H=0.7495,top10E=0.26,eRank=189.2,q75/q25=92.00 mlp_w1:H=0.7275,top10E=0.34,eRank=143.5,q75/q25=10.27 mlp_w2:H=0.8290,top10E=0.16,eRank=248.5,q75/q25=18.01 vo_prod:H=0.6453,top10E=0.37,eRank=77.6,q75/q25=10224.10 train_time:217615ms step_avg:72.54ms +[2025-09-02 04:56:02] [Rank 0] step:3001/10000 train_time:217626ms step_avg:72.52ms +[2025-09-02 04:56:02] [Rank 0] step:3001/10000 train_time:217626ms step_avg:72.52ms +[2025-09-02 04:56:03] [Rank 0] step:3021/10000 train_time:218997ms step_avg:72.49ms +[2025-09-02 04:56:03] [Rank 0] step:3021/10000 train_time:218997ms step_avg:72.49ms +[2025-09-02 04:56:05] [Rank 0] step:3041/10000 train_time:220493ms 
step_avg:72.51ms +[2025-09-02 04:56:05] [Rank 0] step:3041/10000 train_time:220493ms step_avg:72.51ms +[2025-09-02 04:56:06] [Rank 0] step:3061/10000 train_time:221990ms step_avg:72.52ms +[2025-09-02 04:56:06] [Rank 0] step:3061/10000 train_time:221990ms step_avg:72.52ms +[2025-09-02 04:56:08] [Rank 0] step:3081/10000 train_time:223487ms step_avg:72.54ms +[2025-09-02 04:56:08] [Rank 0] step:3081/10000 train_time:223487ms step_avg:72.54ms +[2025-09-02 04:56:09] [Rank 0] step:3101/10000 train_time:224985ms step_avg:72.55ms +[2025-09-02 04:56:09] [Rank 0] step:3101/10000 train_time:224985ms step_avg:72.55ms +[2025-09-02 04:56:11] [Rank 0] step:3121/10000 train_time:226483ms step_avg:72.57ms +[2025-09-02 04:56:11] [Rank 0] step:3121/10000 train_time:226483ms step_avg:72.57ms +[2025-09-02 04:56:12] [Rank 0] step:3141/10000 train_time:227982ms step_avg:72.58ms +[2025-09-02 04:56:12] [Rank 0] step:3141/10000 train_time:227982ms step_avg:72.58ms +[2025-09-02 04:56:14] [Rank 0] step:3161/10000 train_time:229480ms step_avg:72.60ms +[2025-09-02 04:56:14] [Rank 0] step:3161/10000 train_time:229480ms step_avg:72.60ms +[2025-09-02 04:56:15] [Rank 0] step:3181/10000 train_time:230983ms step_avg:72.61ms +[2025-09-02 04:56:15] [Rank 0] step:3181/10000 train_time:230983ms step_avg:72.61ms +[2025-09-02 04:56:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:56:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:56:28] [Rank 0] PRINT: step:3200/10000 val_loss:4.3334 svd_entropy: attn_qk:H=0.6879,top10E=0.36,eRank=105.6,q75/q25=73.14 attn_vo:H=0.7548,top10E=0.25,eRank=193.8,q75/q25=95.28 mlp_w1:H=0.7345,top10E=0.33,eRank=149.3,q75/q25=10.57 mlp_w2:H=0.8335,top10E=0.15,eRank=256.2,q75/q25=17.91 vo_prod:H=0.6514,top10E=0.36,eRank=80.8,q75/q25=11503.85 train_time:232633ms step_avg:72.70ms +[2025-09-02 04:56:28] [Rank 0] PRINT: step:3200/10000 val_loss:4.3334 svd_entropy: attn_qk:H=0.6879,top10E=0.36,eRank=105.6,q75/q25=73.14 attn_vo:H=0.7548,top10E=0.25,eRank=193.8,q75/q25=95.28 mlp_w1:H=0.7345,top10E=0.33,eRank=149.3,q75/q25=10.57 mlp_w2:H=0.8335,top10E=0.15,eRank=256.2,q75/q25=17.91 vo_prod:H=0.6514,top10E=0.36,eRank=80.8,q75/q25=11503.85 train_time:232633ms step_avg:72.70ms +[2025-09-02 04:56:28] [Rank 0] step:3201/10000 train_time:232645ms step_avg:72.68ms +[2025-09-02 04:56:28] [Rank 0] step:3201/10000 train_time:232645ms step_avg:72.68ms +[2025-09-02 04:56:30] [Rank 0] step:3221/10000 train_time:233993ms step_avg:72.65ms +[2025-09-02 04:56:30] [Rank 0] step:3221/10000 train_time:233993ms step_avg:72.65ms +[2025-09-02 04:56:31] [Rank 0] step:3241/10000 train_time:235490ms step_avg:72.66ms +[2025-09-02 04:56:31] [Rank 0] step:3241/10000 train_time:235490ms step_avg:72.66ms +[2025-09-02 04:56:33] [Rank 0] step:3261/10000 train_time:236985ms step_avg:72.67ms +[2025-09-02 04:56:33] [Rank 0] step:3261/10000 train_time:236985ms step_avg:72.67ms +[2025-09-02 04:56:34] [Rank 0] step:3281/10000 train_time:238482ms step_avg:72.69ms +[2025-09-02 04:56:34] [Rank 0] step:3281/10000 train_time:238482ms step_avg:72.69ms +[2025-09-02 04:56:36] [Rank 0] step:3301/10000 train_time:239978ms step_avg:72.70ms +[2025-09-02 04:56:36] [Rank 0] step:3301/10000 train_time:239978ms step_avg:72.70ms +[2025-09-02 04:56:37] [Rank 0] step:3321/10000 train_time:241474ms step_avg:72.71ms +[2025-09-02 04:56:37] [Rank 0] step:3321/10000 train_time:241474ms step_avg:72.71ms +[2025-09-02 
04:56:39] [Rank 0] step:3341/10000 train_time:242970ms step_avg:72.72ms +[2025-09-02 04:56:39] [Rank 0] step:3341/10000 train_time:242970ms step_avg:72.72ms +[2025-09-02 04:56:40] [Rank 0] step:3361/10000 train_time:244468ms step_avg:72.74ms +[2025-09-02 04:56:40] [Rank 0] step:3361/10000 train_time:244468ms step_avg:72.74ms +[2025-09-02 04:56:42] [Rank 0] step:3381/10000 train_time:245965ms step_avg:72.75ms +[2025-09-02 04:56:42] [Rank 0] step:3381/10000 train_time:245965ms step_avg:72.75ms +[2025-09-02 04:56:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:56:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:56:55] [Rank 0] PRINT: step:3400/10000 val_loss:4.2894 svd_entropy: attn_qk:H=0.6926,top10E=0.35,eRank=108.5,q75/q25=77.06 attn_vo:H=0.7598,top10E=0.24,eRank=198.3,q75/q25=96.73 mlp_w1:H=0.7413,top10E=0.32,eRank=155.3,q75/q25=10.89 mlp_w2:H=0.8377,top10E=0.15,eRank=263.6,q75/q25=17.77 vo_prod:H=0.6572,top10E=0.36,eRank=83.9,q75/q25=12662.34 train_time:247612ms step_avg:72.83ms +[2025-09-02 04:56:55] [Rank 0] PRINT: step:3400/10000 val_loss:4.2894 svd_entropy: attn_qk:H=0.6926,top10E=0.35,eRank=108.5,q75/q25=77.06 attn_vo:H=0.7598,top10E=0.24,eRank=198.3,q75/q25=96.73 mlp_w1:H=0.7413,top10E=0.32,eRank=155.3,q75/q25=10.89 mlp_w2:H=0.8377,top10E=0.15,eRank=263.6,q75/q25=17.77 vo_prod:H=0.6572,top10E=0.36,eRank=83.9,q75/q25=12662.34 train_time:247612ms step_avg:72.83ms +[2025-09-02 04:56:55] [Rank 0] step:3401/10000 train_time:247624ms step_avg:72.81ms +[2025-09-02 04:56:55] [Rank 0] step:3401/10000 train_time:247624ms step_avg:72.81ms +[2025-09-02 04:56:57] [Rank 0] step:3421/10000 train_time:248993ms step_avg:72.78ms +[2025-09-02 04:56:57] [Rank 0] step:3421/10000 train_time:248993ms step_avg:72.78ms +[2025-09-02 04:56:58] [Rank 0] step:3441/10000 train_time:250486ms 
step_avg:72.79ms +[2025-09-02 04:56:58] [Rank 0] step:3441/10000 train_time:250486ms step_avg:72.79ms +[2025-09-02 04:57:00] [Rank 0] step:3461/10000 train_time:251981ms step_avg:72.81ms +[2025-09-02 04:57:00] [Rank 0] step:3461/10000 train_time:251981ms step_avg:72.81ms +[2025-09-02 04:57:01] [Rank 0] step:3481/10000 train_time:253477ms step_avg:72.82ms +[2025-09-02 04:57:01] [Rank 0] step:3481/10000 train_time:253477ms step_avg:72.82ms +[2025-09-02 04:57:03] [Rank 0] step:3501/10000 train_time:254973ms step_avg:72.83ms +[2025-09-02 04:57:03] [Rank 0] step:3501/10000 train_time:254973ms step_avg:72.83ms +[2025-09-02 04:57:04] [Rank 0] step:3521/10000 train_time:256471ms step_avg:72.84ms +[2025-09-02 04:57:04] [Rank 0] step:3521/10000 train_time:256471ms step_avg:72.84ms +[2025-09-02 04:57:06] [Rank 0] step:3541/10000 train_time:257968ms step_avg:72.85ms +[2025-09-02 04:57:06] [Rank 0] step:3541/10000 train_time:257968ms step_avg:72.85ms +[2025-09-02 04:57:07] [Rank 0] step:3561/10000 train_time:259464ms step_avg:72.86ms +[2025-09-02 04:57:07] [Rank 0] step:3561/10000 train_time:259464ms step_avg:72.86ms +[2025-09-02 04:57:09] [Rank 0] step:3581/10000 train_time:260960ms step_avg:72.87ms +[2025-09-02 04:57:09] [Rank 0] step:3581/10000 train_time:260960ms step_avg:72.87ms +[2025-09-02 04:57:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:57:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:57:22] [Rank 0] PRINT: step:3600/10000 val_loss:4.2766 svd_entropy: attn_qk:H=0.6968,top10E=0.34,eRank=111.2,q75/q25=79.79 attn_vo:H=0.7642,top10E=0.23,eRank=202.3,q75/q25=98.69 mlp_w1:H=0.7474,top10E=0.32,eRank=160.8,q75/q25=11.22 mlp_w2:H=0.8413,top10E=0.15,eRank=270.1,q75/q25=17.74 vo_prod:H=0.6622,top10E=0.35,eRank=86.6,q75/q25=13400.60 train_time:262610ms step_avg:72.95ms +[2025-09-02 04:57:22] [Rank 0] PRINT: step:3600/10000 val_loss:4.2766 svd_entropy: attn_qk:H=0.6968,top10E=0.34,eRank=111.2,q75/q25=79.79 attn_vo:H=0.7642,top10E=0.23,eRank=202.3,q75/q25=98.69 mlp_w1:H=0.7474,top10E=0.32,eRank=160.8,q75/q25=11.22 mlp_w2:H=0.8413,top10E=0.15,eRank=270.1,q75/q25=17.74 vo_prod:H=0.6622,top10E=0.35,eRank=86.6,q75/q25=13400.60 train_time:262610ms step_avg:72.95ms +[2025-09-02 04:57:22] [Rank 0] step:3601/10000 train_time:262621ms step_avg:72.93ms +[2025-09-02 04:57:22] [Rank 0] step:3601/10000 train_time:262621ms step_avg:72.93ms +[2025-09-02 04:57:23] [Rank 0] step:3621/10000 train_time:263983ms step_avg:72.90ms +[2025-09-02 04:57:23] [Rank 0] step:3621/10000 train_time:263983ms step_avg:72.90ms +[2025-09-02 04:57:25] [Rank 0] step:3641/10000 train_time:265477ms step_avg:72.91ms +[2025-09-02 04:57:25] [Rank 0] step:3641/10000 train_time:265477ms step_avg:72.91ms +[2025-09-02 04:57:26] [Rank 0] step:3661/10000 train_time:266974ms step_avg:72.92ms +[2025-09-02 04:57:26] [Rank 0] step:3661/10000 train_time:266974ms step_avg:72.92ms +[2025-09-02 04:57:28] [Rank 0] step:3681/10000 train_time:268469ms step_avg:72.93ms +[2025-09-02 04:57:28] [Rank 0] step:3681/10000 train_time:268469ms step_avg:72.93ms +[2025-09-02 04:57:29] [Rank 0] step:3701/10000 train_time:269966ms step_avg:72.94ms +[2025-09-02 04:57:29] [Rank 0] step:3701/10000 train_time:269966ms step_avg:72.94ms +[2025-09-02 04:57:31] [Rank 0] step:3721/10000 train_time:271487ms step_avg:72.96ms +[2025-09-02 04:57:31] [Rank 0] step:3721/10000 train_time:271487ms step_avg:72.96ms +[2025-09-02 
04:57:32] [Rank 0] step:3741/10000 train_time:273024ms step_avg:72.98ms +[2025-09-02 04:57:32] [Rank 0] step:3741/10000 train_time:273024ms step_avg:72.98ms +[2025-09-02 04:57:34] [Rank 0] step:3761/10000 train_time:274556ms step_avg:73.00ms +[2025-09-02 04:57:34] [Rank 0] step:3761/10000 train_time:274556ms step_avg:73.00ms +[2025-09-02 04:57:35] [Rank 0] step:3781/10000 train_time:276089ms step_avg:73.02ms +[2025-09-02 04:57:35] [Rank 0] step:3781/10000 train_time:276089ms step_avg:73.02ms +[2025-09-02 04:57:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:57:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:57:49] [Rank 0] PRINT: step:3800/10000 val_loss:4.2153 svd_entropy: attn_qk:H=0.7003,top10E=0.34,eRank=113.5,q75/q25=82.48 attn_vo:H=0.7683,top10E=0.23,eRank=206.3,q75/q25=99.62 mlp_w1:H=0.7531,top10E=0.31,eRank=166.2,q75/q25=11.46 mlp_w2:H=0.8447,top10E=0.14,eRank=276.2,q75/q25=17.55 vo_prod:H=0.6669,top10E=0.34,eRank=89.3,q75/q25=14454.65 train_time:277776ms step_avg:73.10ms +[2025-09-02 04:57:49] [Rank 0] PRINT: step:3800/10000 val_loss:4.2153 svd_entropy: attn_qk:H=0.7003,top10E=0.34,eRank=113.5,q75/q25=82.48 attn_vo:H=0.7683,top10E=0.23,eRank=206.3,q75/q25=99.62 mlp_w1:H=0.7531,top10E=0.31,eRank=166.2,q75/q25=11.46 mlp_w2:H=0.8447,top10E=0.14,eRank=276.2,q75/q25=17.55 vo_prod:H=0.6669,top10E=0.34,eRank=89.3,q75/q25=14454.65 train_time:277776ms step_avg:73.10ms +[2025-09-02 04:57:49] [Rank 0] step:3801/10000 train_time:277787ms step_avg:73.08ms +[2025-09-02 04:57:49] [Rank 0] step:3801/10000 train_time:277787ms step_avg:73.08ms +[2025-09-02 04:57:50] [Rank 0] step:3821/10000 train_time:279189ms step_avg:73.07ms +[2025-09-02 04:57:50] [Rank 0] step:3821/10000 train_time:279189ms step_avg:73.07ms +[2025-09-02 04:57:52] [Rank 0] step:3841/10000 train_time:280725ms 
step_avg:73.09ms +[2025-09-02 04:57:52] [Rank 0] step:3841/10000 train_time:280725ms step_avg:73.09ms +[2025-09-02 04:57:53] [Rank 0] step:3861/10000 train_time:282258ms step_avg:73.10ms +[2025-09-02 04:57:53] [Rank 0] step:3861/10000 train_time:282258ms step_avg:73.10ms +[2025-09-02 04:57:55] [Rank 0] step:3881/10000 train_time:283790ms step_avg:73.12ms +[2025-09-02 04:57:55] [Rank 0] step:3881/10000 train_time:283790ms step_avg:73.12ms +[2025-09-02 04:57:56] [Rank 0] step:3901/10000 train_time:285324ms step_avg:73.14ms +[2025-09-02 04:57:56] [Rank 0] step:3901/10000 train_time:285324ms step_avg:73.14ms +[2025-09-02 04:57:58] [Rank 0] step:3921/10000 train_time:286857ms step_avg:73.16ms +[2025-09-02 04:57:58] [Rank 0] step:3921/10000 train_time:286857ms step_avg:73.16ms +[2025-09-02 04:57:59] [Rank 0] step:3941/10000 train_time:288390ms step_avg:73.18ms +[2025-09-02 04:57:59] [Rank 0] step:3941/10000 train_time:288390ms step_avg:73.18ms +[2025-09-02 04:58:01] [Rank 0] step:3961/10000 train_time:289922ms step_avg:73.19ms +[2025-09-02 04:58:01] [Rank 0] step:3961/10000 train_time:289922ms step_avg:73.19ms +[2025-09-02 04:58:02] [Rank 0] step:3981/10000 train_time:291457ms step_avg:73.21ms +[2025-09-02 04:58:02] [Rank 0] step:3981/10000 train_time:291457ms step_avg:73.21ms +[2025-09-02 04:58:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:58:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:58:16] [Rank 0] PRINT: step:4000/10000 val_loss:4.1855 svd_entropy: attn_qk:H=0.7040,top10E=0.33,eRank=116.0,q75/q25=85.22 attn_vo:H=0.7721,top10E=0.22,eRank=210.0,q75/q25=100.15 mlp_w1:H=0.7588,top10E=0.30,eRank=171.7,q75/q25=11.78 mlp_w2:H=0.8478,top10E=0.14,eRank=282.0,q75/q25=17.35 vo_prod:H=0.6715,top10E=0.33,eRank=92.0,q75/q25=15130.60 train_time:293144ms step_avg:73.29ms +[2025-09-02 04:58:16] [Rank 0] PRINT: step:4000/10000 val_loss:4.1855 svd_entropy: attn_qk:H=0.7040,top10E=0.33,eRank=116.0,q75/q25=85.22 attn_vo:H=0.7721,top10E=0.22,eRank=210.0,q75/q25=100.15 mlp_w1:H=0.7588,top10E=0.30,eRank=171.7,q75/q25=11.78 mlp_w2:H=0.8478,top10E=0.14,eRank=282.0,q75/q25=17.35 vo_prod:H=0.6715,top10E=0.33,eRank=92.0,q75/q25=15130.60 train_time:293144ms step_avg:73.29ms +[2025-09-02 04:58:16] [Rank 0] step:4001/10000 train_time:293156ms step_avg:73.27ms +[2025-09-02 04:58:16] [Rank 0] step:4001/10000 train_time:293156ms step_avg:73.27ms +[2025-09-02 04:58:17] [Rank 0] step:4021/10000 train_time:294556ms step_avg:73.25ms +[2025-09-02 04:58:17] [Rank 0] step:4021/10000 train_time:294556ms step_avg:73.25ms +[2025-09-02 04:58:19] [Rank 0] step:4041/10000 train_time:296088ms step_avg:73.27ms +[2025-09-02 04:58:19] [Rank 0] step:4041/10000 train_time:296088ms step_avg:73.27ms +[2025-09-02 04:58:20] [Rank 0] step:4061/10000 train_time:297621ms step_avg:73.29ms +[2025-09-02 04:58:20] [Rank 0] step:4061/10000 train_time:297621ms step_avg:73.29ms +[2025-09-02 04:58:22] [Rank 0] step:4081/10000 train_time:299254ms step_avg:73.33ms +[2025-09-02 04:58:22] [Rank 0] step:4081/10000 train_time:299254ms step_avg:73.33ms +[2025-09-02 04:58:24] [Rank 0] step:4101/10000 train_time:300788ms step_avg:73.34ms +[2025-09-02 04:58:24] [Rank 0] step:4101/10000 train_time:300788ms step_avg:73.34ms +[2025-09-02 04:58:25] [Rank 0] step:4121/10000 train_time:302323ms step_avg:73.36ms +[2025-09-02 04:58:25] [Rank 0] step:4121/10000 train_time:302323ms step_avg:73.36ms +[2025-09-02 
04:58:27] [Rank 0] step:4141/10000 train_time:303857ms step_avg:73.38ms +[2025-09-02 04:58:27] [Rank 0] step:4141/10000 train_time:303857ms step_avg:73.38ms +[2025-09-02 04:58:28] [Rank 0] step:4161/10000 train_time:305390ms step_avg:73.39ms +[2025-09-02 04:58:28] [Rank 0] step:4161/10000 train_time:305390ms step_avg:73.39ms +[2025-09-02 04:58:30] [Rank 0] step:4181/10000 train_time:306926ms step_avg:73.41ms +[2025-09-02 04:58:30] [Rank 0] step:4181/10000 train_time:306926ms step_avg:73.41ms +[2025-09-02 04:58:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:58:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:58:43] [Rank 0] PRINT: step:4200/10000 val_loss:4.1685 svd_entropy: attn_qk:H=0.7074,top10E=0.33,eRank=118.4,q75/q25=87.03 attn_vo:H=0.7757,top10E=0.22,eRank=213.5,q75/q25=101.11 mlp_w1:H=0.7639,top10E=0.30,eRank=177.0,q75/q25=12.04 mlp_w2:H=0.8506,top10E=0.14,eRank=287.4,q75/q25=17.26 vo_prod:H=0.6756,top10E=0.33,eRank=94.5,q75/q25=15434.45 train_time:308614ms step_avg:73.48ms +[2025-09-02 04:58:43] [Rank 0] PRINT: step:4200/10000 val_loss:4.1685 svd_entropy: attn_qk:H=0.7074,top10E=0.33,eRank=118.4,q75/q25=87.03 attn_vo:H=0.7757,top10E=0.22,eRank=213.5,q75/q25=101.11 mlp_w1:H=0.7639,top10E=0.30,eRank=177.0,q75/q25=12.04 mlp_w2:H=0.8506,top10E=0.14,eRank=287.4,q75/q25=17.26 vo_prod:H=0.6756,top10E=0.33,eRank=94.5,q75/q25=15434.45 train_time:308614ms step_avg:73.48ms +[2025-09-02 04:58:43] [Rank 0] step:4201/10000 train_time:308625ms step_avg:73.46ms +[2025-09-02 04:58:43] [Rank 0] step:4201/10000 train_time:308625ms step_avg:73.46ms +[2025-09-02 04:58:44] [Rank 0] step:4221/10000 train_time:310006ms step_avg:73.44ms +[2025-09-02 04:58:44] [Rank 0] step:4221/10000 train_time:310006ms step_avg:73.44ms +[2025-09-02 04:58:46] [Rank 0] step:4241/10000 train_time:311541ms 
step_avg:73.46ms +[2025-09-02 04:58:46] [Rank 0] step:4241/10000 train_time:311541ms step_avg:73.46ms +[2025-09-02 04:58:48] [Rank 0] step:4261/10000 train_time:313073ms step_avg:73.47ms +[2025-09-02 04:58:48] [Rank 0] step:4261/10000 train_time:313073ms step_avg:73.47ms +[2025-09-02 04:58:49] [Rank 0] step:4281/10000 train_time:314605ms step_avg:73.49ms +[2025-09-02 04:58:49] [Rank 0] step:4281/10000 train_time:314605ms step_avg:73.49ms +[2025-09-02 04:58:51] [Rank 0] step:4301/10000 train_time:316139ms step_avg:73.50ms +[2025-09-02 04:58:51] [Rank 0] step:4301/10000 train_time:316139ms step_avg:73.50ms +[2025-09-02 04:58:52] [Rank 0] step:4321/10000 train_time:317673ms step_avg:73.52ms +[2025-09-02 04:58:52] [Rank 0] step:4321/10000 train_time:317673ms step_avg:73.52ms +[2025-09-02 04:58:54] [Rank 0] step:4341/10000 train_time:319204ms step_avg:73.53ms +[2025-09-02 04:58:54] [Rank 0] step:4341/10000 train_time:319204ms step_avg:73.53ms +[2025-09-02 04:58:55] [Rank 0] step:4361/10000 train_time:320738ms step_avg:73.55ms +[2025-09-02 04:58:55] [Rank 0] step:4361/10000 train_time:320738ms step_avg:73.55ms +[2025-09-02 04:58:57] [Rank 0] step:4381/10000 train_time:322269ms step_avg:73.56ms +[2025-09-02 04:58:57] [Rank 0] step:4381/10000 train_time:322269ms step_avg:73.56ms +[2025-09-02 04:58:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:58:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 04:59:10] [Rank 0] PRINT: step:4400/10000 val_loss:4.1491 svd_entropy: attn_qk:H=0.7106,top10E=0.32,eRank=120.6,q75/q25=89.23 attn_vo:H=0.7790,top10E=0.21,eRank=216.8,q75/q25=101.29 mlp_w1:H=0.7687,top10E=0.29,eRank=182.0,q75/q25=12.30 mlp_w2:H=0.8531,top10E=0.14,eRank=292.4,q75/q25=17.04 vo_prod:H=0.6797,top10E=0.32,eRank=97.0,q75/q25=16070.90 train_time:323956ms step_avg:73.63ms +[2025-09-02 04:59:10] [Rank 0] PRINT: step:4400/10000 val_loss:4.1491 svd_entropy: attn_qk:H=0.7106,top10E=0.32,eRank=120.6,q75/q25=89.23 attn_vo:H=0.7790,top10E=0.21,eRank=216.8,q75/q25=101.29 mlp_w1:H=0.7687,top10E=0.29,eRank=182.0,q75/q25=12.30 mlp_w2:H=0.8531,top10E=0.14,eRank=292.4,q75/q25=17.04 vo_prod:H=0.6797,top10E=0.32,eRank=97.0,q75/q25=16070.90 train_time:323956ms step_avg:73.63ms +[2025-09-02 04:59:10] [Rank 0] step:4401/10000 train_time:323967ms step_avg:73.61ms +[2025-09-02 04:59:10] [Rank 0] step:4401/10000 train_time:323967ms step_avg:73.61ms +[2025-09-02 04:59:11] [Rank 0] step:4421/10000 train_time:325358ms step_avg:73.59ms +[2025-09-02 04:59:11] [Rank 0] step:4421/10000 train_time:325358ms step_avg:73.59ms +[2025-09-02 04:59:13] [Rank 0] step:4441/10000 train_time:326887ms step_avg:73.61ms +[2025-09-02 04:59:13] [Rank 0] step:4441/10000 train_time:326887ms step_avg:73.61ms +[2025-09-02 04:59:15] [Rank 0] step:4461/10000 train_time:328423ms step_avg:73.62ms +[2025-09-02 04:59:15] [Rank 0] step:4461/10000 train_time:328423ms step_avg:73.62ms +[2025-09-02 04:59:16] [Rank 0] step:4481/10000 train_time:329960ms step_avg:73.64ms +[2025-09-02 04:59:16] [Rank 0] step:4481/10000 train_time:329960ms step_avg:73.64ms +[2025-09-02 04:59:18] [Rank 0] step:4501/10000 train_time:331497ms step_avg:73.65ms +[2025-09-02 04:59:18] [Rank 0] step:4501/10000 train_time:331497ms step_avg:73.65ms +[2025-09-02 04:59:19] [Rank 0] step:4521/10000 train_time:333033ms step_avg:73.66ms +[2025-09-02 04:59:19] [Rank 0] step:4521/10000 train_time:333033ms step_avg:73.66ms +[2025-09-02 
04:59:21] [Rank 0] step:4541/10000 train_time:334570ms step_avg:73.68ms +[2025-09-02 04:59:21] [Rank 0] step:4541/10000 train_time:334570ms step_avg:73.68ms +[2025-09-02 04:59:22] [Rank 0] step:4561/10000 train_time:336108ms step_avg:73.69ms +[2025-09-02 04:59:22] [Rank 0] step:4561/10000 train_time:336108ms step_avg:73.69ms +[2025-09-02 04:59:24] [Rank 0] step:4581/10000 train_time:337648ms step_avg:73.71ms +[2025-09-02 04:59:24] [Rank 0] step:4581/10000 train_time:337648ms step_avg:73.71ms +[2025-09-02 04:59:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:59:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:59:37] [Rank 0] PRINT: step:4600/10000 val_loss:4.1097 svd_entropy: attn_qk:H=0.7137,top10E=0.32,eRank=122.9,q75/q25=90.82 attn_vo:H=0.7822,top10E=0.21,eRank=220.2,q75/q25=101.48 mlp_w1:H=0.7733,top10E=0.28,eRank=187.0,q75/q25=12.45 mlp_w2:H=0.8557,top10E=0.13,eRank=297.3,q75/q25=16.96 vo_prod:H=0.6836,top10E=0.31,eRank=99.5,q75/q25=16375.95 train_time:339342ms step_avg:73.77ms +[2025-09-02 04:59:37] [Rank 0] PRINT: step:4600/10000 val_loss:4.1097 svd_entropy: attn_qk:H=0.7137,top10E=0.32,eRank=122.9,q75/q25=90.82 attn_vo:H=0.7822,top10E=0.21,eRank=220.2,q75/q25=101.48 mlp_w1:H=0.7733,top10E=0.28,eRank=187.0,q75/q25=12.45 mlp_w2:H=0.8557,top10E=0.13,eRank=297.3,q75/q25=16.96 vo_prod:H=0.6836,top10E=0.31,eRank=99.5,q75/q25=16375.95 train_time:339342ms step_avg:73.77ms +[2025-09-02 04:59:37] [Rank 0] step:4601/10000 train_time:339353ms step_avg:73.76ms +[2025-09-02 04:59:37] [Rank 0] step:4601/10000 train_time:339353ms step_avg:73.76ms +[2025-09-02 04:59:39] [Rank 0] step:4621/10000 train_time:340740ms step_avg:73.74ms +[2025-09-02 04:59:39] [Rank 0] step:4621/10000 train_time:340740ms step_avg:73.74ms +[2025-09-02 04:59:40] [Rank 0] step:4641/10000 train_time:342280ms 
step_avg:73.75ms +[2025-09-02 04:59:40] [Rank 0] step:4641/10000 train_time:342280ms step_avg:73.75ms +[2025-09-02 04:59:42] [Rank 0] step:4661/10000 train_time:343817ms step_avg:73.76ms +[2025-09-02 04:59:42] [Rank 0] step:4661/10000 train_time:343817ms step_avg:73.76ms +[2025-09-02 04:59:43] [Rank 0] step:4681/10000 train_time:345356ms step_avg:73.78ms +[2025-09-02 04:59:43] [Rank 0] step:4681/10000 train_time:345356ms step_avg:73.78ms +[2025-09-02 04:59:45] [Rank 0] step:4701/10000 train_time:346896ms step_avg:73.79ms +[2025-09-02 04:59:45] [Rank 0] step:4701/10000 train_time:346896ms step_avg:73.79ms +[2025-09-02 04:59:46] [Rank 0] step:4721/10000 train_time:348433ms step_avg:73.80ms +[2025-09-02 04:59:46] [Rank 0] step:4721/10000 train_time:348433ms step_avg:73.80ms +[2025-09-02 04:59:48] [Rank 0] step:4741/10000 train_time:349972ms step_avg:73.82ms +[2025-09-02 04:59:48] [Rank 0] step:4741/10000 train_time:349972ms step_avg:73.82ms +[2025-09-02 04:59:49] [Rank 0] step:4761/10000 train_time:351512ms step_avg:73.83ms +[2025-09-02 04:59:49] [Rank 0] step:4761/10000 train_time:351512ms step_avg:73.83ms +[2025-09-02 04:59:51] [Rank 0] step:4781/10000 train_time:353051ms step_avg:73.84ms +[2025-09-02 04:59:51] [Rank 0] step:4781/10000 train_time:353051ms step_avg:73.84ms +[2025-09-02 04:59:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 04:59:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:00:04] [Rank 0] PRINT: step:4800/10000 val_loss:4.0948 svd_entropy: attn_qk:H=0.7166,top10E=0.31,eRank=125.0,q75/q25=92.46 attn_vo:H=0.7851,top10E=0.21,eRank=223.3,q75/q25=101.63 mlp_w1:H=0.7776,top10E=0.28,eRank=191.9,q75/q25=12.68 mlp_w2:H=0.8580,top10E=0.13,eRank=302.0,q75/q25=16.75 vo_prod:H=0.6871,top10E=0.31,eRank=101.9,q75/q25=16262.29 train_time:354748ms step_avg:73.91ms +[2025-09-02 05:00:04] [Rank 0] PRINT: step:4800/10000 val_loss:4.0948 svd_entropy: attn_qk:H=0.7166,top10E=0.31,eRank=125.0,q75/q25=92.46 attn_vo:H=0.7851,top10E=0.21,eRank=223.3,q75/q25=101.63 mlp_w1:H=0.7776,top10E=0.28,eRank=191.9,q75/q25=12.68 mlp_w2:H=0.8580,top10E=0.13,eRank=302.0,q75/q25=16.75 vo_prod:H=0.6871,top10E=0.31,eRank=101.9,q75/q25=16262.29 train_time:354748ms step_avg:73.91ms +[2025-09-02 05:00:04] [Rank 0] step:4801/10000 train_time:354760ms step_avg:73.89ms +[2025-09-02 05:00:04] [Rank 0] step:4801/10000 train_time:354760ms step_avg:73.89ms +[2025-09-02 05:00:06] [Rank 0] step:4821/10000 train_time:356170ms step_avg:73.88ms +[2025-09-02 05:00:06] [Rank 0] step:4821/10000 train_time:356170ms step_avg:73.88ms +[2025-09-02 05:00:07] [Rank 0] step:4841/10000 train_time:357707ms step_avg:73.89ms +[2025-09-02 05:00:07] [Rank 0] step:4841/10000 train_time:357707ms step_avg:73.89ms +[2025-09-02 05:00:09] [Rank 0] step:4861/10000 train_time:359249ms step_avg:73.90ms +[2025-09-02 05:00:09] [Rank 0] step:4861/10000 train_time:359249ms step_avg:73.90ms +[2025-09-02 05:00:10] [Rank 0] step:4881/10000 train_time:360789ms step_avg:73.92ms +[2025-09-02 05:00:10] [Rank 0] step:4881/10000 train_time:360789ms step_avg:73.92ms +[2025-09-02 05:00:12] [Rank 0] step:4901/10000 train_time:362327ms step_avg:73.93ms +[2025-09-02 05:00:12] [Rank 0] step:4901/10000 train_time:362327ms step_avg:73.93ms +[2025-09-02 05:00:13] [Rank 0] step:4921/10000 train_time:363870ms step_avg:73.94ms +[2025-09-02 05:00:13] [Rank 0] step:4921/10000 train_time:363870ms step_avg:73.94ms +[2025-09-02 
05:00:15] [Rank 0] step:4941/10000 train_time:365412ms step_avg:73.95ms +[2025-09-02 05:00:15] [Rank 0] step:4941/10000 train_time:365412ms step_avg:73.95ms +[2025-09-02 05:00:16] [Rank 0] step:4961/10000 train_time:366949ms step_avg:73.97ms +[2025-09-02 05:00:16] [Rank 0] step:4961/10000 train_time:366949ms step_avg:73.97ms +[2025-09-02 05:00:18] [Rank 0] step:4981/10000 train_time:368492ms step_avg:73.98ms +[2025-09-02 05:00:18] [Rank 0] step:4981/10000 train_time:368492ms step_avg:73.98ms +[2025-09-02 05:00:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:00:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:00:31] [Rank 0] PRINT: step:5000/10000 val_loss:4.0734 svd_entropy: attn_qk:H=0.7194,top10E=0.31,eRank=127.1,q75/q25=93.75 attn_vo:H=0.7879,top10E=0.20,eRank=226.3,q75/q25=102.19 mlp_w1:H=0.7816,top10E=0.27,eRank=196.5,q75/q25=12.79 mlp_w2:H=0.8602,top10E=0.13,eRank=306.4,q75/q25=16.59 vo_prod:H=0.6907,top10E=0.30,eRank=104.2,q75/q25=16233.66 train_time:370186ms step_avg:74.04ms +[2025-09-02 05:00:31] [Rank 0] PRINT: step:5000/10000 val_loss:4.0734 svd_entropy: attn_qk:H=0.7194,top10E=0.31,eRank=127.1,q75/q25=93.75 attn_vo:H=0.7879,top10E=0.20,eRank=226.3,q75/q25=102.19 mlp_w1:H=0.7816,top10E=0.27,eRank=196.5,q75/q25=12.79 mlp_w2:H=0.8602,top10E=0.13,eRank=306.4,q75/q25=16.59 vo_prod:H=0.6907,top10E=0.30,eRank=104.2,q75/q25=16233.66 train_time:370186ms step_avg:74.04ms +[2025-09-02 05:00:31] [Rank 0] step:5001/10000 train_time:370198ms step_avg:74.02ms +[2025-09-02 05:00:31] [Rank 0] step:5001/10000 train_time:370198ms step_avg:74.02ms +[2025-09-02 05:00:33] [Rank 0] step:5021/10000 train_time:371587ms step_avg:74.01ms +[2025-09-02 05:00:33] [Rank 0] step:5021/10000 train_time:371587ms step_avg:74.01ms +[2025-09-02 05:00:34] [Rank 0] step:5041/10000 
train_time:373129ms step_avg:74.02ms +[2025-09-02 05:00:34] [Rank 0] step:5041/10000 train_time:373129ms step_avg:74.02ms +[2025-09-02 05:00:36] [Rank 0] step:5061/10000 train_time:374665ms step_avg:74.03ms +[2025-09-02 05:00:36] [Rank 0] step:5061/10000 train_time:374665ms step_avg:74.03ms +[2025-09-02 05:00:37] [Rank 0] step:5081/10000 train_time:376204ms step_avg:74.04ms +[2025-09-02 05:00:37] [Rank 0] step:5081/10000 train_time:376204ms step_avg:74.04ms +[2025-09-02 05:00:39] [Rank 0] step:5101/10000 train_time:377743ms step_avg:74.05ms +[2025-09-02 05:00:39] [Rank 0] step:5101/10000 train_time:377743ms step_avg:74.05ms +[2025-09-02 05:00:41] [Rank 0] step:5121/10000 train_time:379283ms step_avg:74.06ms +[2025-09-02 05:00:41] [Rank 0] step:5121/10000 train_time:379283ms step_avg:74.06ms +[2025-09-02 05:00:42] [Rank 0] step:5141/10000 train_time:380826ms step_avg:74.08ms +[2025-09-02 05:00:42] [Rank 0] step:5141/10000 train_time:380826ms step_avg:74.08ms +[2025-09-02 05:00:44] [Rank 0] step:5161/10000 train_time:382366ms step_avg:74.09ms +[2025-09-02 05:00:44] [Rank 0] step:5161/10000 train_time:382366ms step_avg:74.09ms +[2025-09-02 05:00:45] [Rank 0] step:5181/10000 train_time:383909ms step_avg:74.10ms +[2025-09-02 05:00:45] [Rank 0] step:5181/10000 train_time:383909ms step_avg:74.10ms +[2025-09-02 05:00:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:00:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:00:58] [Rank 0] PRINT: step:5200/10000 val_loss:4.0507 svd_entropy: attn_qk:H=0.7221,top10E=0.31,eRank=129.2,q75/q25=94.14 attn_vo:H=0.7904,top10E=0.20,eRank=229.0,q75/q25=101.30 mlp_w1:H=0.7854,top10E=0.27,eRank=201.1,q75/q25=13.01 mlp_w2:H=0.8621,top10E=0.13,eRank=310.4,q75/q25=16.51 vo_prod:H=0.6936,top10E=0.30,eRank=106.2,q75/q25=16125.43 train_time:385629ms step_avg:74.16ms +[2025-09-02 05:00:58] [Rank 0] PRINT: step:5200/10000 val_loss:4.0507 svd_entropy: attn_qk:H=0.7221,top10E=0.31,eRank=129.2,q75/q25=94.14 attn_vo:H=0.7904,top10E=0.20,eRank=229.0,q75/q25=101.30 mlp_w1:H=0.7854,top10E=0.27,eRank=201.1,q75/q25=13.01 mlp_w2:H=0.8621,top10E=0.13,eRank=310.4,q75/q25=16.51 vo_prod:H=0.6936,top10E=0.30,eRank=106.2,q75/q25=16125.43 train_time:385629ms step_avg:74.16ms +[2025-09-02 05:00:58] [Rank 0] step:5201/10000 train_time:385640ms step_avg:74.15ms +[2025-09-02 05:00:58] [Rank 0] step:5201/10000 train_time:385640ms step_avg:74.15ms +[2025-09-02 05:01:00] [Rank 0] step:5221/10000 train_time:387074ms step_avg:74.14ms +[2025-09-02 05:01:00] [Rank 0] step:5221/10000 train_time:387074ms step_avg:74.14ms +[2025-09-02 05:01:02] [Rank 0] step:5241/10000 train_time:388643ms step_avg:74.15ms +[2025-09-02 05:01:02] [Rank 0] step:5241/10000 train_time:388643ms step_avg:74.15ms +[2025-09-02 05:01:03] [Rank 0] step:5261/10000 train_time:390212ms step_avg:74.17ms +[2025-09-02 05:01:03] [Rank 0] step:5261/10000 train_time:390212ms step_avg:74.17ms +[2025-09-02 05:01:05] [Rank 0] step:5281/10000 train_time:391782ms step_avg:74.19ms +[2025-09-02 05:01:05] [Rank 0] step:5281/10000 train_time:391782ms step_avg:74.19ms +[2025-09-02 05:01:06] [Rank 0] step:5301/10000 train_time:393359ms step_avg:74.20ms +[2025-09-02 05:01:06] [Rank 0] step:5301/10000 train_time:393359ms step_avg:74.20ms +[2025-09-02 05:01:08] [Rank 0] step:5321/10000 train_time:394928ms step_avg:74.22ms +[2025-09-02 05:01:08] [Rank 0] step:5321/10000 train_time:394928ms step_avg:74.22ms +[2025-09-02 
05:01:09] [Rank 0] step:5341/10000 train_time:396497ms step_avg:74.24ms +[2025-09-02 05:01:09] [Rank 0] step:5341/10000 train_time:396497ms step_avg:74.24ms +[2025-09-02 05:01:11] [Rank 0] step:5361/10000 train_time:398071ms step_avg:74.25ms +[2025-09-02 05:01:11] [Rank 0] step:5361/10000 train_time:398071ms step_avg:74.25ms +[2025-09-02 05:01:13] [Rank 0] step:5381/10000 train_time:399646ms step_avg:74.27ms +[2025-09-02 05:01:13] [Rank 0] step:5381/10000 train_time:399646ms step_avg:74.27ms +[2025-09-02 05:01:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:01:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:01:26] [Rank 0] PRINT: step:5400/10000 val_loss:4.0324 svd_entropy: attn_qk:H=0.7246,top10E=0.30,eRank=131.1,q75/q25=95.40 attn_vo:H=0.7928,top10E=0.19,eRank=231.7,q75/q25=100.71 mlp_w1:H=0.7890,top10E=0.26,eRank=205.5,q75/q25=13.21 mlp_w2:H=0.8639,top10E=0.13,eRank=314.2,q75/q25=16.32 vo_prod:H=0.6966,top10E=0.29,eRank=108.3,q75/q25=15786.71 train_time:401373ms step_avg:74.33ms +[2025-09-02 05:01:26] [Rank 0] PRINT: step:5400/10000 val_loss:4.0324 svd_entropy: attn_qk:H=0.7246,top10E=0.30,eRank=131.1,q75/q25=95.40 attn_vo:H=0.7928,top10E=0.19,eRank=231.7,q75/q25=100.71 mlp_w1:H=0.7890,top10E=0.26,eRank=205.5,q75/q25=13.21 mlp_w2:H=0.8639,top10E=0.13,eRank=314.2,q75/q25=16.32 vo_prod:H=0.6966,top10E=0.29,eRank=108.3,q75/q25=15786.71 train_time:401373ms step_avg:74.33ms +[2025-09-02 05:01:26] [Rank 0] step:5401/10000 train_time:401384ms step_avg:74.32ms +[2025-09-02 05:01:26] [Rank 0] step:5401/10000 train_time:401384ms step_avg:74.32ms +[2025-09-02 05:01:27] [Rank 0] step:5421/10000 train_time:402822ms step_avg:74.31ms +[2025-09-02 05:01:27] [Rank 0] step:5421/10000 train_time:402822ms step_avg:74.31ms +[2025-09-02 05:01:29] [Rank 0] step:5441/10000 
train_time:404384ms step_avg:74.32ms +[2025-09-02 05:01:29] [Rank 0] step:5441/10000 train_time:404384ms step_avg:74.32ms +[2025-09-02 05:01:31] [Rank 0] step:5461/10000 train_time:405957ms step_avg:74.34ms +[2025-09-02 05:01:31] [Rank 0] step:5461/10000 train_time:405957ms step_avg:74.34ms +[2025-09-02 05:01:32] [Rank 0] step:5481/10000 train_time:407530ms step_avg:74.35ms +[2025-09-02 05:01:32] [Rank 0] step:5481/10000 train_time:407530ms step_avg:74.35ms +[2025-09-02 05:01:34] [Rank 0] step:5501/10000 train_time:409108ms step_avg:74.37ms +[2025-09-02 05:01:34] [Rank 0] step:5501/10000 train_time:409108ms step_avg:74.37ms +[2025-09-02 05:01:35] [Rank 0] step:5521/10000 train_time:410683ms step_avg:74.39ms +[2025-09-02 05:01:35] [Rank 0] step:5521/10000 train_time:410683ms step_avg:74.39ms +[2025-09-02 05:01:37] [Rank 0] step:5541/10000 train_time:412256ms step_avg:74.40ms +[2025-09-02 05:01:37] [Rank 0] step:5541/10000 train_time:412256ms step_avg:74.40ms +[2025-09-02 05:01:39] [Rank 0] step:5561/10000 train_time:413829ms step_avg:74.42ms +[2025-09-02 05:01:39] [Rank 0] step:5561/10000 train_time:413829ms step_avg:74.42ms +[2025-09-02 05:01:40] [Rank 0] step:5581/10000 train_time:415402ms step_avg:74.43ms +[2025-09-02 05:01:40] [Rank 0] step:5581/10000 train_time:415402ms step_avg:74.43ms +[2025-09-02 05:01:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:01:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:01:53] [Rank 0] PRINT: step:5600/10000 val_loss:4.0183 svd_entropy: attn_qk:H=0.7269,top10E=0.30,eRank=133.0,q75/q25=95.70 attn_vo:H=0.7951,top10E=0.19,eRank=234.3,q75/q25=100.42 mlp_w1:H=0.7923,top10E=0.26,eRank=209.7,q75/q25=13.37 mlp_w2:H=0.8656,top10E=0.13,eRank=317.7,q75/q25=16.24 vo_prod:H=0.6994,top10E=0.29,eRank=110.2,q75/q25=15479.07 train_time:417134ms step_avg:74.49ms +[2025-09-02 05:01:53] [Rank 0] PRINT: step:5600/10000 val_loss:4.0183 svd_entropy: attn_qk:H=0.7269,top10E=0.30,eRank=133.0,q75/q25=95.70 attn_vo:H=0.7951,top10E=0.19,eRank=234.3,q75/q25=100.42 mlp_w1:H=0.7923,top10E=0.26,eRank=209.7,q75/q25=13.37 mlp_w2:H=0.8656,top10E=0.13,eRank=317.7,q75/q25=16.24 vo_prod:H=0.6994,top10E=0.29,eRank=110.2,q75/q25=15479.07 train_time:417134ms step_avg:74.49ms +[2025-09-02 05:01:53] [Rank 0] step:5601/10000 train_time:417145ms step_avg:74.48ms +[2025-09-02 05:01:53] [Rank 0] step:5601/10000 train_time:417145ms step_avg:74.48ms +[2025-09-02 05:01:55] [Rank 0] step:5621/10000 train_time:418582ms step_avg:74.47ms +[2025-09-02 05:01:55] [Rank 0] step:5621/10000 train_time:418582ms step_avg:74.47ms +[2025-09-02 05:01:57] [Rank 0] step:5641/10000 train_time:420152ms step_avg:74.48ms +[2025-09-02 05:01:57] [Rank 0] step:5641/10000 train_time:420152ms step_avg:74.48ms +[2025-09-02 05:01:58] [Rank 0] step:5661/10000 train_time:421722ms step_avg:74.50ms +[2025-09-02 05:01:58] [Rank 0] step:5661/10000 train_time:421722ms step_avg:74.50ms +[2025-09-02 05:02:00] [Rank 0] step:5681/10000 train_time:423297ms step_avg:74.51ms +[2025-09-02 05:02:00] [Rank 0] step:5681/10000 train_time:423297ms step_avg:74.51ms +[2025-09-02 05:02:01] [Rank 0] step:5701/10000 train_time:424868ms step_avg:74.53ms +[2025-09-02 05:02:01] [Rank 0] step:5701/10000 train_time:424868ms step_avg:74.53ms +[2025-09-02 05:02:03] [Rank 0] step:5721/10000 train_time:426440ms step_avg:74.54ms +[2025-09-02 05:02:03] [Rank 0] step:5721/10000 train_time:426440ms step_avg:74.54ms +[2025-09-02 
05:02:04] [Rank 0] step:5741/10000 train_time:428013ms step_avg:74.55ms +[2025-09-02 05:02:04] [Rank 0] step:5741/10000 train_time:428013ms step_avg:74.55ms +[2025-09-02 05:02:06] [Rank 0] step:5761/10000 train_time:429587ms step_avg:74.57ms +[2025-09-02 05:02:06] [Rank 0] step:5761/10000 train_time:429587ms step_avg:74.57ms +[2025-09-02 05:02:08] [Rank 0] step:5781/10000 train_time:431158ms step_avg:74.58ms +[2025-09-02 05:02:08] [Rank 0] step:5781/10000 train_time:431158ms step_avg:74.58ms +[2025-09-02 05:02:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:02:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:02:21] [Rank 0] PRINT: step:5800/10000 val_loss:4.0093 svd_entropy: attn_qk:H=0.7292,top10E=0.30,eRank=134.9,q75/q25=96.26 attn_vo:H=0.7973,top10E=0.19,eRank=236.8,q75/q25=98.97 mlp_w1:H=0.7955,top10E=0.25,eRank=213.7,q75/q25=13.52 mlp_w2:H=0.8672,top10E=0.12,eRank=321.1,q75/q25=16.08 vo_prod:H=0.7020,top10E=0.29,eRank=112.1,q75/q25=15079.92 train_time:432891ms step_avg:74.64ms +[2025-09-02 05:02:21] [Rank 0] PRINT: step:5800/10000 val_loss:4.0093 svd_entropy: attn_qk:H=0.7292,top10E=0.30,eRank=134.9,q75/q25=96.26 attn_vo:H=0.7973,top10E=0.19,eRank=236.8,q75/q25=98.97 mlp_w1:H=0.7955,top10E=0.25,eRank=213.7,q75/q25=13.52 mlp_w2:H=0.8672,top10E=0.12,eRank=321.1,q75/q25=16.08 vo_prod:H=0.7020,top10E=0.29,eRank=112.1,q75/q25=15079.92 train_time:432891ms step_avg:74.64ms +[2025-09-02 05:02:21] [Rank 0] step:5801/10000 train_time:432902ms step_avg:74.63ms +[2025-09-02 05:02:21] [Rank 0] step:5801/10000 train_time:432902ms step_avg:74.63ms +[2025-09-02 05:02:22] [Rank 0] step:5821/10000 train_time:434338ms step_avg:74.62ms +[2025-09-02 05:02:22] [Rank 0] step:5821/10000 train_time:434338ms step_avg:74.62ms +[2025-09-02 05:02:24] [Rank 0] step:5841/10000 train_time:435906ms 
step_avg:74.63ms +[2025-09-02 05:02:24] [Rank 0] step:5841/10000 train_time:435906ms step_avg:74.63ms +[2025-09-02 05:02:26] [Rank 0] step:5861/10000 train_time:437482ms step_avg:74.64ms +[2025-09-02 05:02:26] [Rank 0] step:5861/10000 train_time:437482ms step_avg:74.64ms +[2025-09-02 05:02:27] [Rank 0] step:5881/10000 train_time:439057ms step_avg:74.66ms +[2025-09-02 05:02:27] [Rank 0] step:5881/10000 train_time:439057ms step_avg:74.66ms +[2025-09-02 05:02:29] [Rank 0] step:5901/10000 train_time:440629ms step_avg:74.67ms +[2025-09-02 05:02:29] [Rank 0] step:5901/10000 train_time:440629ms step_avg:74.67ms +[2025-09-02 05:02:30] [Rank 0] step:5921/10000 train_time:442202ms step_avg:74.68ms +[2025-09-02 05:02:30] [Rank 0] step:5921/10000 train_time:442202ms step_avg:74.68ms +[2025-09-02 05:02:32] [Rank 0] step:5941/10000 train_time:443780ms step_avg:74.70ms +[2025-09-02 05:02:32] [Rank 0] step:5941/10000 train_time:443780ms step_avg:74.70ms +[2025-09-02 05:02:33] [Rank 0] step:5961/10000 train_time:445359ms step_avg:74.71ms +[2025-09-02 05:02:33] [Rank 0] step:5961/10000 train_time:445359ms step_avg:74.71ms +[2025-09-02 05:02:35] [Rank 0] step:5981/10000 train_time:446936ms step_avg:74.73ms +[2025-09-02 05:02:35] [Rank 0] step:5981/10000 train_time:446936ms step_avg:74.73ms +[2025-09-02 05:02:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:02:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:02:48] [Rank 0] PRINT: step:6000/10000 val_loss:3.9827 svd_entropy: attn_qk:H=0.7315,top10E=0.29,eRank=136.8,q75/q25=96.21 attn_vo:H=0.7994,top10E=0.19,eRank=239.2,q75/q25=98.64 mlp_w1:H=0.7986,top10E=0.25,eRank=217.7,q75/q25=13.67 mlp_w2:H=0.8687,top10E=0.12,eRank=324.4,q75/q25=15.97 vo_prod:H=0.7046,top10E=0.28,eRank=114.1,q75/q25=15044.68 train_time:448667ms step_avg:74.78ms +[2025-09-02 05:02:48] [Rank 0] PRINT: step:6000/10000 val_loss:3.9827 svd_entropy: attn_qk:H=0.7315,top10E=0.29,eRank=136.8,q75/q25=96.21 attn_vo:H=0.7994,top10E=0.19,eRank=239.2,q75/q25=98.64 mlp_w1:H=0.7986,top10E=0.25,eRank=217.7,q75/q25=13.67 mlp_w2:H=0.8687,top10E=0.12,eRank=324.4,q75/q25=15.97 vo_prod:H=0.7046,top10E=0.28,eRank=114.1,q75/q25=15044.68 train_time:448667ms step_avg:74.78ms +[2025-09-02 05:02:48] [Rank 0] step:6001/10000 train_time:448678ms step_avg:74.77ms +[2025-09-02 05:02:48] [Rank 0] step:6001/10000 train_time:448678ms step_avg:74.77ms +[2025-09-02 05:02:50] [Rank 0] step:6021/10000 train_time:450120ms step_avg:74.76ms +[2025-09-02 05:02:50] [Rank 0] step:6021/10000 train_time:450120ms step_avg:74.76ms +[2025-09-02 05:02:51] [Rank 0] step:6041/10000 train_time:451695ms step_avg:74.77ms +[2025-09-02 05:02:51] [Rank 0] step:6041/10000 train_time:451695ms step_avg:74.77ms +[2025-09-02 05:02:53] [Rank 0] step:6061/10000 train_time:453276ms step_avg:74.79ms +[2025-09-02 05:02:53] [Rank 0] step:6061/10000 train_time:453276ms step_avg:74.79ms +[2025-09-02 05:02:55] [Rank 0] step:6081/10000 train_time:454853ms step_avg:74.80ms +[2025-09-02 05:02:55] [Rank 0] step:6081/10000 train_time:454853ms step_avg:74.80ms +[2025-09-02 05:02:56] [Rank 0] step:6101/10000 train_time:456432ms step_avg:74.81ms +[2025-09-02 05:02:56] [Rank 0] step:6101/10000 train_time:456432ms step_avg:74.81ms +[2025-09-02 05:02:58] [Rank 0] step:6121/10000 train_time:458069ms step_avg:74.84ms +[2025-09-02 05:02:58] [Rank 0] step:6121/10000 train_time:458069ms step_avg:74.84ms +[2025-09-02 
05:02:59] [Rank 0] step:6141/10000 train_time:459655ms step_avg:74.85ms +[2025-09-02 05:02:59] [Rank 0] step:6141/10000 train_time:459655ms step_avg:74.85ms +[2025-09-02 05:03:01] [Rank 0] step:6161/10000 train_time:461230ms step_avg:74.86ms +[2025-09-02 05:03:01] [Rank 0] step:6161/10000 train_time:461230ms step_avg:74.86ms +[2025-09-02 05:03:03] [Rank 0] step:6181/10000 train_time:462805ms step_avg:74.88ms +[2025-09-02 05:03:03] [Rank 0] step:6181/10000 train_time:462805ms step_avg:74.88ms +[2025-09-02 05:03:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:03:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:03:16] [Rank 0] PRINT: step:6200/10000 val_loss:3.9688 svd_entropy: attn_qk:H=0.7336,top10E=0.29,eRank=138.6,q75/q25=97.25 attn_vo:H=0.8013,top10E=0.18,eRank=241.5,q75/q25=98.11 mlp_w1:H=0.8013,top10E=0.25,eRank=221.3,q75/q25=13.74 mlp_w2:H=0.8702,top10E=0.12,eRank=327.6,q75/q25=15.87 vo_prod:H=0.7071,top10E=0.28,eRank=115.9,q75/q25=14441.31 train_time:464542ms step_avg:74.93ms +[2025-09-02 05:03:16] [Rank 0] PRINT: step:6200/10000 val_loss:3.9688 svd_entropy: attn_qk:H=0.7336,top10E=0.29,eRank=138.6,q75/q25=97.25 attn_vo:H=0.8013,top10E=0.18,eRank=241.5,q75/q25=98.11 mlp_w1:H=0.8013,top10E=0.25,eRank=221.3,q75/q25=13.74 mlp_w2:H=0.8702,top10E=0.12,eRank=327.6,q75/q25=15.87 vo_prod:H=0.7071,top10E=0.28,eRank=115.9,q75/q25=14441.31 train_time:464542ms step_avg:74.93ms +[2025-09-02 05:03:16] [Rank 0] step:6201/10000 train_time:464552ms step_avg:74.92ms +[2025-09-02 05:03:16] [Rank 0] step:6201/10000 train_time:464552ms step_avg:74.92ms +[2025-09-02 05:03:17] [Rank 0] step:6221/10000 train_time:465983ms step_avg:74.90ms +[2025-09-02 05:03:17] [Rank 0] step:6221/10000 train_time:465983ms step_avg:74.90ms +[2025-09-02 05:03:19] [Rank 0] step:6241/10000 train_time:467553ms 
step_avg:74.92ms +[2025-09-02 05:03:19] [Rank 0] step:6241/10000 train_time:467553ms step_avg:74.92ms +[2025-09-02 05:03:21] [Rank 0] step:6261/10000 train_time:469129ms step_avg:74.93ms +[2025-09-02 05:03:21] [Rank 0] step:6261/10000 train_time:469129ms step_avg:74.93ms +[2025-09-02 05:03:22] [Rank 0] step:6281/10000 train_time:470707ms step_avg:74.94ms +[2025-09-02 05:03:22] [Rank 0] step:6281/10000 train_time:470707ms step_avg:74.94ms +[2025-09-02 05:03:24] [Rank 0] step:6301/10000 train_time:472284ms step_avg:74.95ms +[2025-09-02 05:03:24] [Rank 0] step:6301/10000 train_time:472284ms step_avg:74.95ms +[2025-09-02 05:03:25] [Rank 0] step:6321/10000 train_time:473861ms step_avg:74.97ms +[2025-09-02 05:03:25] [Rank 0] step:6321/10000 train_time:473861ms step_avg:74.97ms +[2025-09-02 05:03:27] [Rank 0] step:6341/10000 train_time:475441ms step_avg:74.98ms +[2025-09-02 05:03:27] [Rank 0] step:6341/10000 train_time:475441ms step_avg:74.98ms +[2025-09-02 05:03:28] [Rank 0] step:6361/10000 train_time:477022ms step_avg:74.99ms +[2025-09-02 05:03:28] [Rank 0] step:6361/10000 train_time:477022ms step_avg:74.99ms +[2025-09-02 05:03:30] [Rank 0] step:6381/10000 train_time:478608ms step_avg:75.01ms +[2025-09-02 05:03:30] [Rank 0] step:6381/10000 train_time:478608ms step_avg:75.01ms +[2025-09-02 05:03:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:03:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:03:43] [Rank 0] PRINT: step:6400/10000 val_loss:3.9499 svd_entropy: attn_qk:H=0.7355,top10E=0.29,eRank=140.2,q75/q25=97.24 attn_vo:H=0.8031,top10E=0.18,eRank=243.6,q75/q25=97.35 mlp_w1:H=0.8039,top10E=0.24,eRank=224.7,q75/q25=13.86 mlp_w2:H=0.8715,top10E=0.12,eRank=330.4,q75/q25=15.71 vo_prod:H=0.7093,top10E=0.28,eRank=117.5,q75/q25=14266.60 train_time:480344ms step_avg:75.05ms +[2025-09-02 05:03:43] [Rank 0] PRINT: step:6400/10000 val_loss:3.9499 svd_entropy: attn_qk:H=0.7355,top10E=0.29,eRank=140.2,q75/q25=97.24 attn_vo:H=0.8031,top10E=0.18,eRank=243.6,q75/q25=97.35 mlp_w1:H=0.8039,top10E=0.24,eRank=224.7,q75/q25=13.86 mlp_w2:H=0.8715,top10E=0.12,eRank=330.4,q75/q25=15.71 vo_prod:H=0.7093,top10E=0.28,eRank=117.5,q75/q25=14266.60 train_time:480344ms step_avg:75.05ms +[2025-09-02 05:03:43] [Rank 0] step:6401/10000 train_time:480355ms step_avg:75.04ms +[2025-09-02 05:03:43] [Rank 0] step:6401/10000 train_time:480355ms step_avg:75.04ms +[2025-09-02 05:03:45] [Rank 0] step:6421/10000 train_time:481786ms step_avg:75.03ms +[2025-09-02 05:03:45] [Rank 0] step:6421/10000 train_time:481786ms step_avg:75.03ms +[2025-09-02 05:03:46] [Rank 0] step:6441/10000 train_time:483361ms step_avg:75.04ms +[2025-09-02 05:03:46] [Rank 0] step:6441/10000 train_time:483361ms step_avg:75.04ms +[2025-09-02 05:03:48] [Rank 0] step:6461/10000 train_time:484939ms step_avg:75.06ms +[2025-09-02 05:03:48] [Rank 0] step:6461/10000 train_time:484939ms step_avg:75.06ms +[2025-09-02 05:03:50] [Rank 0] step:6481/10000 train_time:486522ms step_avg:75.07ms +[2025-09-02 05:03:50] [Rank 0] step:6481/10000 train_time:486522ms step_avg:75.07ms +[2025-09-02 05:03:51] [Rank 0] step:6501/10000 train_time:488095ms step_avg:75.08ms +[2025-09-02 05:03:51] [Rank 0] step:6501/10000 train_time:488095ms step_avg:75.08ms +[2025-09-02 05:03:53] [Rank 0] step:6521/10000 train_time:489670ms step_avg:75.09ms +[2025-09-02 05:03:53] [Rank 0] step:6521/10000 train_time:489670ms step_avg:75.09ms +[2025-09-02 
05:03:54] [Rank 0] step:6541/10000 train_time:491247ms step_avg:75.10ms +[2025-09-02 05:03:54] [Rank 0] step:6541/10000 train_time:491247ms step_avg:75.10ms +[2025-09-02 05:03:56] [Rank 0] step:6561/10000 train_time:492827ms step_avg:75.11ms +[2025-09-02 05:03:56] [Rank 0] step:6561/10000 train_time:492827ms step_avg:75.11ms +[2025-09-02 05:03:57] [Rank 0] step:6581/10000 train_time:494401ms step_avg:75.13ms +[2025-09-02 05:03:57] [Rank 0] step:6581/10000 train_time:494401ms step_avg:75.13ms +[2025-09-02 05:03:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:03:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:04:11] [Rank 0] PRINT: step:6600/10000 val_loss:3.9407 svd_entropy: attn_qk:H=0.7373,top10E=0.28,eRank=141.7,q75/q25=96.93 attn_vo:H=0.8047,top10E=0.18,eRank=245.6,q75/q25=96.99 mlp_w1:H=0.8061,top10E=0.24,eRank=227.8,q75/q25=13.93 mlp_w2:H=0.8727,top10E=0.12,eRank=333.1,q75/q25=15.51 vo_prod:H=0.7113,top10E=0.27,eRank=119.1,q75/q25=13918.23 train_time:496137ms step_avg:75.17ms +[2025-09-02 05:04:11] [Rank 0] PRINT: step:6600/10000 val_loss:3.9407 svd_entropy: attn_qk:H=0.7373,top10E=0.28,eRank=141.7,q75/q25=96.93 attn_vo:H=0.8047,top10E=0.18,eRank=245.6,q75/q25=96.99 mlp_w1:H=0.8061,top10E=0.24,eRank=227.8,q75/q25=13.93 mlp_w2:H=0.8727,top10E=0.12,eRank=333.1,q75/q25=15.51 vo_prod:H=0.7113,top10E=0.27,eRank=119.1,q75/q25=13918.23 train_time:496137ms step_avg:75.17ms +[2025-09-02 05:04:11] [Rank 0] step:6601/10000 train_time:496147ms step_avg:75.16ms +[2025-09-02 05:04:11] [Rank 0] step:6601/10000 train_time:496147ms step_avg:75.16ms +[2025-09-02 05:04:12] [Rank 0] step:6621/10000 train_time:497585ms step_avg:75.15ms +[2025-09-02 05:04:12] [Rank 0] step:6621/10000 train_time:497585ms step_avg:75.15ms +[2025-09-02 05:04:14] [Rank 0] step:6641/10000 train_time:499164ms 
step_avg:75.16ms +[2025-09-02 05:04:14] [Rank 0] step:6641/10000 train_time:499164ms step_avg:75.16ms +[2025-09-02 05:04:16] [Rank 0] step:6661/10000 train_time:500740ms step_avg:75.17ms +[2025-09-02 05:04:16] [Rank 0] step:6661/10000 train_time:500740ms step_avg:75.17ms +[2025-09-02 05:04:17] [Rank 0] step:6681/10000 train_time:502332ms step_avg:75.19ms +[2025-09-02 05:04:17] [Rank 0] step:6681/10000 train_time:502332ms step_avg:75.19ms +[2025-09-02 05:04:19] [Rank 0] step:6701/10000 train_time:503944ms step_avg:75.20ms +[2025-09-02 05:04:19] [Rank 0] step:6701/10000 train_time:503944ms step_avg:75.20ms +[2025-09-02 05:04:20] [Rank 0] step:6721/10000 train_time:505549ms step_avg:75.22ms +[2025-09-02 05:04:20] [Rank 0] step:6721/10000 train_time:505549ms step_avg:75.22ms +[2025-09-02 05:04:22] [Rank 0] step:6741/10000 train_time:507150ms step_avg:75.23ms +[2025-09-02 05:04:22] [Rank 0] step:6741/10000 train_time:507150ms step_avg:75.23ms +[2025-09-02 05:04:24] [Rank 0] step:6761/10000 train_time:508755ms step_avg:75.25ms +[2025-09-02 05:04:24] [Rank 0] step:6761/10000 train_time:508755ms step_avg:75.25ms +[2025-09-02 05:04:25] [Rank 0] step:6781/10000 train_time:510361ms step_avg:75.26ms +[2025-09-02 05:04:25] [Rank 0] step:6781/10000 train_time:510361ms step_avg:75.26ms +[2025-09-02 05:04:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:04:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:04:38] [Rank 0] PRINT: step:6800/10000 val_loss:3.9210 svd_entropy: attn_qk:H=0.7389,top10E=0.28,eRank=143.1,q75/q25=97.44 attn_vo:H=0.8062,top10E=0.18,eRank=247.4,q75/q25=96.16 mlp_w1:H=0.8082,top10E=0.24,eRank=230.6,q75/q25=13.95 mlp_w2:H=0.8738,top10E=0.12,eRank=335.6,q75/q25=15.44 vo_prod:H=0.7131,top10E=0.27,eRank=120.5,q75/q25=13594.96 train_time:512133ms step_avg:75.31ms +[2025-09-02 05:04:38] [Rank 0] PRINT: step:6800/10000 val_loss:3.9210 svd_entropy: attn_qk:H=0.7389,top10E=0.28,eRank=143.1,q75/q25=97.44 attn_vo:H=0.8062,top10E=0.18,eRank=247.4,q75/q25=96.16 mlp_w1:H=0.8082,top10E=0.24,eRank=230.6,q75/q25=13.95 mlp_w2:H=0.8738,top10E=0.12,eRank=335.6,q75/q25=15.44 vo_prod:H=0.7131,top10E=0.27,eRank=120.5,q75/q25=13594.96 train_time:512133ms step_avg:75.31ms +[2025-09-02 05:04:39] [Rank 0] step:6801/10000 train_time:512143ms step_avg:75.30ms +[2025-09-02 05:04:39] [Rank 0] step:6801/10000 train_time:512143ms step_avg:75.30ms +[2025-09-02 05:04:40] [Rank 0] step:6821/10000 train_time:513600ms step_avg:75.30ms +[2025-09-02 05:04:40] [Rank 0] step:6821/10000 train_time:513600ms step_avg:75.30ms +[2025-09-02 05:04:42] [Rank 0] step:6841/10000 train_time:515201ms step_avg:75.31ms +[2025-09-02 05:04:42] [Rank 0] step:6841/10000 train_time:515201ms step_avg:75.31ms +[2025-09-02 05:04:43] [Rank 0] step:6861/10000 train_time:516810ms step_avg:75.33ms +[2025-09-02 05:04:43] [Rank 0] step:6861/10000 train_time:516810ms step_avg:75.33ms +[2025-09-02 05:04:45] [Rank 0] step:6881/10000 train_time:518414ms step_avg:75.34ms +[2025-09-02 05:04:45] [Rank 0] step:6881/10000 train_time:518414ms step_avg:75.34ms +[2025-09-02 05:04:47] [Rank 0] step:6901/10000 train_time:520020ms step_avg:75.35ms +[2025-09-02 05:04:47] [Rank 0] step:6901/10000 train_time:520020ms step_avg:75.35ms +[2025-09-02 05:04:48] [Rank 0] step:6921/10000 train_time:521625ms step_avg:75.37ms +[2025-09-02 05:04:48] [Rank 0] step:6921/10000 train_time:521625ms step_avg:75.37ms +[2025-09-02 
05:04:50] [Rank 0] step:6941/10000 train_time:523237ms step_avg:75.38ms +[2025-09-02 05:04:50] [Rank 0] step:6941/10000 train_time:523237ms step_avg:75.38ms +[2025-09-02 05:04:51] [Rank 0] step:6961/10000 train_time:524857ms step_avg:75.40ms +[2025-09-02 05:04:51] [Rank 0] step:6961/10000 train_time:524857ms step_avg:75.40ms +[2025-09-02 05:04:53] [Rank 0] step:6981/10000 train_time:526467ms step_avg:75.41ms +[2025-09-02 05:04:53] [Rank 0] step:6981/10000 train_time:526467ms step_avg:75.41ms +[2025-09-02 05:04:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:04:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:05:06] [Rank 0] PRINT: step:7000/10000 val_loss:3.9089 svd_entropy: attn_qk:H=0.7403,top10E=0.28,eRank=144.3,q75/q25=97.21 attn_vo:H=0.8075,top10E=0.18,eRank=249.0,q75/q25=96.00 mlp_w1:H=0.8100,top10E=0.24,eRank=233.2,q75/q25=14.01 mlp_w2:H=0.8749,top10E=0.12,eRank=338.0,q75/q25=15.35 vo_prod:H=0.7150,top10E=0.27,eRank=122.0,q75/q25=13468.98 train_time:528241ms step_avg:75.46ms +[2025-09-02 05:05:06] [Rank 0] PRINT: step:7000/10000 val_loss:3.9089 svd_entropy: attn_qk:H=0.7403,top10E=0.28,eRank=144.3,q75/q25=97.21 attn_vo:H=0.8075,top10E=0.18,eRank=249.0,q75/q25=96.00 mlp_w1:H=0.8100,top10E=0.24,eRank=233.2,q75/q25=14.01 mlp_w2:H=0.8749,top10E=0.12,eRank=338.0,q75/q25=15.35 vo_prod:H=0.7150,top10E=0.27,eRank=122.0,q75/q25=13468.98 train_time:528241ms step_avg:75.46ms +[2025-09-02 05:05:06] [Rank 0] step:7001/10000 train_time:528252ms step_avg:75.45ms +[2025-09-02 05:05:06] [Rank 0] step:7001/10000 train_time:528252ms step_avg:75.45ms +[2025-09-02 05:05:08] [Rank 0] step:7021/10000 train_time:529705ms step_avg:75.45ms +[2025-09-02 05:05:08] [Rank 0] step:7021/10000 train_time:529705ms step_avg:75.45ms +[2025-09-02 05:05:09] [Rank 0] step:7041/10000 train_time:531310ms 
step_avg:75.46ms +[2025-09-02 05:05:09] [Rank 0] step:7041/10000 train_time:531310ms step_avg:75.46ms +[2025-09-02 05:05:11] [Rank 0] step:7061/10000 train_time:532913ms step_avg:75.47ms +[2025-09-02 05:05:11] [Rank 0] step:7061/10000 train_time:532913ms step_avg:75.47ms +[2025-09-02 05:05:13] [Rank 0] step:7081/10000 train_time:534519ms step_avg:75.49ms +[2025-09-02 05:05:13] [Rank 0] step:7081/10000 train_time:534519ms step_avg:75.49ms +[2025-09-02 05:05:14] [Rank 0] step:7101/10000 train_time:536126ms step_avg:75.50ms +[2025-09-02 05:05:14] [Rank 0] step:7101/10000 train_time:536126ms step_avg:75.50ms +[2025-09-02 05:05:16] [Rank 0] step:7121/10000 train_time:537731ms step_avg:75.51ms +[2025-09-02 05:05:16] [Rank 0] step:7121/10000 train_time:537731ms step_avg:75.51ms +[2025-09-02 05:05:17] [Rank 0] step:7141/10000 train_time:539339ms step_avg:75.53ms +[2025-09-02 05:05:17] [Rank 0] step:7141/10000 train_time:539339ms step_avg:75.53ms +[2025-09-02 05:05:19] [Rank 0] step:7161/10000 train_time:540947ms step_avg:75.54ms +[2025-09-02 05:05:19] [Rank 0] step:7161/10000 train_time:540947ms step_avg:75.54ms +[2025-09-02 05:05:21] [Rank 0] step:7181/10000 train_time:542556ms step_avg:75.55ms +[2025-09-02 05:05:21] [Rank 0] step:7181/10000 train_time:542556ms step_avg:75.55ms +[2025-09-02 05:05:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:05:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:05:34] [Rank 0] PRINT: step:7200/10000 val_loss:3.8959 svd_entropy: attn_qk:H=0.7416,top10E=0.28,eRank=145.5,q75/q25=97.47 attn_vo:H=0.8088,top10E=0.17,eRank=250.6,q75/q25=95.09 mlp_w1:H=0.8117,top10E=0.23,eRank=235.6,q75/q25=14.07 mlp_w2:H=0.8759,top10E=0.12,eRank=340.2,q75/q25=15.23 vo_prod:H=0.7166,top10E=0.27,eRank=123.3,q75/q25=12810.27 train_time:544327ms step_avg:75.60ms +[2025-09-02 05:05:34] [Rank 0] PRINT: step:7200/10000 val_loss:3.8959 svd_entropy: attn_qk:H=0.7416,top10E=0.28,eRank=145.5,q75/q25=97.47 attn_vo:H=0.8088,top10E=0.17,eRank=250.6,q75/q25=95.09 mlp_w1:H=0.8117,top10E=0.23,eRank=235.6,q75/q25=14.07 mlp_w2:H=0.8759,top10E=0.12,eRank=340.2,q75/q25=15.23 vo_prod:H=0.7166,top10E=0.27,eRank=123.3,q75/q25=12810.27 train_time:544327ms step_avg:75.60ms +[2025-09-02 05:05:34] [Rank 0] step:7201/10000 train_time:544338ms step_avg:75.59ms +[2025-09-02 05:05:34] [Rank 0] step:7201/10000 train_time:544338ms step_avg:75.59ms +[2025-09-02 05:05:36] [Rank 0] step:7221/10000 train_time:545809ms step_avg:75.59ms +[2025-09-02 05:05:36] [Rank 0] step:7221/10000 train_time:545809ms step_avg:75.59ms +[2025-09-02 05:05:37] [Rank 0] step:7241/10000 train_time:547409ms step_avg:75.60ms +[2025-09-02 05:05:37] [Rank 0] step:7241/10000 train_time:547409ms step_avg:75.60ms +[2025-09-02 05:05:39] [Rank 0] step:7261/10000 train_time:549013ms step_avg:75.61ms +[2025-09-02 05:05:39] [Rank 0] step:7261/10000 train_time:549013ms step_avg:75.61ms +[2025-09-02 05:05:40] [Rank 0] step:7281/10000 train_time:550631ms step_avg:75.63ms +[2025-09-02 05:05:40] [Rank 0] step:7281/10000 train_time:550631ms step_avg:75.63ms +[2025-09-02 05:05:42] [Rank 0] step:7301/10000 train_time:552236ms step_avg:75.64ms +[2025-09-02 05:05:42] [Rank 0] step:7301/10000 train_time:552236ms step_avg:75.64ms +[2025-09-02 05:05:44] [Rank 0] step:7321/10000 train_time:553850ms step_avg:75.65ms +[2025-09-02 05:05:44] [Rank 0] step:7321/10000 train_time:553850ms step_avg:75.65ms +[2025-09-02 
05:05:45] [Rank 0] step:7341/10000 train_time:555459ms step_avg:75.67ms +[2025-09-02 05:05:45] [Rank 0] step:7341/10000 train_time:555459ms step_avg:75.67ms +[2025-09-02 05:05:47] [Rank 0] step:7361/10000 train_time:557071ms step_avg:75.68ms +[2025-09-02 05:05:47] [Rank 0] step:7361/10000 train_time:557071ms step_avg:75.68ms +[2025-09-02 05:05:49] [Rank 0] step:7381/10000 train_time:558688ms step_avg:75.69ms +[2025-09-02 05:05:49] [Rank 0] step:7381/10000 train_time:558688ms step_avg:75.69ms +[2025-09-02 05:05:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:05:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:06:02] [Rank 0] PRINT: step:7400/10000 val_loss:3.8767 svd_entropy: attn_qk:H=0.7428,top10E=0.28,eRank=146.6,q75/q25=97.23 attn_vo:H=0.8099,top10E=0.17,eRank=251.9,q75/q25=94.40 mlp_w1:H=0.8133,top10E=0.23,eRank=237.8,q75/q25=14.08 mlp_w2:H=0.8768,top10E=0.12,eRank=342.2,q75/q25=15.14 vo_prod:H=0.7180,top10E=0.27,eRank=124.5,q75/q25=12733.76 train_time:560441ms step_avg:75.74ms +[2025-09-02 05:06:02] [Rank 0] PRINT: step:7400/10000 val_loss:3.8767 svd_entropy: attn_qk:H=0.7428,top10E=0.28,eRank=146.6,q75/q25=97.23 attn_vo:H=0.8099,top10E=0.17,eRank=251.9,q75/q25=94.40 mlp_w1:H=0.8133,top10E=0.23,eRank=237.8,q75/q25=14.08 mlp_w2:H=0.8768,top10E=0.12,eRank=342.2,q75/q25=15.14 vo_prod:H=0.7180,top10E=0.27,eRank=124.5,q75/q25=12733.76 train_time:560441ms step_avg:75.74ms +[2025-09-02 05:06:02] [Rank 0] step:7401/10000 train_time:560452ms step_avg:75.73ms +[2025-09-02 05:06:02] [Rank 0] step:7401/10000 train_time:560452ms step_avg:75.73ms +[2025-09-02 05:06:03] [Rank 0] step:7421/10000 train_time:561928ms step_avg:75.72ms +[2025-09-02 05:06:03] [Rank 0] step:7421/10000 train_time:561928ms step_avg:75.72ms +[2025-09-02 05:06:05] [Rank 0] step:7441/10000 train_time:563531ms 
step_avg:75.73ms +[2025-09-02 05:06:05] [Rank 0] step:7441/10000 train_time:563531ms step_avg:75.73ms +[2025-09-02 05:06:07] [Rank 0] step:7461/10000 train_time:565137ms step_avg:75.75ms +[2025-09-02 05:06:07] [Rank 0] step:7461/10000 train_time:565137ms step_avg:75.75ms +[2025-09-02 05:06:08] [Rank 0] step:7481/10000 train_time:566749ms step_avg:75.76ms +[2025-09-02 05:06:08] [Rank 0] step:7481/10000 train_time:566749ms step_avg:75.76ms +[2025-09-02 05:06:10] [Rank 0] step:7501/10000 train_time:568360ms step_avg:75.77ms +[2025-09-02 05:06:10] [Rank 0] step:7501/10000 train_time:568360ms step_avg:75.77ms +[2025-09-02 05:06:12] [Rank 0] step:7521/10000 train_time:569973ms step_avg:75.78ms +[2025-09-02 05:06:12] [Rank 0] step:7521/10000 train_time:569973ms step_avg:75.78ms +[2025-09-02 05:06:13] [Rank 0] step:7541/10000 train_time:571595ms step_avg:75.80ms +[2025-09-02 05:06:13] [Rank 0] step:7541/10000 train_time:571595ms step_avg:75.80ms +[2025-09-02 05:06:15] [Rank 0] step:7561/10000 train_time:573195ms step_avg:75.81ms +[2025-09-02 05:06:15] [Rank 0] step:7561/10000 train_time:573195ms step_avg:75.81ms +[2025-09-02 05:06:16] [Rank 0] step:7581/10000 train_time:574818ms step_avg:75.82ms +[2025-09-02 05:06:16] [Rank 0] step:7581/10000 train_time:574818ms step_avg:75.82ms +[2025-09-02 05:06:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:06:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:06:30] [Rank 0] PRINT: step:7600/10000 val_loss:3.8723 svd_entropy: attn_qk:H=0.7440,top10E=0.28,eRank=147.7,q75/q25=96.12 attn_vo:H=0.8108,top10E=0.17,eRank=253.1,q75/q25=93.23 mlp_w1:H=0.8147,top10E=0.23,eRank=239.8,q75/q25=14.06 mlp_w2:H=0.8776,top10E=0.12,eRank=344.1,q75/q25=15.00 vo_prod:H=0.7192,top10E=0.26,eRank=125.5,q75/q25=12065.63 train_time:576607ms step_avg:75.87ms +[2025-09-02 05:06:30] [Rank 0] PRINT: step:7600/10000 val_loss:3.8723 svd_entropy: attn_qk:H=0.7440,top10E=0.28,eRank=147.7,q75/q25=96.12 attn_vo:H=0.8108,top10E=0.17,eRank=253.1,q75/q25=93.23 mlp_w1:H=0.8147,top10E=0.23,eRank=239.8,q75/q25=14.06 mlp_w2:H=0.8776,top10E=0.12,eRank=344.1,q75/q25=15.00 vo_prod:H=0.7192,top10E=0.26,eRank=125.5,q75/q25=12065.63 train_time:576607ms step_avg:75.87ms +[2025-09-02 05:06:30] [Rank 0] step:7601/10000 train_time:576618ms step_avg:75.86ms +[2025-09-02 05:06:30] [Rank 0] step:7601/10000 train_time:576618ms step_avg:75.86ms +[2025-09-02 05:06:31] [Rank 0] step:7621/10000 train_time:578071ms step_avg:75.85ms +[2025-09-02 05:06:31] [Rank 0] step:7621/10000 train_time:578071ms step_avg:75.85ms +[2025-09-02 05:06:33] [Rank 0] step:7641/10000 train_time:579678ms step_avg:75.86ms +[2025-09-02 05:06:33] [Rank 0] step:7641/10000 train_time:579678ms step_avg:75.86ms +[2025-09-02 05:06:35] [Rank 0] step:7661/10000 train_time:581292ms step_avg:75.88ms +[2025-09-02 05:06:35] [Rank 0] step:7661/10000 train_time:581292ms step_avg:75.88ms +[2025-09-02 05:06:36] [Rank 0] step:7681/10000 train_time:582898ms step_avg:75.89ms +[2025-09-02 05:06:36] [Rank 0] step:7681/10000 train_time:582898ms step_avg:75.89ms +[2025-09-02 05:06:38] [Rank 0] step:7701/10000 train_time:584508ms step_avg:75.90ms +[2025-09-02 05:06:38] [Rank 0] step:7701/10000 train_time:584508ms step_avg:75.90ms +[2025-09-02 05:06:39] [Rank 0] step:7721/10000 train_time:586131ms step_avg:75.91ms +[2025-09-02 05:06:39] [Rank 0] step:7721/10000 train_time:586131ms step_avg:75.91ms +[2025-09-02 
05:06:41] [Rank 0] step:7741/10000 train_time:587745ms step_avg:75.93ms +[2025-09-02 05:06:41] [Rank 0] step:7741/10000 train_time:587745ms step_avg:75.93ms +[2025-09-02 05:06:43] [Rank 0] step:7761/10000 train_time:589363ms step_avg:75.94ms +[2025-09-02 05:06:43] [Rank 0] step:7761/10000 train_time:589363ms step_avg:75.94ms +[2025-09-02 05:06:44] [Rank 0] step:7781/10000 train_time:590985ms step_avg:75.95ms +[2025-09-02 05:06:44] [Rank 0] step:7781/10000 train_time:590985ms step_avg:75.95ms +[2025-09-02 05:06:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:06:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:06:58] [Rank 0] PRINT: step:7800/10000 val_loss:3.8594 svd_entropy: attn_qk:H=0.7450,top10E=0.27,eRank=148.6,q75/q25=96.15 attn_vo:H=0.8117,top10E=0.17,eRank=254.2,q75/q25=92.66 mlp_w1:H=0.8159,top10E=0.23,eRank=241.6,q75/q25=14.06 mlp_w2:H=0.8784,top10E=0.11,eRank=345.9,q75/q25=14.85 vo_prod:H=0.7205,top10E=0.26,eRank=126.5,q75/q25=11821.85 train_time:592769ms step_avg:76.00ms +[2025-09-02 05:06:58] [Rank 0] PRINT: step:7800/10000 val_loss:3.8594 svd_entropy: attn_qk:H=0.7450,top10E=0.27,eRank=148.6,q75/q25=96.15 attn_vo:H=0.8117,top10E=0.17,eRank=254.2,q75/q25=92.66 mlp_w1:H=0.8159,top10E=0.23,eRank=241.6,q75/q25=14.06 mlp_w2:H=0.8784,top10E=0.11,eRank=345.9,q75/q25=14.85 vo_prod:H=0.7205,top10E=0.26,eRank=126.5,q75/q25=11821.85 train_time:592769ms step_avg:76.00ms +[2025-09-02 05:06:58] [Rank 0] step:7801/10000 train_time:592780ms step_avg:75.99ms +[2025-09-02 05:06:58] [Rank 0] step:7801/10000 train_time:592780ms step_avg:75.99ms +[2025-09-02 05:06:59] [Rank 0] step:7821/10000 train_time:594229ms step_avg:75.98ms +[2025-09-02 05:06:59] [Rank 0] step:7821/10000 train_time:594229ms step_avg:75.98ms +[2025-09-02 05:07:01] [Rank 0] step:7841/10000 train_time:595839ms 
step_avg:75.99ms +[2025-09-02 05:07:01] [Rank 0] step:7841/10000 train_time:595839ms step_avg:75.99ms +[2025-09-02 05:07:03] [Rank 0] step:7861/10000 train_time:597452ms step_avg:76.00ms +[2025-09-02 05:07:03] [Rank 0] step:7861/10000 train_time:597452ms step_avg:76.00ms +[2025-09-02 05:07:04] [Rank 0] step:7881/10000 train_time:599066ms step_avg:76.01ms +[2025-09-02 05:07:04] [Rank 0] step:7881/10000 train_time:599066ms step_avg:76.01ms +[2025-09-02 05:07:06] [Rank 0] step:7901/10000 train_time:600679ms step_avg:76.03ms +[2025-09-02 05:07:06] [Rank 0] step:7901/10000 train_time:600679ms step_avg:76.03ms +[2025-09-02 05:07:07] [Rank 0] step:7921/10000 train_time:602298ms step_avg:76.04ms +[2025-09-02 05:07:07] [Rank 0] step:7921/10000 train_time:602298ms step_avg:76.04ms +[2025-09-02 05:07:09] [Rank 0] step:7941/10000 train_time:603925ms step_avg:76.05ms +[2025-09-02 05:07:09] [Rank 0] step:7941/10000 train_time:603925ms step_avg:76.05ms +[2025-09-02 05:07:11] [Rank 0] step:7961/10000 train_time:605546ms step_avg:76.06ms +[2025-09-02 05:07:11] [Rank 0] step:7961/10000 train_time:605546ms step_avg:76.06ms +[2025-09-02 05:07:12] [Rank 0] step:7981/10000 train_time:607161ms step_avg:76.08ms +[2025-09-02 05:07:12] [Rank 0] step:7981/10000 train_time:607161ms step_avg:76.08ms +[2025-09-02 05:07:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:07:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:07:25] [Rank 0] PRINT: step:8000/10000 val_loss:3.8416 svd_entropy: attn_qk:H=0.7460,top10E=0.27,eRank=149.5,q75/q25=96.15 attn_vo:H=0.8125,top10E=0.17,eRank=255.3,q75/q25=92.00 mlp_w1:H=0.8170,top10E=0.23,eRank=243.2,q75/q25=14.04 mlp_w2:H=0.8791,top10E=0.11,eRank=347.5,q75/q25=14.74 vo_prod:H=0.7217,top10E=0.26,eRank=127.5,q75/q25=11566.66 train_time:608932ms step_avg:76.12ms +[2025-09-02 05:07:25] [Rank 0] PRINT: step:8000/10000 val_loss:3.8416 svd_entropy: attn_qk:H=0.7460,top10E=0.27,eRank=149.5,q75/q25=96.15 attn_vo:H=0.8125,top10E=0.17,eRank=255.3,q75/q25=92.00 mlp_w1:H=0.8170,top10E=0.23,eRank=243.2,q75/q25=14.04 mlp_w2:H=0.8791,top10E=0.11,eRank=347.5,q75/q25=14.74 vo_prod:H=0.7217,top10E=0.26,eRank=127.5,q75/q25=11566.66 train_time:608932ms step_avg:76.12ms +[2025-09-02 05:07:26] [Rank 0] step:8001/10000 train_time:608943ms step_avg:76.11ms +[2025-09-02 05:07:26] [Rank 0] step:8001/10000 train_time:608943ms step_avg:76.11ms +[2025-09-02 05:07:27] [Rank 0] step:8021/10000 train_time:610401ms step_avg:76.10ms +[2025-09-02 05:07:27] [Rank 0] step:8021/10000 train_time:610401ms step_avg:76.10ms +[2025-09-02 05:07:29] [Rank 0] step:8041/10000 train_time:612023ms step_avg:76.11ms +[2025-09-02 05:07:29] [Rank 0] step:8041/10000 train_time:612023ms step_avg:76.11ms +[2025-09-02 05:07:30] [Rank 0] step:8061/10000 train_time:613637ms step_avg:76.12ms +[2025-09-02 05:07:30] [Rank 0] step:8061/10000 train_time:613637ms step_avg:76.12ms +[2025-09-02 05:07:32] [Rank 0] step:8081/10000 train_time:615244ms step_avg:76.13ms +[2025-09-02 05:07:32] [Rank 0] step:8081/10000 train_time:615244ms step_avg:76.13ms +[2025-09-02 05:07:34] [Rank 0] step:8101/10000 train_time:616867ms step_avg:76.15ms +[2025-09-02 05:07:34] [Rank 0] step:8101/10000 train_time:616867ms step_avg:76.15ms +[2025-09-02 05:07:35] [Rank 0] step:8121/10000 train_time:618483ms step_avg:76.16ms +[2025-09-02 05:07:35] [Rank 0] step:8121/10000 train_time:618483ms step_avg:76.16ms +[2025-09-02 
05:07:37] [Rank 0] step:8141/10000 train_time:620192ms step_avg:76.18ms +[2025-09-02 05:07:37] [Rank 0] step:8141/10000 train_time:620192ms step_avg:76.18ms +[2025-09-02 05:07:39] [Rank 0] step:8161/10000 train_time:621822ms step_avg:76.19ms +[2025-09-02 05:07:39] [Rank 0] step:8161/10000 train_time:621822ms step_avg:76.19ms +[2025-09-02 05:07:40] [Rank 0] step:8181/10000 train_time:623468ms step_avg:76.21ms +[2025-09-02 05:07:40] [Rank 0] step:8181/10000 train_time:623468ms step_avg:76.21ms +[2025-09-02 05:07:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:07:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:07:54] [Rank 0] PRINT: step:8200/10000 val_loss:3.8338 svd_entropy: attn_qk:H=0.7468,top10E=0.27,eRank=150.2,q75/q25=95.39 attn_vo:H=0.8133,top10E=0.17,eRank=256.2,q75/q25=91.59 mlp_w1:H=0.8180,top10E=0.22,eRank=244.7,q75/q25=14.04 mlp_w2:H=0.8798,top10E=0.11,eRank=349.1,q75/q25=14.62 vo_prod:H=0.7228,top10E=0.26,eRank=128.4,q75/q25=11360.27 train_time:625296ms step_avg:76.26ms +[2025-09-02 05:07:54] [Rank 0] PRINT: step:8200/10000 val_loss:3.8338 svd_entropy: attn_qk:H=0.7468,top10E=0.27,eRank=150.2,q75/q25=95.39 attn_vo:H=0.8133,top10E=0.17,eRank=256.2,q75/q25=91.59 mlp_w1:H=0.8180,top10E=0.22,eRank=244.7,q75/q25=14.04 mlp_w2:H=0.8798,top10E=0.11,eRank=349.1,q75/q25=14.62 vo_prod:H=0.7228,top10E=0.26,eRank=128.4,q75/q25=11360.27 train_time:625296ms step_avg:76.26ms +[2025-09-02 05:07:54] [Rank 0] step:8201/10000 train_time:625307ms step_avg:76.25ms +[2025-09-02 05:07:54] [Rank 0] step:8201/10000 train_time:625307ms step_avg:76.25ms +[2025-09-02 05:07:55] [Rank 0] step:8221/10000 train_time:626799ms step_avg:76.24ms +[2025-09-02 05:07:55] [Rank 0] step:8221/10000 train_time:626799ms step_avg:76.24ms +[2025-09-02 05:07:57] [Rank 0] step:8241/10000 train_time:628445ms 
step_avg:76.26ms +[2025-09-02 05:07:57] [Rank 0] step:8241/10000 train_time:628445ms step_avg:76.26ms +[2025-09-02 05:07:59] [Rank 0] step:8261/10000 train_time:630081ms step_avg:76.27ms +[2025-09-02 05:07:59] [Rank 0] step:8261/10000 train_time:630081ms step_avg:76.27ms +[2025-09-02 05:08:00] [Rank 0] step:8281/10000 train_time:631722ms step_avg:76.29ms +[2025-09-02 05:08:00] [Rank 0] step:8281/10000 train_time:631722ms step_avg:76.29ms +[2025-09-02 05:08:02] [Rank 0] step:8301/10000 train_time:633357ms step_avg:76.30ms +[2025-09-02 05:08:02] [Rank 0] step:8301/10000 train_time:633357ms step_avg:76.30ms +[2025-09-02 05:08:03] [Rank 0] step:8321/10000 train_time:634986ms step_avg:76.31ms +[2025-09-02 05:08:03] [Rank 0] step:8321/10000 train_time:634986ms step_avg:76.31ms +[2025-09-02 05:08:05] [Rank 0] step:8341/10000 train_time:636623ms step_avg:76.32ms +[2025-09-02 05:08:05] [Rank 0] step:8341/10000 train_time:636623ms step_avg:76.32ms +[2025-09-02 05:08:07] [Rank 0] step:8361/10000 train_time:638261ms step_avg:76.34ms +[2025-09-02 05:08:07] [Rank 0] step:8361/10000 train_time:638261ms step_avg:76.34ms +[2025-09-02 05:08:08] [Rank 0] step:8381/10000 train_time:639904ms step_avg:76.35ms +[2025-09-02 05:08:08] [Rank 0] step:8381/10000 train_time:639904ms step_avg:76.35ms +[2025-09-02 05:08:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:08:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:08:22] [Rank 0] PRINT: step:8400/10000 val_loss:3.8224 svd_entropy: attn_qk:H=0.7475,top10E=0.27,eRank=150.9,q75/q25=95.43 attn_vo:H=0.8139,top10E=0.17,eRank=257.1,q75/q25=91.03 mlp_w1:H=0.8189,top10E=0.22,eRank=246.0,q75/q25=14.00 mlp_w2:H=0.8804,top10E=0.11,eRank=350.5,q75/q25=14.56 vo_prod:H=0.7237,top10E=0.26,eRank=129.2,q75/q25=11212.11 train_time:641706ms step_avg:76.39ms +[2025-09-02 05:08:22] [Rank 0] PRINT: step:8400/10000 val_loss:3.8224 svd_entropy: attn_qk:H=0.7475,top10E=0.27,eRank=150.9,q75/q25=95.43 attn_vo:H=0.8139,top10E=0.17,eRank=257.1,q75/q25=91.03 mlp_w1:H=0.8189,top10E=0.22,eRank=246.0,q75/q25=14.00 mlp_w2:H=0.8804,top10E=0.11,eRank=350.5,q75/q25=14.56 vo_prod:H=0.7237,top10E=0.26,eRank=129.2,q75/q25=11212.11 train_time:641706ms step_avg:76.39ms +[2025-09-02 05:08:22] [Rank 0] step:8401/10000 train_time:641718ms step_avg:76.39ms +[2025-09-02 05:08:22] [Rank 0] step:8401/10000 train_time:641718ms step_avg:76.39ms +[2025-09-02 05:08:23] [Rank 0] step:8421/10000 train_time:643197ms step_avg:76.38ms +[2025-09-02 05:08:23] [Rank 0] step:8421/10000 train_time:643197ms step_avg:76.38ms +[2025-09-02 05:08:25] [Rank 0] step:8441/10000 train_time:644837ms step_avg:76.39ms +[2025-09-02 05:08:25] [Rank 0] step:8441/10000 train_time:644837ms step_avg:76.39ms +[2025-09-02 05:08:27] [Rank 0] step:8461/10000 train_time:646469ms step_avg:76.41ms +[2025-09-02 05:08:27] [Rank 0] step:8461/10000 train_time:646469ms step_avg:76.41ms +[2025-09-02 05:08:28] [Rank 0] step:8481/10000 train_time:648113ms step_avg:76.42ms +[2025-09-02 05:08:28] [Rank 0] step:8481/10000 train_time:648113ms step_avg:76.42ms +[2025-09-02 05:08:30] [Rank 0] step:8501/10000 train_time:649774ms step_avg:76.43ms +[2025-09-02 05:08:30] [Rank 0] step:8501/10000 train_time:649774ms step_avg:76.43ms +[2025-09-02 05:08:32] [Rank 0] step:8521/10000 train_time:651420ms step_avg:76.45ms +[2025-09-02 05:08:32] [Rank 0] step:8521/10000 train_time:651420ms step_avg:76.45ms +[2025-09-02 
05:08:33] [Rank 0] step:8541/10000 train_time:653072ms step_avg:76.46ms +[2025-09-02 05:08:33] [Rank 0] step:8541/10000 train_time:653072ms step_avg:76.46ms +[2025-09-02 05:08:35] [Rank 0] step:8561/10000 train_time:654716ms step_avg:76.48ms +[2025-09-02 05:08:35] [Rank 0] step:8561/10000 train_time:654716ms step_avg:76.48ms +[2025-09-02 05:08:37] [Rank 0] step:8581/10000 train_time:656360ms step_avg:76.49ms +[2025-09-02 05:08:37] [Rank 0] step:8581/10000 train_time:656360ms step_avg:76.49ms +[2025-09-02 05:08:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:08:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:08:50] [Rank 0] PRINT: step:8600/10000 val_loss:3.8135 svd_entropy: attn_qk:H=0.7482,top10E=0.27,eRank=151.6,q75/q25=95.29 attn_vo:H=0.8145,top10E=0.17,eRank=257.8,q75/q25=90.40 mlp_w1:H=0.8196,top10E=0.22,eRank=247.1,q75/q25=13.97 mlp_w2:H=0.8809,top10E=0.11,eRank=351.8,q75/q25=14.49 vo_prod:H=0.7246,top10E=0.26,eRank=129.9,q75/q25=10884.47 train_time:658156ms step_avg:76.53ms +[2025-09-02 05:08:50] [Rank 0] PRINT: step:8600/10000 val_loss:3.8135 svd_entropy: attn_qk:H=0.7482,top10E=0.27,eRank=151.6,q75/q25=95.29 attn_vo:H=0.8145,top10E=0.17,eRank=257.8,q75/q25=90.40 mlp_w1:H=0.8196,top10E=0.22,eRank=247.1,q75/q25=13.97 mlp_w2:H=0.8809,top10E=0.11,eRank=351.8,q75/q25=14.49 vo_prod:H=0.7246,top10E=0.26,eRank=129.9,q75/q25=10884.47 train_time:658156ms step_avg:76.53ms +[2025-09-02 05:08:50] [Rank 0] step:8601/10000 train_time:658167ms step_avg:76.52ms +[2025-09-02 05:08:50] [Rank 0] step:8601/10000 train_time:658167ms step_avg:76.52ms +[2025-09-02 05:08:52] [Rank 0] step:8621/10000 train_time:659671ms step_avg:76.52ms +[2025-09-02 05:08:52] [Rank 0] step:8621/10000 train_time:659671ms step_avg:76.52ms +[2025-09-02 05:08:53] [Rank 0] step:8641/10000 train_time:661307ms 
step_avg:76.53ms +[2025-09-02 05:08:53] [Rank 0] step:8641/10000 train_time:661307ms step_avg:76.53ms +[2025-09-02 05:08:55] [Rank 0] step:8661/10000 train_time:662945ms step_avg:76.54ms +[2025-09-02 05:08:55] [Rank 0] step:8661/10000 train_time:662945ms step_avg:76.54ms +[2025-09-02 05:08:57] [Rank 0] step:8681/10000 train_time:664581ms step_avg:76.56ms +[2025-09-02 05:08:57] [Rank 0] step:8681/10000 train_time:664581ms step_avg:76.56ms +[2025-09-02 05:08:58] [Rank 0] step:8701/10000 train_time:666215ms step_avg:76.57ms +[2025-09-02 05:08:58] [Rank 0] step:8701/10000 train_time:666215ms step_avg:76.57ms +[2025-09-02 05:09:00] [Rank 0] step:8721/10000 train_time:667858ms step_avg:76.58ms +[2025-09-02 05:09:00] [Rank 0] step:8721/10000 train_time:667858ms step_avg:76.58ms +[2025-09-02 05:09:01] [Rank 0] step:8741/10000 train_time:669490ms step_avg:76.59ms +[2025-09-02 05:09:01] [Rank 0] step:8741/10000 train_time:669490ms step_avg:76.59ms +[2025-09-02 05:09:03] [Rank 0] step:8761/10000 train_time:671124ms step_avg:76.60ms +[2025-09-02 05:09:03] [Rank 0] step:8761/10000 train_time:671124ms step_avg:76.60ms +[2025-09-02 05:09:05] [Rank 0] step:8781/10000 train_time:672771ms step_avg:76.62ms +[2025-09-02 05:09:05] [Rank 0] step:8781/10000 train_time:672771ms step_avg:76.62ms +[2025-09-02 05:09:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:09:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:09:18] [Rank 0] PRINT: step:8800/10000 val_loss:3.8035 svd_entropy: attn_qk:H=0.7488,top10E=0.27,eRank=152.1,q75/q25=95.06 attn_vo:H=0.8150,top10E=0.17,eRank=258.5,q75/q25=90.10 mlp_w1:H=0.8203,top10E=0.22,eRank=248.1,q75/q25=13.92 mlp_w2:H=0.8815,top10E=0.11,eRank=353.0,q75/q25=14.41 vo_prod:H=0.7253,top10E=0.26,eRank=130.5,q75/q25=10867.15 train_time:674577ms step_avg:76.66ms +[2025-09-02 05:09:18] [Rank 0] PRINT: step:8800/10000 val_loss:3.8035 svd_entropy: attn_qk:H=0.7488,top10E=0.27,eRank=152.1,q75/q25=95.06 attn_vo:H=0.8150,top10E=0.17,eRank=258.5,q75/q25=90.10 mlp_w1:H=0.8203,top10E=0.22,eRank=248.1,q75/q25=13.92 mlp_w2:H=0.8815,top10E=0.11,eRank=353.0,q75/q25=14.41 vo_prod:H=0.7253,top10E=0.26,eRank=130.5,q75/q25=10867.15 train_time:674577ms step_avg:76.66ms +[2025-09-02 05:09:18] [Rank 0] step:8801/10000 train_time:674588ms step_avg:76.65ms +[2025-09-02 05:09:18] [Rank 0] step:8801/10000 train_time:674588ms step_avg:76.65ms +[2025-09-02 05:09:20] [Rank 0] step:8821/10000 train_time:676062ms step_avg:76.64ms +[2025-09-02 05:09:20] [Rank 0] step:8821/10000 train_time:676062ms step_avg:76.64ms +[2025-09-02 05:09:21] [Rank 0] step:8841/10000 train_time:677719ms step_avg:76.66ms +[2025-09-02 05:09:21] [Rank 0] step:8841/10000 train_time:677719ms step_avg:76.66ms +[2025-09-02 05:09:23] [Rank 0] step:8861/10000 train_time:679355ms step_avg:76.67ms +[2025-09-02 05:09:23] [Rank 0] step:8861/10000 train_time:679355ms step_avg:76.67ms +[2025-09-02 05:09:25] [Rank 0] step:8881/10000 train_time:680995ms step_avg:76.68ms +[2025-09-02 05:09:25] [Rank 0] step:8881/10000 train_time:680995ms step_avg:76.68ms +[2025-09-02 05:09:26] [Rank 0] step:8901/10000 train_time:682639ms step_avg:76.69ms +[2025-09-02 05:09:26] [Rank 0] step:8901/10000 train_time:682639ms step_avg:76.69ms +[2025-09-02 05:09:28] [Rank 0] step:8921/10000 train_time:684287ms step_avg:76.71ms +[2025-09-02 05:09:28] [Rank 0] step:8921/10000 train_time:684287ms step_avg:76.71ms +[2025-09-02 
05:09:30] [Rank 0] step:8941/10000 train_time:685940ms step_avg:76.72ms +[2025-09-02 05:09:30] [Rank 0] step:8941/10000 train_time:685940ms step_avg:76.72ms +[2025-09-02 05:09:31] [Rank 0] step:8961/10000 train_time:687577ms step_avg:76.73ms +[2025-09-02 05:09:31] [Rank 0] step:8961/10000 train_time:687577ms step_avg:76.73ms +[2025-09-02 05:09:33] [Rank 0] step:8981/10000 train_time:689217ms step_avg:76.74ms +[2025-09-02 05:09:33] [Rank 0] step:8981/10000 train_time:689217ms step_avg:76.74ms +[2025-09-02 05:09:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:09:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:09:46] [Rank 0] PRINT: step:9000/10000 val_loss:3.7944 svd_entropy: attn_qk:H=0.7493,top10E=0.27,eRank=152.6,q75/q25=95.16 attn_vo:H=0.8155,top10E=0.17,eRank=259.1,q75/q25=89.72 mlp_w1:H=0.8209,top10E=0.22,eRank=249.0,q75/q25=13.86 mlp_w2:H=0.8819,top10E=0.11,eRank=354.0,q75/q25=14.34 vo_prod:H=0.7260,top10E=0.26,eRank=131.1,q75/q25=10762.85 train_time:691020ms step_avg:76.78ms +[2025-09-02 05:09:46] [Rank 0] PRINT: step:9000/10000 val_loss:3.7944 svd_entropy: attn_qk:H=0.7493,top10E=0.27,eRank=152.6,q75/q25=95.16 attn_vo:H=0.8155,top10E=0.17,eRank=259.1,q75/q25=89.72 mlp_w1:H=0.8209,top10E=0.22,eRank=249.0,q75/q25=13.86 mlp_w2:H=0.8819,top10E=0.11,eRank=354.0,q75/q25=14.34 vo_prod:H=0.7260,top10E=0.26,eRank=131.1,q75/q25=10762.85 train_time:691020ms step_avg:76.78ms +[2025-09-02 05:09:47] [Rank 0] step:9001/10000 train_time:691033ms step_avg:76.77ms +[2025-09-02 05:09:47] [Rank 0] step:9001/10000 train_time:691033ms step_avg:76.77ms +[2025-09-02 05:09:48] [Rank 0] step:9021/10000 train_time:692535ms step_avg:76.77ms +[2025-09-02 05:09:48] [Rank 0] step:9021/10000 train_time:692535ms step_avg:76.77ms +[2025-09-02 05:09:50] [Rank 0] step:9041/10000 train_time:694174ms 
step_avg:76.78ms +[2025-09-02 05:09:50] [Rank 0] step:9041/10000 train_time:694174ms step_avg:76.78ms +[2025-09-02 05:09:52] [Rank 0] step:9061/10000 train_time:695829ms step_avg:76.79ms +[2025-09-02 05:09:52] [Rank 0] step:9061/10000 train_time:695829ms step_avg:76.79ms +[2025-09-02 05:09:53] [Rank 0] step:9081/10000 train_time:697477ms step_avg:76.81ms +[2025-09-02 05:09:53] [Rank 0] step:9081/10000 train_time:697477ms step_avg:76.81ms +[2025-09-02 05:09:55] [Rank 0] step:9101/10000 train_time:699140ms step_avg:76.82ms +[2025-09-02 05:09:55] [Rank 0] step:9101/10000 train_time:699140ms step_avg:76.82ms +[2025-09-02 05:09:56] [Rank 0] step:9121/10000 train_time:700785ms step_avg:76.83ms +[2025-09-02 05:09:56] [Rank 0] step:9121/10000 train_time:700785ms step_avg:76.83ms +[2025-09-02 05:09:58] [Rank 0] step:9141/10000 train_time:702420ms step_avg:76.84ms +[2025-09-02 05:09:58] [Rank 0] step:9141/10000 train_time:702420ms step_avg:76.84ms +[2025-09-02 05:10:00] [Rank 0] step:9161/10000 train_time:704052ms step_avg:76.85ms +[2025-09-02 05:10:00] [Rank 0] step:9161/10000 train_time:704052ms step_avg:76.85ms +[2025-09-02 05:10:01] [Rank 0] step:9181/10000 train_time:705724ms step_avg:76.87ms +[2025-09-02 05:10:01] [Rank 0] step:9181/10000 train_time:705724ms step_avg:76.87ms +[2025-09-02 05:10:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:10:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:10:15] [Rank 0] PRINT: step:9200/10000 val_loss:3.7868 svd_entropy: attn_qk:H=0.7498,top10E=0.27,eRank=153.0,q75/q25=94.99 attn_vo:H=0.8159,top10E=0.17,eRank=259.6,q75/q25=89.72 mlp_w1:H=0.8214,top10E=0.22,eRank=249.7,q75/q25=13.84 mlp_w2:H=0.8823,top10E=0.11,eRank=354.9,q75/q25=14.28 vo_prod:H=0.7267,top10E=0.26,eRank=131.6,q75/q25=10527.75 train_time:707531ms step_avg:76.91ms +[2025-09-02 05:10:15] [Rank 0] PRINT: step:9200/10000 val_loss:3.7868 svd_entropy: attn_qk:H=0.7498,top10E=0.27,eRank=153.0,q75/q25=94.99 attn_vo:H=0.8159,top10E=0.17,eRank=259.6,q75/q25=89.72 mlp_w1:H=0.8214,top10E=0.22,eRank=249.7,q75/q25=13.84 mlp_w2:H=0.8823,top10E=0.11,eRank=354.9,q75/q25=14.28 vo_prod:H=0.7267,top10E=0.26,eRank=131.6,q75/q25=10527.75 train_time:707531ms step_avg:76.91ms +[2025-09-02 05:10:15] [Rank 0] step:9201/10000 train_time:707544ms step_avg:76.90ms +[2025-09-02 05:10:15] [Rank 0] step:9201/10000 train_time:707544ms step_avg:76.90ms +[2025-09-02 05:10:17] [Rank 0] step:9221/10000 train_time:709045ms step_avg:76.89ms +[2025-09-02 05:10:17] [Rank 0] step:9221/10000 train_time:709045ms step_avg:76.89ms +[2025-09-02 05:10:18] [Rank 0] step:9241/10000 train_time:710698ms step_avg:76.91ms +[2025-09-02 05:10:18] [Rank 0] step:9241/10000 train_time:710698ms step_avg:76.91ms +[2025-09-02 05:10:20] [Rank 0] step:9261/10000 train_time:712353ms step_avg:76.92ms +[2025-09-02 05:10:20] [Rank 0] step:9261/10000 train_time:712353ms step_avg:76.92ms +[2025-09-02 05:10:21] [Rank 0] step:9281/10000 train_time:713987ms step_avg:76.93ms +[2025-09-02 05:10:21] [Rank 0] step:9281/10000 train_time:713987ms step_avg:76.93ms +[2025-09-02 05:10:23] [Rank 0] step:9301/10000 train_time:715731ms step_avg:76.95ms +[2025-09-02 05:10:23] [Rank 0] step:9301/10000 train_time:715731ms step_avg:76.95ms +[2025-09-02 05:10:25] [Rank 0] step:9321/10000 train_time:717379ms step_avg:76.96ms +[2025-09-02 05:10:25] [Rank 0] step:9321/10000 train_time:717379ms step_avg:76.96ms +[2025-09-02 
05:10:27] [Rank 0] step:9341/10000 train_time:719025ms step_avg:76.98ms +[2025-09-02 05:10:27] [Rank 0] step:9341/10000 train_time:719025ms step_avg:76.98ms +[2025-09-02 05:10:28] [Rank 0] step:9361/10000 train_time:720674ms step_avg:76.99ms +[2025-09-02 05:10:28] [Rank 0] step:9361/10000 train_time:720674ms step_avg:76.99ms +[2025-09-02 05:10:30] [Rank 0] step:9381/10000 train_time:722328ms step_avg:77.00ms +[2025-09-02 05:10:30] [Rank 0] step:9381/10000 train_time:722328ms step_avg:77.00ms +[2025-09-02 05:10:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:10:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:10:43] [Rank 0] PRINT: step:9400/10000 val_loss:3.7806 svd_entropy: attn_qk:H=0.7501,top10E=0.27,eRank=153.4,q75/q25=95.20 attn_vo:H=0.8162,top10E=0.17,eRank=260.0,q75/q25=88.98 mlp_w1:H=0.8219,top10E=0.22,eRank=250.4,q75/q25=13.80 mlp_w2:H=0.8826,top10E=0.11,eRank=355.7,q75/q25=14.21 vo_prod:H=0.7272,top10E=0.25,eRank=132.1,q75/q25=10481.24 train_time:724142ms step_avg:77.04ms +[2025-09-02 05:10:43] [Rank 0] PRINT: step:9400/10000 val_loss:3.7806 svd_entropy: attn_qk:H=0.7501,top10E=0.27,eRank=153.4,q75/q25=95.20 attn_vo:H=0.8162,top10E=0.17,eRank=260.0,q75/q25=88.98 mlp_w1:H=0.8219,top10E=0.22,eRank=250.4,q75/q25=13.80 mlp_w2:H=0.8826,top10E=0.11,eRank=355.7,q75/q25=14.21 vo_prod:H=0.7272,top10E=0.25,eRank=132.1,q75/q25=10481.24 train_time:724142ms step_avg:77.04ms +[2025-09-02 05:10:43] [Rank 0] step:9401/10000 train_time:724153ms step_avg:77.03ms +[2025-09-02 05:10:43] [Rank 0] step:9401/10000 train_time:724153ms step_avg:77.03ms +[2025-09-02 05:10:45] [Rank 0] step:9421/10000 train_time:725651ms step_avg:77.02ms +[2025-09-02 05:10:45] [Rank 0] step:9421/10000 train_time:725651ms step_avg:77.02ms +[2025-09-02 05:10:47] [Rank 0] step:9441/10000 train_time:727292ms 
step_avg:77.04ms +[2025-09-02 05:10:47] [Rank 0] step:9441/10000 train_time:727292ms step_avg:77.04ms +[2025-09-02 05:10:48] [Rank 0] step:9461/10000 train_time:728939ms step_avg:77.05ms +[2025-09-02 05:10:48] [Rank 0] step:9461/10000 train_time:728939ms step_avg:77.05ms +[2025-09-02 05:10:50] [Rank 0] step:9481/10000 train_time:730586ms step_avg:77.06ms +[2025-09-02 05:10:50] [Rank 0] step:9481/10000 train_time:730586ms step_avg:77.06ms +[2025-09-02 05:10:51] [Rank 0] step:9501/10000 train_time:732241ms step_avg:77.07ms +[2025-09-02 05:10:51] [Rank 0] step:9501/10000 train_time:732241ms step_avg:77.07ms +[2025-09-02 05:10:53] [Rank 0] step:9521/10000 train_time:733879ms step_avg:77.08ms +[2025-09-02 05:10:53] [Rank 0] step:9521/10000 train_time:733879ms step_avg:77.08ms +[2025-09-02 05:10:55] [Rank 0] step:9541/10000 train_time:735522ms step_avg:77.09ms +[2025-09-02 05:10:55] [Rank 0] step:9541/10000 train_time:735522ms step_avg:77.09ms +[2025-09-02 05:10:56] [Rank 0] step:9561/10000 train_time:737159ms step_avg:77.10ms +[2025-09-02 05:10:56] [Rank 0] step:9561/10000 train_time:737159ms step_avg:77.10ms +[2025-09-02 05:10:58] [Rank 0] step:9581/10000 train_time:738802ms step_avg:77.11ms +[2025-09-02 05:10:58] [Rank 0] step:9581/10000 train_time:738802ms step_avg:77.11ms +[2025-09-02 05:11:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:11:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:11:11] [Rank 0] PRINT: step:9600/10000 val_loss:3.7746 svd_entropy: attn_qk:H=0.7504,top10E=0.27,eRank=153.7,q75/q25=94.99 attn_vo:H=0.8165,top10E=0.17,eRank=260.4,q75/q25=88.85 mlp_w1:H=0.8222,top10E=0.22,eRank=250.9,q75/q25=13.76 mlp_w2:H=0.8829,top10E=0.11,eRank=356.4,q75/q25=14.18 vo_prod:H=0.7277,top10E=0.25,eRank=132.5,q75/q25=10294.23 train_time:740620ms step_avg:77.15ms +[2025-09-02 05:11:11] [Rank 0] PRINT: step:9600/10000 val_loss:3.7746 svd_entropy: attn_qk:H=0.7504,top10E=0.27,eRank=153.7,q75/q25=94.99 attn_vo:H=0.8165,top10E=0.17,eRank=260.4,q75/q25=88.85 mlp_w1:H=0.8222,top10E=0.22,eRank=250.9,q75/q25=13.76 mlp_w2:H=0.8829,top10E=0.11,eRank=356.4,q75/q25=14.18 vo_prod:H=0.7277,top10E=0.25,eRank=132.5,q75/q25=10294.23 train_time:740620ms step_avg:77.15ms +[2025-09-02 05:11:12] [Rank 0] step:9601/10000 train_time:740631ms step_avg:77.14ms +[2025-09-02 05:11:12] [Rank 0] step:9601/10000 train_time:740631ms step_avg:77.14ms +[2025-09-02 05:11:13] [Rank 0] step:9621/10000 train_time:742135ms step_avg:77.14ms +[2025-09-02 05:11:13] [Rank 0] step:9621/10000 train_time:742135ms step_avg:77.14ms +[2025-09-02 05:11:15] [Rank 0] step:9641/10000 train_time:743779ms step_avg:77.15ms +[2025-09-02 05:11:15] [Rank 0] step:9641/10000 train_time:743779ms step_avg:77.15ms +[2025-09-02 05:11:17] [Rank 0] step:9661/10000 train_time:745449ms step_avg:77.16ms +[2025-09-02 05:11:17] [Rank 0] step:9661/10000 train_time:745449ms step_avg:77.16ms +[2025-09-02 05:11:18] [Rank 0] step:9681/10000 train_time:747111ms step_avg:77.17ms +[2025-09-02 05:11:18] [Rank 0] step:9681/10000 train_time:747111ms step_avg:77.17ms +[2025-09-02 05:11:20] [Rank 0] step:9701/10000 train_time:748794ms step_avg:77.19ms +[2025-09-02 05:11:20] [Rank 0] step:9701/10000 train_time:748794ms step_avg:77.19ms +[2025-09-02 05:11:22] [Rank 0] step:9721/10000 train_time:750455ms step_avg:77.20ms +[2025-09-02 05:11:22] [Rank 0] step:9721/10000 train_time:750455ms step_avg:77.20ms +[2025-09-02 
05:11:23] [Rank 0] step:9741/10000 train_time:752142ms step_avg:77.21ms +[2025-09-02 05:11:23] [Rank 0] step:9741/10000 train_time:752142ms step_avg:77.21ms +[2025-09-02 05:11:25] [Rank 0] step:9761/10000 train_time:753807ms step_avg:77.23ms +[2025-09-02 05:11:25] [Rank 0] step:9761/10000 train_time:753807ms step_avg:77.23ms +[2025-09-02 05:11:27] [Rank 0] step:9781/10000 train_time:755491ms step_avg:77.24ms +[2025-09-02 05:11:27] [Rank 0] step:9781/10000 train_time:755491ms step_avg:77.24ms +[2025-09-02 05:11:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:11:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:11:40] [Rank 0] PRINT: step:9800/10000 val_loss:3.7686 svd_entropy: attn_qk:H=0.7507,top10E=0.27,eRank=153.9,q75/q25=94.74 attn_vo:H=0.8167,top10E=0.16,eRank=260.7,q75/q25=88.56 mlp_w1:H=0.8225,top10E=0.22,eRank=251.3,q75/q25=13.74 mlp_w2:H=0.8832,top10E=0.11,eRank=356.9,q75/q25=14.11 vo_prod:H=0.7280,top10E=0.25,eRank=132.8,q75/q25=10218.07 train_time:757342ms step_avg:77.28ms +[2025-09-02 05:11:40] [Rank 0] PRINT: step:9800/10000 val_loss:3.7686 svd_entropy: attn_qk:H=0.7507,top10E=0.27,eRank=153.9,q75/q25=94.74 attn_vo:H=0.8167,top10E=0.16,eRank=260.7,q75/q25=88.56 mlp_w1:H=0.8225,top10E=0.22,eRank=251.3,q75/q25=13.74 mlp_w2:H=0.8832,top10E=0.11,eRank=356.9,q75/q25=14.11 vo_prod:H=0.7280,top10E=0.25,eRank=132.8,q75/q25=10218.07 train_time:757342ms step_avg:77.28ms +[2025-09-02 05:11:40] [Rank 0] step:9801/10000 train_time:757353ms step_avg:77.27ms +[2025-09-02 05:11:40] [Rank 0] step:9801/10000 train_time:757353ms step_avg:77.27ms +[2025-09-02 05:11:42] [Rank 0] step:9821/10000 train_time:758862ms step_avg:77.27ms +[2025-09-02 05:11:42] [Rank 0] step:9821/10000 train_time:758862ms step_avg:77.27ms +[2025-09-02 05:11:43] [Rank 0] step:9841/10000 train_time:760546ms 
step_avg:77.28ms +[2025-09-02 05:11:43] [Rank 0] step:9841/10000 train_time:760546ms step_avg:77.28ms +[2025-09-02 05:11:45] [Rank 0] step:9861/10000 train_time:762207ms step_avg:77.30ms +[2025-09-02 05:11:45] [Rank 0] step:9861/10000 train_time:762207ms step_avg:77.30ms +[2025-09-02 05:11:47] [Rank 0] step:9881/10000 train_time:763863ms step_avg:77.31ms +[2025-09-02 05:11:47] [Rank 0] step:9881/10000 train_time:763863ms step_avg:77.31ms +[2025-09-02 05:11:48] [Rank 0] step:9901/10000 train_time:765538ms step_avg:77.32ms +[2025-09-02 05:11:48] [Rank 0] step:9901/10000 train_time:765538ms step_avg:77.32ms +[2025-09-02 05:11:50] [Rank 0] step:9921/10000 train_time:767204ms step_avg:77.33ms +[2025-09-02 05:11:50] [Rank 0] step:9921/10000 train_time:767204ms step_avg:77.33ms +[2025-09-02 05:11:52] [Rank 0] step:9941/10000 train_time:768878ms step_avg:77.34ms +[2025-09-02 05:11:52] [Rank 0] step:9941/10000 train_time:768878ms step_avg:77.34ms +[2025-09-02 05:11:53] [Rank 0] step:9961/10000 train_time:770549ms step_avg:77.36ms +[2025-09-02 05:11:53] [Rank 0] step:9961/10000 train_time:770549ms step_avg:77.36ms +[2025-09-02 05:11:55] [Rank 0] step:9981/10000 train_time:772214ms step_avg:77.37ms +[2025-09-02 05:11:55] [Rank 0] step:9981/10000 train_time:772214ms step_avg:77.37ms +[2025-09-02 05:11:57] [Rank 0] step:10000/10000 train_time:773809ms step_avg:77.38ms +[2025-09-02 05:11:57] [Rank 0] step:10000/10000 train_time:773809ms step_avg:77.38ms +[2025-09-02 05:11:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:11:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:12:08] [Rank 0] PRINT: step:10000/10000 val_loss:3.7626 svd_entropy: attn_qk:H=0.7508,top10E=0.27,eRank=154.1,q75/q25=94.77 attn_vo:H=0.8169,top10E=0.16,eRank=260.9,q75/q25=88.61 mlp_w1:H=0.8227,top10E=0.22,eRank=251.6,q75/q25=13.72 mlp_w2:H=0.8833,top10E=0.11,eRank=357.3,q75/q25=14.09 vo_prod:H=0.7283,top10E=0.25,eRank=133.0,q75/q25=10151.44 train_time:774065ms step_avg:77.41ms +[2025-09-02 05:12:08] [Rank 0] PRINT: step:10000/10000 val_loss:3.7626 svd_entropy: attn_qk:H=0.7508,top10E=0.27,eRank=154.1,q75/q25=94.77 attn_vo:H=0.8169,top10E=0.16,eRank=260.9,q75/q25=88.61 mlp_w1:H=0.8227,top10E=0.22,eRank=251.6,q75/q25=13.72 mlp_w2:H=0.8833,top10E=0.11,eRank=357.3,q75/q25=14.09 vo_prod:H=0.7283,top10E=0.25,eRank=133.0,q75/q25=10151.44 train_time:774065ms step_avg:77.41ms +[2025-09-02 05:12:08] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 05:12:08 2025 --- +[2025-09-02 05:12:08] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 05:12:08 2025 --- +[2025-09-02 05:12:08] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 05:12:08] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_42/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..93ad5ed70904164d7e22c9f50ee46ac8782435b7 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "cd83c7bf-9949-4f10-ae32-24a87ee6b13f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_42/training_log_cd83c7bf-9949-4f10-ae32-24a87ee6b13f.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_42/training_log_cd83c7bf-9949-4f10-ae32-24a87ee6b13f.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b3c8bb8e8949f27e0f4e34588996ad077957489 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_42/training_log_cd83c7bf-9949-4f10-ae32-24a87ee6b13f.txt @@ -0,0 +1,2984 @@ +[2025-09-02 06:02:50] [Rank 0] PRINT: --- Script Start: Tue Sep 2 06:02:50 2025 --- +[2025-09-02 06:02:50] [Rank 0] PRINT: --- Script Start: Tue Sep 2 06:02:50 2025 --- +[2025-09-02 06:02:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 06:02:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 06:02:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 06:02:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 06:02:50] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-02 06:02:50] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-02 06:02:50] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_42 +[2025-09-02 06:02:50] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_42 +[2025-09-02 06:02:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 06:02:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 06:02:50] [Rank 0] PRINT: Constructing model... +[2025-09-02 06:02:50] [Rank 0] PRINT: Constructing model... +[2025-09-02 06:02:52] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 06:02:52] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 06:02:52] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 06:02:52] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 06:02:52] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 06:02:52] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 06:02:52] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 06:02:52] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 06:02:52] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 06:02:52] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 06:02:52] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 06:02:52] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 06:02:52] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 06:02:52] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 06:02:52] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 06:02:52] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 06:02:52] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 06:02:52] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 06:02:52] [Rank 0] PRINT: Starting warmup... +[2025-09-02 06:02:52] [Rank 0] PRINT: Starting warmup... +[2025-09-02 06:03:37] [Rank 0] PRINT: Warmup complete. +[2025-09-02 06:03:37] [Rank 0] PRINT: Warmup complete. +[2025-09-02 06:03:37] [Rank 0] PRINT: Starting training... +[2025-09-02 06:03:37] [Rank 0] PRINT: Starting training... 
+[2025-09-02 06:03:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:03:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:03:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 06:03:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 06:03:59] [Rank 0] step:21/10000 train_time:1296ms step_avg:61.70ms +[2025-09-02 06:03:59] [Rank 0] step:21/10000 train_time:1296ms step_avg:61.70ms +[2025-09-02 06:04:00] [Rank 0] step:41/10000 train_time:2696ms step_avg:65.77ms +[2025-09-02 06:04:00] [Rank 0] step:41/10000 train_time:2696ms step_avg:65.77ms +[2025-09-02 06:04:02] [Rank 0] step:61/10000 train_time:4103ms step_avg:67.26ms +[2025-09-02 06:04:02] [Rank 0] step:61/10000 train_time:4103ms step_avg:67.26ms +[2025-09-02 06:04:03] [Rank 0] step:81/10000 train_time:5512ms step_avg:68.05ms +[2025-09-02 06:04:03] [Rank 0] step:81/10000 train_time:5512ms step_avg:68.05ms +[2025-09-02 06:04:04] [Rank 0] step:101/10000 train_time:6923ms step_avg:68.54ms +[2025-09-02 06:04:04] [Rank 0] step:101/10000 train_time:6923ms step_avg:68.54ms +[2025-09-02 06:04:06] [Rank 0] step:121/10000 train_time:8334ms step_avg:68.87ms +[2025-09-02 06:04:06] [Rank 0] step:121/10000 
train_time:8334ms step_avg:68.87ms +[2025-09-02 06:04:07] [Rank 0] step:141/10000 train_time:9752ms step_avg:69.16ms +[2025-09-02 06:04:07] [Rank 0] step:141/10000 train_time:9752ms step_avg:69.16ms +[2025-09-02 06:04:09] [Rank 0] step:161/10000 train_time:11165ms step_avg:69.35ms +[2025-09-02 06:04:09] [Rank 0] step:161/10000 train_time:11165ms step_avg:69.35ms +[2025-09-02 06:04:10] [Rank 0] step:181/10000 train_time:12579ms step_avg:69.50ms +[2025-09-02 06:04:10] [Rank 0] step:181/10000 train_time:12579ms step_avg:69.50ms +[2025-09-02 06:04:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:04:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:04:23] [Rank 0] PRINT: step:200/10000 val_loss:6.5879 svd_entropy: attn_qk:H=0.5021,top10E=0.72,eRank=74.4,q75/q25=11.94 attn_vo:H=0.4378,top10E=0.67,eRank=59.8,q75/q25=inf mlp_w1:H=0.4153,top10E=0.77,eRank=18.7,q75/q25=2.64 mlp_w2:H=0.1781,top10E=0.96,eRank=4.5,q75/q25=105.72 vo_prod:H=0.1949,top10E=0.87,eRank=6.5,q75/q25=inf train_time:14138ms step_avg:70.69ms +[2025-09-02 06:04:23] [Rank 0] PRINT: step:200/10000 val_loss:6.5879 svd_entropy: attn_qk:H=0.5021,top10E=0.72,eRank=74.4,q75/q25=11.94 attn_vo:H=0.4378,top10E=0.67,eRank=59.8,q75/q25=inf mlp_w1:H=0.4153,top10E=0.77,eRank=18.7,q75/q25=2.64 mlp_w2:H=0.1781,top10E=0.96,eRank=4.5,q75/q25=105.72 vo_prod:H=0.1949,top10E=0.87,eRank=6.5,q75/q25=inf train_time:14138ms step_avg:70.69ms +[2025-09-02 06:04:23] [Rank 0] step:201/10000 train_time:14150ms step_avg:70.40ms +[2025-09-02 06:04:23] [Rank 0] step:201/10000 train_time:14150ms step_avg:70.40ms +[2025-09-02 06:04:25] [Rank 0] step:221/10000 train_time:15440ms step_avg:69.86ms +[2025-09-02 06:04:25] [Rank 0] step:221/10000 train_time:15440ms step_avg:69.86ms +[2025-09-02 06:04:26] [Rank 0] step:241/10000 train_time:16853ms 
step_avg:69.93ms +[2025-09-02 06:04:26] [Rank 0] step:241/10000 train_time:16853ms step_avg:69.93ms +[2025-09-02 06:04:28] [Rank 0] step:261/10000 train_time:18267ms step_avg:69.99ms +[2025-09-02 06:04:28] [Rank 0] step:261/10000 train_time:18267ms step_avg:69.99ms +[2025-09-02 06:04:29] [Rank 0] step:281/10000 train_time:19681ms step_avg:70.04ms +[2025-09-02 06:04:29] [Rank 0] step:281/10000 train_time:19681ms step_avg:70.04ms +[2025-09-02 06:04:31] [Rank 0] step:301/10000 train_time:21095ms step_avg:70.08ms +[2025-09-02 06:04:31] [Rank 0] step:301/10000 train_time:21095ms step_avg:70.08ms +[2025-09-02 06:04:32] [Rank 0] step:321/10000 train_time:22509ms step_avg:70.12ms +[2025-09-02 06:04:32] [Rank 0] step:321/10000 train_time:22509ms step_avg:70.12ms +[2025-09-02 06:04:33] [Rank 0] step:341/10000 train_time:23924ms step_avg:70.16ms +[2025-09-02 06:04:33] [Rank 0] step:341/10000 train_time:23924ms step_avg:70.16ms +[2025-09-02 06:04:35] [Rank 0] step:361/10000 train_time:25339ms step_avg:70.19ms +[2025-09-02 06:04:35] [Rank 0] step:361/10000 train_time:25339ms step_avg:70.19ms +[2025-09-02 06:04:36] [Rank 0] step:381/10000 train_time:26754ms step_avg:70.22ms +[2025-09-02 06:04:36] [Rank 0] step:381/10000 train_time:26754ms step_avg:70.22ms +[2025-09-02 06:04:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:04:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:04:49] [Rank 0] PRINT: step:400/10000 val_loss:6.0446 svd_entropy: attn_qk:H=0.5427,top10E=0.64,eRank=81.5,q75/q25=13.18 attn_vo:H=0.5111,top10E=0.55,eRank=75.0,q75/q25=inf mlp_w1:H=0.4290,top10E=0.72,eRank=26.6,q75/q25=3.27 mlp_w2:H=0.5336,top10E=0.62,eRank=35.2,q75/q25=13.79 vo_prod:H=0.3222,top10E=0.80,eRank=13.7,q75/q25=inf train_time:28313ms step_avg:70.78ms +[2025-09-02 06:04:49] [Rank 0] PRINT: step:400/10000 val_loss:6.0446 svd_entropy: attn_qk:H=0.5427,top10E=0.64,eRank=81.5,q75/q25=13.18 attn_vo:H=0.5111,top10E=0.55,eRank=75.0,q75/q25=inf mlp_w1:H=0.4290,top10E=0.72,eRank=26.6,q75/q25=3.27 mlp_w2:H=0.5336,top10E=0.62,eRank=35.2,q75/q25=13.79 vo_prod:H=0.3222,top10E=0.80,eRank=13.7,q75/q25=inf train_time:28313ms step_avg:70.78ms +[2025-09-02 06:04:50] [Rank 0] step:401/10000 train_time:28325ms step_avg:70.64ms +[2025-09-02 06:04:50] [Rank 0] step:401/10000 train_time:28325ms step_avg:70.64ms +[2025-09-02 06:04:51] [Rank 0] step:421/10000 train_time:29613ms step_avg:70.34ms +[2025-09-02 06:04:51] [Rank 0] step:421/10000 train_time:29613ms step_avg:70.34ms +[2025-09-02 06:04:52] [Rank 0] step:441/10000 train_time:31026ms step_avg:70.35ms +[2025-09-02 06:04:52] [Rank 0] step:441/10000 train_time:31026ms step_avg:70.35ms +[2025-09-02 06:04:54] [Rank 0] step:461/10000 train_time:32440ms step_avg:70.37ms +[2025-09-02 06:04:54] [Rank 0] step:461/10000 train_time:32440ms step_avg:70.37ms +[2025-09-02 06:04:55] [Rank 0] step:481/10000 train_time:33854ms step_avg:70.38ms +[2025-09-02 06:04:55] [Rank 0] step:481/10000 train_time:33854ms step_avg:70.38ms +[2025-09-02 06:04:57] [Rank 0] step:501/10000 train_time:35268ms step_avg:70.40ms +[2025-09-02 06:04:57] [Rank 0] step:501/10000 train_time:35268ms step_avg:70.40ms +[2025-09-02 06:04:58] [Rank 0] step:521/10000 train_time:36684ms step_avg:70.41ms +[2025-09-02 06:04:58] [Rank 0] step:521/10000 train_time:36684ms step_avg:70.41ms +[2025-09-02 06:04:59] [Rank 0] step:541/10000 train_time:38097ms 
step_avg:70.42ms +[2025-09-02 06:04:59] [Rank 0] step:541/10000 train_time:38097ms step_avg:70.42ms +[2025-09-02 06:05:01] [Rank 0] step:561/10000 train_time:39512ms step_avg:70.43ms +[2025-09-02 06:05:01] [Rank 0] step:561/10000 train_time:39512ms step_avg:70.43ms +[2025-09-02 06:05:02] [Rank 0] step:581/10000 train_time:40928ms step_avg:70.44ms +[2025-09-02 06:05:02] [Rank 0] step:581/10000 train_time:40928ms step_avg:70.44ms +[2025-09-02 06:05:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:05:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:05:15] [Rank 0] PRINT: step:600/10000 val_loss:5.7334 svd_entropy: attn_qk:H=0.5717,top10E=0.58,eRank=87.8,q75/q25=14.60 attn_vo:H=0.5533,top10E=0.48,eRank=88.4,q75/q25=inf mlp_w1:H=0.4755,top10E=0.66,eRank=37.0,q75/q25=3.82 mlp_w2:H=0.6179,top10E=0.48,eRank=61.5,q75/q25=9.68 vo_prod:H=0.3835,top10E=0.72,eRank=19.2,q75/q25=inf train_time:42485ms step_avg:70.81ms +[2025-09-02 06:05:15] [Rank 0] PRINT: step:600/10000 val_loss:5.7334 svd_entropy: attn_qk:H=0.5717,top10E=0.58,eRank=87.8,q75/q25=14.60 attn_vo:H=0.5533,top10E=0.48,eRank=88.4,q75/q25=inf mlp_w1:H=0.4755,top10E=0.66,eRank=37.0,q75/q25=3.82 mlp_w2:H=0.6179,top10E=0.48,eRank=61.5,q75/q25=9.68 vo_prod:H=0.3835,top10E=0.72,eRank=19.2,q75/q25=inf train_time:42485ms step_avg:70.81ms +[2025-09-02 06:05:16] [Rank 0] step:601/10000 train_time:42498ms step_avg:70.71ms +[2025-09-02 06:05:16] [Rank 0] step:601/10000 train_time:42498ms step_avg:70.71ms +[2025-09-02 06:05:17] [Rank 0] step:621/10000 train_time:43794ms step_avg:70.52ms +[2025-09-02 06:05:17] [Rank 0] step:621/10000 train_time:43794ms step_avg:70.52ms +[2025-09-02 06:05:18] [Rank 0] step:641/10000 train_time:45208ms step_avg:70.53ms +[2025-09-02 06:05:18] [Rank 0] step:641/10000 train_time:45208ms step_avg:70.53ms 
+[2025-09-02 06:05:20] [Rank 0] step:661/10000 train_time:46622ms step_avg:70.53ms +[2025-09-02 06:05:20] [Rank 0] step:661/10000 train_time:46622ms step_avg:70.53ms +[2025-09-02 06:05:21] [Rank 0] step:681/10000 train_time:48038ms step_avg:70.54ms +[2025-09-02 06:05:21] [Rank 0] step:681/10000 train_time:48038ms step_avg:70.54ms +[2025-09-02 06:05:23] [Rank 0] step:701/10000 train_time:49452ms step_avg:70.55ms +[2025-09-02 06:05:23] [Rank 0] step:701/10000 train_time:49452ms step_avg:70.55ms +[2025-09-02 06:05:24] [Rank 0] step:721/10000 train_time:50868ms step_avg:70.55ms +[2025-09-02 06:05:24] [Rank 0] step:721/10000 train_time:50868ms step_avg:70.55ms +[2025-09-02 06:05:25] [Rank 0] step:741/10000 train_time:52284ms step_avg:70.56ms +[2025-09-02 06:05:25] [Rank 0] step:741/10000 train_time:52284ms step_avg:70.56ms +[2025-09-02 06:05:27] [Rank 0] step:761/10000 train_time:53711ms step_avg:70.58ms +[2025-09-02 06:05:27] [Rank 0] step:761/10000 train_time:53711ms step_avg:70.58ms +[2025-09-02 06:05:28] [Rank 0] step:781/10000 train_time:55139ms step_avg:70.60ms +[2025-09-02 06:05:28] [Rank 0] step:781/10000 train_time:55139ms step_avg:70.60ms +[2025-09-02 06:05:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:05:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:05:42] [Rank 0] PRINT: step:800/10000 val_loss:5.5038 svd_entropy: attn_qk:H=0.5952,top10E=0.53,eRank=93.2,q75/q25=16.26 attn_vo:H=0.5825,top10E=0.43,eRank=100.2,q75/q25=inf mlp_w1:H=0.5151,top10E=0.61,eRank=46.5,q75/q25=4.25 mlp_w2:H=0.6635,top10E=0.40,eRank=83.5,q75/q25=9.19 vo_prod:H=0.4249,top10E=0.65,eRank=24.6,q75/q25=inf train_time:56711ms step_avg:70.89ms +[2025-09-02 06:05:42] [Rank 0] PRINT: step:800/10000 val_loss:5.5038 svd_entropy: attn_qk:H=0.5952,top10E=0.53,eRank=93.2,q75/q25=16.26 attn_vo:H=0.5825,top10E=0.43,eRank=100.2,q75/q25=inf mlp_w1:H=0.5151,top10E=0.61,eRank=46.5,q75/q25=4.25 mlp_w2:H=0.6635,top10E=0.40,eRank=83.5,q75/q25=9.19 vo_prod:H=0.4249,top10E=0.65,eRank=24.6,q75/q25=inf train_time:56711ms step_avg:70.89ms +[2025-09-02 06:05:42] [Rank 0] step:801/10000 train_time:56723ms step_avg:70.82ms +[2025-09-02 06:05:42] [Rank 0] step:801/10000 train_time:56723ms step_avg:70.82ms +[2025-09-02 06:05:43] [Rank 0] step:821/10000 train_time:58014ms step_avg:70.66ms +[2025-09-02 06:05:43] [Rank 0] step:821/10000 train_time:58014ms step_avg:70.66ms +[2025-09-02 06:05:45] [Rank 0] step:841/10000 train_time:59439ms step_avg:70.68ms +[2025-09-02 06:05:45] [Rank 0] step:841/10000 train_time:59439ms step_avg:70.68ms +[2025-09-02 06:05:46] [Rank 0] step:861/10000 train_time:60867ms step_avg:70.69ms +[2025-09-02 06:05:46] [Rank 0] step:861/10000 train_time:60867ms step_avg:70.69ms +[2025-09-02 06:05:47] [Rank 0] step:881/10000 train_time:62294ms step_avg:70.71ms +[2025-09-02 06:05:47] [Rank 0] step:881/10000 train_time:62294ms step_avg:70.71ms +[2025-09-02 06:05:49] [Rank 0] step:901/10000 train_time:63740ms step_avg:70.74ms +[2025-09-02 06:05:49] [Rank 0] step:901/10000 train_time:63740ms step_avg:70.74ms +[2025-09-02 06:05:50] [Rank 0] step:921/10000 train_time:65168ms step_avg:70.76ms +[2025-09-02 06:05:50] [Rank 0] step:921/10000 train_time:65168ms step_avg:70.76ms +[2025-09-02 06:05:52] [Rank 0] step:941/10000 train_time:66597ms 
step_avg:70.77ms +[2025-09-02 06:05:52] [Rank 0] step:941/10000 train_time:66597ms step_avg:70.77ms +[2025-09-02 06:05:53] [Rank 0] step:961/10000 train_time:68024ms step_avg:70.78ms +[2025-09-02 06:05:53] [Rank 0] step:961/10000 train_time:68024ms step_avg:70.78ms +[2025-09-02 06:05:55] [Rank 0] step:981/10000 train_time:69453ms step_avg:70.80ms +[2025-09-02 06:05:55] [Rank 0] step:981/10000 train_time:69453ms step_avg:70.80ms +[2025-09-02 06:05:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:05:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:06:08] [Rank 0] PRINT: step:1000/10000 val_loss:5.3352 svd_entropy: attn_qk:H=0.6134,top10E=0.49,eRank=98.3,q75/q25=18.32 attn_vo:H=0.6053,top10E=0.40,eRank=111.5,q75/q25=inf mlp_w1:H=0.5478,top10E=0.57,eRank=55.0,q75/q25=4.64 mlp_w2:H=0.6981,top10E=0.35,eRank=105.3,q75/q25=9.27 vo_prod:H=0.4548,top10E=0.59,eRank=29.8,q75/q25=inf train_time:71025ms step_avg:71.03ms +[2025-09-02 06:06:08] [Rank 0] PRINT: step:1000/10000 val_loss:5.3352 svd_entropy: attn_qk:H=0.6134,top10E=0.49,eRank=98.3,q75/q25=18.32 attn_vo:H=0.6053,top10E=0.40,eRank=111.5,q75/q25=inf mlp_w1:H=0.5478,top10E=0.57,eRank=55.0,q75/q25=4.64 mlp_w2:H=0.6981,top10E=0.35,eRank=105.3,q75/q25=9.27 vo_prod:H=0.4548,top10E=0.59,eRank=29.8,q75/q25=inf train_time:71025ms step_avg:71.03ms +[2025-09-02 06:06:08] [Rank 0] step:1001/10000 train_time:71037ms step_avg:70.97ms +[2025-09-02 06:06:08] [Rank 0] step:1001/10000 train_time:71037ms step_avg:70.97ms +[2025-09-02 06:06:09] [Rank 0] step:1021/10000 train_time:72351ms step_avg:70.86ms +[2025-09-02 06:06:09] [Rank 0] step:1021/10000 train_time:72351ms step_avg:70.86ms +[2025-09-02 06:06:11] [Rank 0] step:1041/10000 train_time:73779ms step_avg:70.87ms +[2025-09-02 06:06:11] [Rank 0] step:1041/10000 train_time:73779ms 
step_avg:70.87ms +[2025-09-02 06:06:12] [Rank 0] step:1061/10000 train_time:75209ms step_avg:70.88ms +[2025-09-02 06:06:12] [Rank 0] step:1061/10000 train_time:75209ms step_avg:70.88ms +[2025-09-02 06:06:13] [Rank 0] step:1081/10000 train_time:76637ms step_avg:70.89ms +[2025-09-02 06:06:13] [Rank 0] step:1081/10000 train_time:76637ms step_avg:70.89ms +[2025-09-02 06:06:15] [Rank 0] step:1101/10000 train_time:78066ms step_avg:70.90ms +[2025-09-02 06:06:15] [Rank 0] step:1101/10000 train_time:78066ms step_avg:70.90ms +[2025-09-02 06:06:16] [Rank 0] step:1121/10000 train_time:79495ms step_avg:70.91ms +[2025-09-02 06:06:16] [Rank 0] step:1121/10000 train_time:79495ms step_avg:70.91ms +[2025-09-02 06:06:18] [Rank 0] step:1141/10000 train_time:80925ms step_avg:70.92ms +[2025-09-02 06:06:18] [Rank 0] step:1141/10000 train_time:80925ms step_avg:70.92ms +[2025-09-02 06:06:19] [Rank 0] step:1161/10000 train_time:82354ms step_avg:70.93ms +[2025-09-02 06:06:19] [Rank 0] step:1161/10000 train_time:82354ms step_avg:70.93ms +[2025-09-02 06:06:21] [Rank 0] step:1181/10000 train_time:83785ms step_avg:70.94ms +[2025-09-02 06:06:21] [Rank 0] step:1181/10000 train_time:83785ms step_avg:70.94ms +[2025-09-02 06:06:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:06:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:06:33] [Rank 0] PRINT: step:1200/10000 val_loss:5.1657 svd_entropy: attn_qk:H=0.6286,top10E=0.46,eRank=103.2,q75/q25=20.93 attn_vo:H=0.6255,top10E=0.38,eRank=123.5,q75/q25=inf mlp_w1:H=0.5728,top10E=0.54,eRank=62.4,q75/q25=5.05 mlp_w2:H=0.7238,top10E=0.31,eRank=125.2,q75/q25=10.02 vo_prod:H=0.4783,top10E=0.55,eRank=34.6,q75/q25=inf train_time:85358ms step_avg:71.13ms +[2025-09-02 06:06:33] [Rank 0] PRINT: step:1200/10000 val_loss:5.1657 svd_entropy: attn_qk:H=0.6286,top10E=0.46,eRank=103.2,q75/q25=20.93 attn_vo:H=0.6255,top10E=0.38,eRank=123.5,q75/q25=inf mlp_w1:H=0.5728,top10E=0.54,eRank=62.4,q75/q25=5.05 mlp_w2:H=0.7238,top10E=0.31,eRank=125.2,q75/q25=10.02 vo_prod:H=0.4783,top10E=0.55,eRank=34.6,q75/q25=inf train_time:85358ms step_avg:71.13ms +[2025-09-02 06:06:34] [Rank 0] step:1201/10000 train_time:85371ms step_avg:71.08ms +[2025-09-02 06:06:34] [Rank 0] step:1201/10000 train_time:85371ms step_avg:71.08ms +[2025-09-02 06:06:35] [Rank 0] step:1221/10000 train_time:86661ms step_avg:70.98ms +[2025-09-02 06:06:35] [Rank 0] step:1221/10000 train_time:86661ms step_avg:70.98ms +[2025-09-02 06:06:36] [Rank 0] step:1241/10000 train_time:88088ms step_avg:70.98ms +[2025-09-02 06:06:36] [Rank 0] step:1241/10000 train_time:88088ms step_avg:70.98ms +[2025-09-02 06:06:38] [Rank 0] step:1261/10000 train_time:89516ms step_avg:70.99ms +[2025-09-02 06:06:38] [Rank 0] step:1261/10000 train_time:89516ms step_avg:70.99ms +[2025-09-02 06:06:39] [Rank 0] step:1281/10000 train_time:90945ms step_avg:70.99ms +[2025-09-02 06:06:39] [Rank 0] step:1281/10000 train_time:90945ms step_avg:70.99ms +[2025-09-02 06:06:41] [Rank 0] step:1301/10000 train_time:92373ms step_avg:71.00ms +[2025-09-02 06:06:41] [Rank 0] step:1301/10000 train_time:92373ms step_avg:71.00ms +[2025-09-02 06:06:42] [Rank 0] step:1321/10000 train_time:93803ms step_avg:71.01ms +[2025-09-02 06:06:42] [Rank 0] step:1321/10000 train_time:93803ms step_avg:71.01ms +[2025-09-02 06:06:44] [Rank 0] step:1341/10000 
train_time:95232ms step_avg:71.02ms +[2025-09-02 06:06:44] [Rank 0] step:1341/10000 train_time:95232ms step_avg:71.02ms +[2025-09-02 06:06:45] [Rank 0] step:1361/10000 train_time:96661ms step_avg:71.02ms +[2025-09-02 06:06:45] [Rank 0] step:1361/10000 train_time:96661ms step_avg:71.02ms +[2025-09-02 06:06:46] [Rank 0] step:1381/10000 train_time:98092ms step_avg:71.03ms +[2025-09-02 06:06:46] [Rank 0] step:1381/10000 train_time:98092ms step_avg:71.03ms +[2025-09-02 06:06:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:06:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:06:59] [Rank 0] PRINT: step:1400/10000 val_loss:5.0340 svd_entropy: attn_qk:H=0.6415,top10E=0.44,eRank=108.0,q75/q25=24.67 attn_vo:H=0.6427,top10E=0.35,eRank=134.9,q75/q25=inf mlp_w1:H=0.5936,top10E=0.52,eRank=69.5,q75/q25=5.52 mlp_w2:H=0.7425,top10E=0.28,eRank=142.0,q75/q25=11.07 vo_prod:H=0.4968,top10E=0.51,eRank=39.0,q75/q25=inf train_time:99665ms step_avg:71.19ms +[2025-09-02 06:06:59] [Rank 0] PRINT: step:1400/10000 val_loss:5.0340 svd_entropy: attn_qk:H=0.6415,top10E=0.44,eRank=108.0,q75/q25=24.67 attn_vo:H=0.6427,top10E=0.35,eRank=134.9,q75/q25=inf mlp_w1:H=0.5936,top10E=0.52,eRank=69.5,q75/q25=5.52 mlp_w2:H=0.7425,top10E=0.28,eRank=142.0,q75/q25=11.07 vo_prod:H=0.4968,top10E=0.51,eRank=39.0,q75/q25=inf train_time:99665ms step_avg:71.19ms +[2025-09-02 06:06:59] [Rank 0] step:1401/10000 train_time:99677ms step_avg:71.15ms +[2025-09-02 06:06:59] [Rank 0] step:1401/10000 train_time:99677ms step_avg:71.15ms +[2025-09-02 06:07:01] [Rank 0] step:1421/10000 train_time:100978ms step_avg:71.06ms +[2025-09-02 06:07:01] [Rank 0] step:1421/10000 train_time:100978ms step_avg:71.06ms +[2025-09-02 06:07:02] [Rank 0] step:1441/10000 train_time:102407ms step_avg:71.07ms +[2025-09-02 06:07:02] [Rank 0] step:1441/10000 
train_time:102407ms step_avg:71.07ms +[2025-09-02 06:07:04] [Rank 0] step:1461/10000 train_time:103835ms step_avg:71.07ms +[2025-09-02 06:07:04] [Rank 0] step:1461/10000 train_time:103835ms step_avg:71.07ms +[2025-09-02 06:07:05] [Rank 0] step:1481/10000 train_time:105263ms step_avg:71.08ms +[2025-09-02 06:07:05] [Rank 0] step:1481/10000 train_time:105263ms step_avg:71.08ms +[2025-09-02 06:07:07] [Rank 0] step:1501/10000 train_time:106700ms step_avg:71.09ms +[2025-09-02 06:07:07] [Rank 0] step:1501/10000 train_time:106700ms step_avg:71.09ms +[2025-09-02 06:07:08] [Rank 0] step:1521/10000 train_time:108139ms step_avg:71.10ms +[2025-09-02 06:07:08] [Rank 0] step:1521/10000 train_time:108139ms step_avg:71.10ms +[2025-09-02 06:07:10] [Rank 0] step:1541/10000 train_time:109579ms step_avg:71.11ms +[2025-09-02 06:07:10] [Rank 0] step:1541/10000 train_time:109579ms step_avg:71.11ms +[2025-09-02 06:07:11] [Rank 0] step:1561/10000 train_time:111019ms step_avg:71.12ms +[2025-09-02 06:07:11] [Rank 0] step:1561/10000 train_time:111019ms step_avg:71.12ms +[2025-09-02 06:07:12] [Rank 0] step:1581/10000 train_time:112459ms step_avg:71.13ms +[2025-09-02 06:07:12] [Rank 0] step:1581/10000 train_time:112459ms step_avg:71.13ms +[2025-09-02 06:07:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:07:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:07:25] [Rank 0] PRINT: step:1600/10000 val_loss:4.8861 svd_entropy: attn_qk:H=0.6524,top10E=0.42,eRank=112.0,q75/q25=29.08 attn_vo:H=0.6573,top10E=0.33,eRank=145.1,q75/q25=inf mlp_w1:H=0.6119,top10E=0.49,eRank=76.4,q75/q25=6.06 mlp_w2:H=0.7567,top10E=0.26,eRank=156.4,q75/q25=12.49 vo_prod:H=0.5124,top10E=0.48,eRank=43.2,q75/q25=inf train_time:114043ms step_avg:71.28ms +[2025-09-02 06:07:25] [Rank 0] PRINT: step:1600/10000 val_loss:4.8861 svd_entropy: attn_qk:H=0.6524,top10E=0.42,eRank=112.0,q75/q25=29.08 attn_vo:H=0.6573,top10E=0.33,eRank=145.1,q75/q25=inf mlp_w1:H=0.6119,top10E=0.49,eRank=76.4,q75/q25=6.06 mlp_w2:H=0.7567,top10E=0.26,eRank=156.4,q75/q25=12.49 vo_prod:H=0.5124,top10E=0.48,eRank=43.2,q75/q25=inf train_time:114043ms step_avg:71.28ms +[2025-09-02 06:07:25] [Rank 0] step:1601/10000 train_time:114056ms step_avg:71.24ms +[2025-09-02 06:07:25] [Rank 0] step:1601/10000 train_time:114056ms step_avg:71.24ms +[2025-09-02 06:07:27] [Rank 0] step:1621/10000 train_time:115374ms step_avg:71.17ms +[2025-09-02 06:07:27] [Rank 0] step:1621/10000 train_time:115374ms step_avg:71.17ms +[2025-09-02 06:07:28] [Rank 0] step:1641/10000 train_time:116813ms step_avg:71.18ms +[2025-09-02 06:07:28] [Rank 0] step:1641/10000 train_time:116813ms step_avg:71.18ms +[2025-09-02 06:07:30] [Rank 0] step:1661/10000 train_time:118254ms step_avg:71.19ms +[2025-09-02 06:07:30] [Rank 0] step:1661/10000 train_time:118254ms step_avg:71.19ms +[2025-09-02 06:07:31] [Rank 0] step:1681/10000 train_time:119692ms step_avg:71.20ms +[2025-09-02 06:07:31] [Rank 0] step:1681/10000 train_time:119692ms step_avg:71.20ms +[2025-09-02 06:07:33] [Rank 0] step:1701/10000 train_time:121130ms step_avg:71.21ms +[2025-09-02 06:07:33] [Rank 0] step:1701/10000 train_time:121130ms step_avg:71.21ms +[2025-09-02 06:07:34] [Rank 0] step:1721/10000 train_time:122569ms step_avg:71.22ms +[2025-09-02 06:07:34] [Rank 0] step:1721/10000 train_time:122569ms step_avg:71.22ms +[2025-09-02 06:07:36] [Rank 0] 
step:1741/10000 train_time:124008ms step_avg:71.23ms +[2025-09-02 06:07:36] [Rank 0] step:1741/10000 train_time:124008ms step_avg:71.23ms +[2025-09-02 06:07:37] [Rank 0] step:1761/10000 train_time:125447ms step_avg:71.24ms +[2025-09-02 06:07:37] [Rank 0] step:1761/10000 train_time:125447ms step_avg:71.24ms +[2025-09-02 06:07:38] [Rank 0] step:1781/10000 train_time:126887ms step_avg:71.24ms +[2025-09-02 06:07:38] [Rank 0] step:1781/10000 train_time:126887ms step_avg:71.24ms +[2025-09-02 06:07:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:07:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:07:51] [Rank 0] PRINT: step:1800/10000 val_loss:4.7755 svd_entropy: attn_qk:H=0.6614,top10E=0.41,eRank=115.7,q75/q25=33.43 attn_vo:H=0.6695,top10E=0.32,eRank=153.7,q75/q25=inf mlp_w1:H=0.6282,top10E=0.47,eRank=83.1,q75/q25=6.57 mlp_w2:H=0.7679,top10E=0.24,eRank=168.9,q75/q25=13.76 vo_prod:H=0.5253,top10E=0.46,eRank=47.1,q75/q25=inf train_time:128471ms step_avg:71.37ms +[2025-09-02 06:07:51] [Rank 0] PRINT: step:1800/10000 val_loss:4.7755 svd_entropy: attn_qk:H=0.6614,top10E=0.41,eRank=115.7,q75/q25=33.43 attn_vo:H=0.6695,top10E=0.32,eRank=153.7,q75/q25=inf mlp_w1:H=0.6282,top10E=0.47,eRank=83.1,q75/q25=6.57 mlp_w2:H=0.7679,top10E=0.24,eRank=168.9,q75/q25=13.76 vo_prod:H=0.5253,top10E=0.46,eRank=47.1,q75/q25=inf train_time:128471ms step_avg:71.37ms +[2025-09-02 06:07:52] [Rank 0] step:1801/10000 train_time:128483ms step_avg:71.34ms +[2025-09-02 06:07:52] [Rank 0] step:1801/10000 train_time:128483ms step_avg:71.34ms +[2025-09-02 06:07:53] [Rank 0] step:1821/10000 train_time:129795ms step_avg:71.28ms +[2025-09-02 06:07:53] [Rank 0] step:1821/10000 train_time:129795ms step_avg:71.28ms +[2025-09-02 06:07:54] [Rank 0] step:1841/10000 train_time:131233ms step_avg:71.28ms +[2025-09-02 06:07:54] 
[Rank 0] step:1841/10000 train_time:131233ms step_avg:71.28ms +[2025-09-02 06:07:56] [Rank 0] step:1861/10000 train_time:132671ms step_avg:71.29ms +[2025-09-02 06:07:56] [Rank 0] step:1861/10000 train_time:132671ms step_avg:71.29ms +[2025-09-02 06:07:57] [Rank 0] step:1881/10000 train_time:134110ms step_avg:71.30ms +[2025-09-02 06:07:57] [Rank 0] step:1881/10000 train_time:134110ms step_avg:71.30ms +[2025-09-02 06:07:59] [Rank 0] step:1901/10000 train_time:135550ms step_avg:71.30ms +[2025-09-02 06:07:59] [Rank 0] step:1901/10000 train_time:135550ms step_avg:71.30ms +[2025-09-02 06:08:00] [Rank 0] step:1921/10000 train_time:136990ms step_avg:71.31ms +[2025-09-02 06:08:00] [Rank 0] step:1921/10000 train_time:136990ms step_avg:71.31ms +[2025-09-02 06:08:02] [Rank 0] step:1941/10000 train_time:138430ms step_avg:71.32ms +[2025-09-02 06:08:02] [Rank 0] step:1941/10000 train_time:138430ms step_avg:71.32ms +[2025-09-02 06:08:03] [Rank 0] step:1961/10000 train_time:139870ms step_avg:71.33ms +[2025-09-02 06:08:03] [Rank 0] step:1961/10000 train_time:139870ms step_avg:71.33ms +[2025-09-02 06:08:05] [Rank 0] step:1981/10000 train_time:141310ms step_avg:71.33ms +[2025-09-02 06:08:05] [Rank 0] step:1981/10000 train_time:141310ms step_avg:71.33ms +[2025-09-02 06:08:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:08:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:08:18] [Rank 0] PRINT: step:2000/10000 val_loss:4.7034 svd_entropy: attn_qk:H=0.6697,top10E=0.39,eRank=119.4,q75/q25=38.06 attn_vo:H=0.6801,top10E=0.30,eRank=161.2,q75/q25=inf mlp_w1:H=0.6427,top10E=0.46,eRank=89.6,q75/q25=7.11 mlp_w2:H=0.7774,top10E=0.23,eRank=180.2,q75/q25=14.79 vo_prod:H=0.5370,top10E=0.43,eRank=51.0,q75/q25=inf train_time:142895ms step_avg:71.45ms +[2025-09-02 06:08:18] [Rank 0] PRINT: step:2000/10000 val_loss:4.7034 svd_entropy: attn_qk:H=0.6697,top10E=0.39,eRank=119.4,q75/q25=38.06 attn_vo:H=0.6801,top10E=0.30,eRank=161.2,q75/q25=inf mlp_w1:H=0.6427,top10E=0.46,eRank=89.6,q75/q25=7.11 mlp_w2:H=0.7774,top10E=0.23,eRank=180.2,q75/q25=14.79 vo_prod:H=0.5370,top10E=0.43,eRank=51.0,q75/q25=inf train_time:142895ms step_avg:71.45ms +[2025-09-02 06:08:18] [Rank 0] step:2001/10000 train_time:142908ms step_avg:71.42ms +[2025-09-02 06:08:18] [Rank 0] step:2001/10000 train_time:142908ms step_avg:71.42ms +[2025-09-02 06:08:19] [Rank 0] step:2021/10000 train_time:144216ms step_avg:71.36ms +[2025-09-02 06:08:19] [Rank 0] step:2021/10000 train_time:144216ms step_avg:71.36ms +[2025-09-02 06:08:21] [Rank 0] step:2041/10000 train_time:145774ms step_avg:71.42ms +[2025-09-02 06:08:21] [Rank 0] step:2041/10000 train_time:145774ms step_avg:71.42ms +[2025-09-02 06:08:22] [Rank 0] step:2061/10000 train_time:147214ms step_avg:71.43ms +[2025-09-02 06:08:22] [Rank 0] step:2061/10000 train_time:147214ms step_avg:71.43ms +[2025-09-02 06:08:23] [Rank 0] step:2081/10000 train_time:148653ms step_avg:71.43ms +[2025-09-02 06:08:23] [Rank 0] step:2081/10000 train_time:148653ms step_avg:71.43ms +[2025-09-02 06:08:25] [Rank 0] step:2101/10000 train_time:150092ms step_avg:71.44ms +[2025-09-02 06:08:25] [Rank 0] step:2101/10000 train_time:150092ms step_avg:71.44ms +[2025-09-02 06:08:26] [Rank 0] step:2121/10000 train_time:151533ms step_avg:71.44ms +[2025-09-02 06:08:26] [Rank 0] step:2121/10000 train_time:151533ms step_avg:71.44ms +[2025-09-02 06:08:28] [Rank 0] 
step:2141/10000 train_time:152974ms step_avg:71.45ms +[2025-09-02 06:08:28] [Rank 0] step:2141/10000 train_time:152974ms step_avg:71.45ms +[2025-09-02 06:08:29] [Rank 0] step:2161/10000 train_time:154415ms step_avg:71.46ms +[2025-09-02 06:08:29] [Rank 0] step:2161/10000 train_time:154415ms step_avg:71.46ms +[2025-09-02 06:08:31] [Rank 0] step:2181/10000 train_time:155856ms step_avg:71.46ms +[2025-09-02 06:08:31] [Rank 0] step:2181/10000 train_time:155856ms step_avg:71.46ms +[2025-09-02 06:08:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:08:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:08:44] [Rank 0] PRINT: step:2200/10000 val_loss:4.6217 svd_entropy: attn_qk:H=0.6768,top10E=0.38,eRank=122.7,q75/q25=42.63 attn_vo:H=0.6889,top10E=0.29,eRank=167.5,q75/q25=inf mlp_w1:H=0.6554,top10E=0.44,eRank=95.9,q75/q25=7.62 mlp_w2:H=0.7856,top10E=0.22,eRank=190.4,q75/q25=15.56 vo_prod:H=0.5468,top10E=0.41,eRank=54.6,q75/q25=inf train_time:157442ms step_avg:71.56ms +[2025-09-02 06:08:44] [Rank 0] PRINT: step:2200/10000 val_loss:4.6217 svd_entropy: attn_qk:H=0.6768,top10E=0.38,eRank=122.7,q75/q25=42.63 attn_vo:H=0.6889,top10E=0.29,eRank=167.5,q75/q25=inf mlp_w1:H=0.6554,top10E=0.44,eRank=95.9,q75/q25=7.62 mlp_w2:H=0.7856,top10E=0.22,eRank=190.4,q75/q25=15.56 vo_prod:H=0.5468,top10E=0.41,eRank=54.6,q75/q25=inf train_time:157442ms step_avg:71.56ms +[2025-09-02 06:08:44] [Rank 0] step:2201/10000 train_time:157454ms step_avg:71.54ms +[2025-09-02 06:08:44] [Rank 0] step:2201/10000 train_time:157454ms step_avg:71.54ms +[2025-09-02 06:08:45] [Rank 0] step:2221/10000 train_time:158768ms step_avg:71.48ms +[2025-09-02 06:08:45] [Rank 0] step:2221/10000 train_time:158768ms step_avg:71.48ms +[2025-09-02 06:08:47] [Rank 0] step:2241/10000 train_time:160239ms step_avg:71.50ms +[2025-09-02 06:08:47] 
[Rank 0] step:2241/10000 train_time:160239ms step_avg:71.50ms +[2025-09-02 06:08:48] [Rank 0] step:2261/10000 train_time:161722ms step_avg:71.53ms +[2025-09-02 06:08:48] [Rank 0] step:2261/10000 train_time:161722ms step_avg:71.53ms +[2025-09-02 06:08:50] [Rank 0] step:2281/10000 train_time:163206ms step_avg:71.55ms +[2025-09-02 06:08:50] [Rank 0] step:2281/10000 train_time:163206ms step_avg:71.55ms +[2025-09-02 06:08:51] [Rank 0] step:2301/10000 train_time:164689ms step_avg:71.57ms +[2025-09-02 06:08:51] [Rank 0] step:2301/10000 train_time:164689ms step_avg:71.57ms +[2025-09-02 06:08:53] [Rank 0] step:2321/10000 train_time:166171ms step_avg:71.59ms +[2025-09-02 06:08:53] [Rank 0] step:2321/10000 train_time:166171ms step_avg:71.59ms +[2025-09-02 06:08:54] [Rank 0] step:2341/10000 train_time:167656ms step_avg:71.62ms +[2025-09-02 06:08:54] [Rank 0] step:2341/10000 train_time:167656ms step_avg:71.62ms +[2025-09-02 06:08:56] [Rank 0] step:2361/10000 train_time:169139ms step_avg:71.64ms +[2025-09-02 06:08:56] [Rank 0] step:2361/10000 train_time:169139ms step_avg:71.64ms +[2025-09-02 06:08:57] [Rank 0] step:2381/10000 train_time:170623ms step_avg:71.66ms +[2025-09-02 06:08:57] [Rank 0] step:2381/10000 train_time:170623ms step_avg:71.66ms +[2025-09-02 06:08:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:08:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:09:10] [Rank 0] PRINT: step:2400/10000 val_loss:4.5434 svd_entropy: attn_qk:H=0.6824,top10E=0.37,eRank=125.4,q75/q25=47.42 attn_vo:H=0.6969,top10E=0.28,eRank=173.4,q75/q25=inf mlp_w1:H=0.6673,top10E=0.42,eRank=102.3,q75/q25=8.13 mlp_w2:H=0.7929,top10E=0.21,eRank=199.9,q75/q25=16.29 vo_prod:H=0.5557,top10E=0.40,eRank=58.0,q75/q25=inf train_time:172290ms step_avg:71.79ms +[2025-09-02 06:09:10] [Rank 0] PRINT: step:2400/10000 val_loss:4.5434 svd_entropy: attn_qk:H=0.6824,top10E=0.37,eRank=125.4,q75/q25=47.42 attn_vo:H=0.6969,top10E=0.28,eRank=173.4,q75/q25=inf mlp_w1:H=0.6673,top10E=0.42,eRank=102.3,q75/q25=8.13 mlp_w2:H=0.7929,top10E=0.21,eRank=199.9,q75/q25=16.29 vo_prod:H=0.5557,top10E=0.40,eRank=58.0,q75/q25=inf train_time:172290ms step_avg:71.79ms +[2025-09-02 06:09:10] [Rank 0] step:2401/10000 train_time:172302ms step_avg:71.76ms +[2025-09-02 06:09:10] [Rank 0] step:2401/10000 train_time:172302ms step_avg:71.76ms +[2025-09-02 06:09:12] [Rank 0] step:2421/10000 train_time:173653ms step_avg:71.73ms +[2025-09-02 06:09:12] [Rank 0] step:2421/10000 train_time:173653ms step_avg:71.73ms +[2025-09-02 06:09:13] [Rank 0] step:2441/10000 train_time:175135ms step_avg:71.75ms +[2025-09-02 06:09:13] [Rank 0] step:2441/10000 train_time:175135ms step_avg:71.75ms +[2025-09-02 06:09:15] [Rank 0] step:2461/10000 train_time:176617ms step_avg:71.77ms +[2025-09-02 06:09:15] [Rank 0] step:2461/10000 train_time:176617ms step_avg:71.77ms +[2025-09-02 06:09:16] [Rank 0] step:2481/10000 train_time:178099ms step_avg:71.79ms +[2025-09-02 06:09:16] [Rank 0] step:2481/10000 train_time:178099ms step_avg:71.79ms +[2025-09-02 06:09:18] [Rank 0] step:2501/10000 train_time:179583ms step_avg:71.80ms +[2025-09-02 06:09:18] [Rank 0] step:2501/10000 train_time:179583ms step_avg:71.80ms +[2025-09-02 06:09:19] [Rank 0] step:2521/10000 train_time:181068ms step_avg:71.82ms +[2025-09-02 06:09:19] [Rank 0] step:2521/10000 train_time:181068ms step_avg:71.82ms +[2025-09-02 06:09:21] [Rank 0] 
step:2541/10000 train_time:182550ms step_avg:71.84ms +[2025-09-02 06:09:21] [Rank 0] step:2541/10000 train_time:182550ms step_avg:71.84ms +[2025-09-02 06:09:22] [Rank 0] step:2561/10000 train_time:184033ms step_avg:71.86ms +[2025-09-02 06:09:22] [Rank 0] step:2561/10000 train_time:184033ms step_avg:71.86ms +[2025-09-02 06:09:24] [Rank 0] step:2581/10000 train_time:185517ms step_avg:71.88ms +[2025-09-02 06:09:24] [Rank 0] step:2581/10000 train_time:185517ms step_avg:71.88ms +[2025-09-02 06:09:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:09:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:09:37] [Rank 0] PRINT: step:2600/10000 val_loss:4.4787 svd_entropy: attn_qk:H=0.6881,top10E=0.36,eRank=128.3,q75/q25=51.92 attn_vo:H=0.7040,top10E=0.27,eRank=178.8,q75/q25=inf mlp_w1:H=0.6786,top10E=0.41,eRank=108.7,q75/q25=8.53 mlp_w2:H=0.8006,top10E=0.20,eRank=210.3,q75/q25=16.46 vo_prod:H=0.5640,top10E=0.38,eRank=61.4,q75/q25=inf train_time:187150ms step_avg:71.98ms +[2025-09-02 06:09:37] [Rank 0] PRINT: step:2600/10000 val_loss:4.4787 svd_entropy: attn_qk:H=0.6881,top10E=0.36,eRank=128.3,q75/q25=51.92 attn_vo:H=0.7040,top10E=0.27,eRank=178.8,q75/q25=inf mlp_w1:H=0.6786,top10E=0.41,eRank=108.7,q75/q25=8.53 mlp_w2:H=0.8006,top10E=0.20,eRank=210.3,q75/q25=16.46 vo_prod:H=0.5640,top10E=0.38,eRank=61.4,q75/q25=inf train_time:187150ms step_avg:71.98ms +[2025-09-02 06:09:37] [Rank 0] step:2601/10000 train_time:187162ms step_avg:71.96ms +[2025-09-02 06:09:37] [Rank 0] step:2601/10000 train_time:187162ms step_avg:71.96ms +[2025-09-02 06:09:38] [Rank 0] step:2621/10000 train_time:188507ms step_avg:71.92ms +[2025-09-02 06:09:38] [Rank 0] step:2621/10000 train_time:188507ms step_avg:71.92ms +[2025-09-02 06:09:40] [Rank 0] step:2641/10000 train_time:189989ms step_avg:71.94ms +[2025-09-02 
06:09:40] [Rank 0] step:2641/10000 train_time:189989ms step_avg:71.94ms +[2025-09-02 06:09:41] [Rank 0] step:2661/10000 train_time:191471ms step_avg:71.95ms +[2025-09-02 06:09:41] [Rank 0] step:2661/10000 train_time:191471ms step_avg:71.95ms +[2025-09-02 06:09:43] [Rank 0] step:2681/10000 train_time:192955ms step_avg:71.97ms +[2025-09-02 06:09:43] [Rank 0] step:2681/10000 train_time:192955ms step_avg:71.97ms +[2025-09-02 06:09:44] [Rank 0] step:2701/10000 train_time:194439ms step_avg:71.99ms +[2025-09-02 06:09:44] [Rank 0] step:2701/10000 train_time:194439ms step_avg:71.99ms +[2025-09-02 06:09:46] [Rank 0] step:2721/10000 train_time:195922ms step_avg:72.00ms +[2025-09-02 06:09:46] [Rank 0] step:2721/10000 train_time:195922ms step_avg:72.00ms +[2025-09-02 06:09:47] [Rank 0] step:2741/10000 train_time:197406ms step_avg:72.02ms +[2025-09-02 06:09:47] [Rank 0] step:2741/10000 train_time:197406ms step_avg:72.02ms +[2025-09-02 06:09:49] [Rank 0] step:2761/10000 train_time:198891ms step_avg:72.04ms +[2025-09-02 06:09:49] [Rank 0] step:2761/10000 train_time:198891ms step_avg:72.04ms +[2025-09-02 06:09:50] [Rank 0] step:2781/10000 train_time:200375ms step_avg:72.05ms +[2025-09-02 06:09:50] [Rank 0] step:2781/10000 train_time:200375ms step_avg:72.05ms +[2025-09-02 06:09:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:09:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:10:03] [Rank 0] PRINT: step:2800/10000 val_loss:4.4358 svd_entropy: attn_qk:H=0.6936,top10E=0.35,eRank=131.2,q75/q25=56.04 attn_vo:H=0.7104,top10E=0.26,eRank=183.8,q75/q25=inf mlp_w1:H=0.6885,top10E=0.40,eRank=114.9,q75/q25=8.87 mlp_w2:H=0.8071,top10E=0.19,eRank=219.7,q75/q25=16.62 vo_prod:H=0.5717,top10E=0.37,eRank=64.7,q75/q25=inf train_time:202010ms step_avg:72.15ms +[2025-09-02 06:10:03] [Rank 0] PRINT: step:2800/10000 val_loss:4.4358 svd_entropy: attn_qk:H=0.6936,top10E=0.35,eRank=131.2,q75/q25=56.04 attn_vo:H=0.7104,top10E=0.26,eRank=183.8,q75/q25=inf mlp_w1:H=0.6885,top10E=0.40,eRank=114.9,q75/q25=8.87 mlp_w2:H=0.8071,top10E=0.19,eRank=219.7,q75/q25=16.62 vo_prod:H=0.5717,top10E=0.37,eRank=64.7,q75/q25=inf train_time:202010ms step_avg:72.15ms +[2025-09-02 06:10:03] [Rank 0] step:2801/10000 train_time:202022ms step_avg:72.12ms +[2025-09-02 06:10:03] [Rank 0] step:2801/10000 train_time:202022ms step_avg:72.12ms +[2025-09-02 06:10:05] [Rank 0] step:2821/10000 train_time:203360ms step_avg:72.09ms +[2025-09-02 06:10:05] [Rank 0] step:2821/10000 train_time:203360ms step_avg:72.09ms +[2025-09-02 06:10:06] [Rank 0] step:2841/10000 train_time:204845ms step_avg:72.10ms +[2025-09-02 06:10:06] [Rank 0] step:2841/10000 train_time:204845ms step_avg:72.10ms +[2025-09-02 06:10:08] [Rank 0] step:2861/10000 train_time:206330ms step_avg:72.12ms +[2025-09-02 06:10:08] [Rank 0] step:2861/10000 train_time:206330ms step_avg:72.12ms +[2025-09-02 06:10:09] [Rank 0] step:2881/10000 train_time:207815ms step_avg:72.13ms +[2025-09-02 06:10:09] [Rank 0] step:2881/10000 train_time:207815ms step_avg:72.13ms +[2025-09-02 06:10:11] [Rank 0] step:2901/10000 train_time:209300ms step_avg:72.15ms +[2025-09-02 06:10:11] [Rank 0] step:2901/10000 train_time:209300ms step_avg:72.15ms +[2025-09-02 06:10:12] [Rank 0] step:2921/10000 train_time:210786ms step_avg:72.16ms +[2025-09-02 06:10:12] [Rank 0] step:2921/10000 train_time:210786ms step_avg:72.16ms +[2025-09-02 06:10:14] [Rank 0] 
step:2941/10000 train_time:212272ms step_avg:72.18ms +[2025-09-02 06:10:14] [Rank 0] step:2941/10000 train_time:212272ms step_avg:72.18ms +[2025-09-02 06:10:15] [Rank 0] step:2961/10000 train_time:213758ms step_avg:72.19ms +[2025-09-02 06:10:15] [Rank 0] step:2961/10000 train_time:213758ms step_avg:72.19ms +[2025-09-02 06:10:17] [Rank 0] step:2981/10000 train_time:215250ms step_avg:72.21ms +[2025-09-02 06:10:17] [Rank 0] step:2981/10000 train_time:215250ms step_avg:72.21ms +[2025-09-02 06:10:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:10:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:10:30] [Rank 0] PRINT: step:3000/10000 val_loss:4.3865 svd_entropy: attn_qk:H=0.6983,top10E=0.35,eRank=133.9,q75/q25=59.44 attn_vo:H=0.7162,top10E=0.25,eRank=188.4,q75/q25=inf mlp_w1:H=0.6969,top10E=0.39,eRank=120.6,q75/q25=9.31 mlp_w2:H=0.8124,top10E=0.19,eRank=227.4,q75/q25=16.84 vo_prod:H=0.5784,top10E=0.36,eRank=67.7,q75/q25=inf train_time:216894ms step_avg:72.30ms +[2025-09-02 06:10:30] [Rank 0] PRINT: step:3000/10000 val_loss:4.3865 svd_entropy: attn_qk:H=0.6983,top10E=0.35,eRank=133.9,q75/q25=59.44 attn_vo:H=0.7162,top10E=0.25,eRank=188.4,q75/q25=inf mlp_w1:H=0.6969,top10E=0.39,eRank=120.6,q75/q25=9.31 mlp_w2:H=0.8124,top10E=0.19,eRank=227.4,q75/q25=16.84 vo_prod:H=0.5784,top10E=0.36,eRank=67.7,q75/q25=inf train_time:216894ms step_avg:72.30ms +[2025-09-02 06:10:30] [Rank 0] step:3001/10000 train_time:216906ms step_avg:72.28ms +[2025-09-02 06:10:30] [Rank 0] step:3001/10000 train_time:216906ms step_avg:72.28ms +[2025-09-02 06:10:31] [Rank 0] step:3021/10000 train_time:218271ms step_avg:72.25ms +[2025-09-02 06:10:31] [Rank 0] step:3021/10000 train_time:218271ms step_avg:72.25ms +[2025-09-02 06:10:33] [Rank 0] step:3041/10000 train_time:219761ms step_avg:72.27ms +[2025-09-02 
06:10:33] [Rank 0] step:3041/10000 train_time:219761ms step_avg:72.27ms +[2025-09-02 06:10:34] [Rank 0] step:3061/10000 train_time:221253ms step_avg:72.28ms +[2025-09-02 06:10:34] [Rank 0] step:3061/10000 train_time:221253ms step_avg:72.28ms +[2025-09-02 06:10:36] [Rank 0] step:3081/10000 train_time:222745ms step_avg:72.30ms +[2025-09-02 06:10:36] [Rank 0] step:3081/10000 train_time:222745ms step_avg:72.30ms +[2025-09-02 06:10:37] [Rank 0] step:3101/10000 train_time:224237ms step_avg:72.31ms +[2025-09-02 06:10:37] [Rank 0] step:3101/10000 train_time:224237ms step_avg:72.31ms +[2025-09-02 06:10:39] [Rank 0] step:3121/10000 train_time:225730ms step_avg:72.33ms +[2025-09-02 06:10:39] [Rank 0] step:3121/10000 train_time:225730ms step_avg:72.33ms +[2025-09-02 06:10:40] [Rank 0] step:3141/10000 train_time:227222ms step_avg:72.34ms +[2025-09-02 06:10:40] [Rank 0] step:3141/10000 train_time:227222ms step_avg:72.34ms +[2025-09-02 06:10:42] [Rank 0] step:3161/10000 train_time:228715ms step_avg:72.36ms +[2025-09-02 06:10:42] [Rank 0] step:3161/10000 train_time:228715ms step_avg:72.36ms +[2025-09-02 06:10:43] [Rank 0] step:3181/10000 train_time:230209ms step_avg:72.37ms +[2025-09-02 06:10:43] [Rank 0] step:3181/10000 train_time:230209ms step_avg:72.37ms +[2025-09-02 06:10:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:10:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:10:56] [Rank 0] PRINT: step:3200/10000 val_loss:4.3521 svd_entropy: attn_qk:H=0.7030,top10E=0.34,eRank=136.6,q75/q25=62.97 attn_vo:H=0.7213,top10E=0.24,eRank=192.7,q75/q25=inf mlp_w1:H=0.7049,top10E=0.38,eRank=126.2,q75/q25=9.67 mlp_w2:H=0.8173,top10E=0.18,eRank=234.8,q75/q25=16.99 vo_prod:H=0.5843,top10E=0.35,eRank=70.6,q75/q25=inf train_time:231853ms step_avg:72.45ms +[2025-09-02 06:10:56] [Rank 0] PRINT: step:3200/10000 val_loss:4.3521 svd_entropy: attn_qk:H=0.7030,top10E=0.34,eRank=136.6,q75/q25=62.97 attn_vo:H=0.7213,top10E=0.24,eRank=192.7,q75/q25=inf mlp_w1:H=0.7049,top10E=0.38,eRank=126.2,q75/q25=9.67 mlp_w2:H=0.8173,top10E=0.18,eRank=234.8,q75/q25=16.99 vo_prod:H=0.5843,top10E=0.35,eRank=70.6,q75/q25=inf train_time:231853ms step_avg:72.45ms +[2025-09-02 06:10:57] [Rank 0] step:3201/10000 train_time:231865ms step_avg:72.44ms +[2025-09-02 06:10:57] [Rank 0] step:3201/10000 train_time:231865ms step_avg:72.44ms +[2025-09-02 06:10:58] [Rank 0] step:3221/10000 train_time:233215ms step_avg:72.40ms +[2025-09-02 06:10:58] [Rank 0] step:3221/10000 train_time:233215ms step_avg:72.40ms +[2025-09-02 06:11:00] [Rank 0] step:3241/10000 train_time:234706ms step_avg:72.42ms +[2025-09-02 06:11:00] [Rank 0] step:3241/10000 train_time:234706ms step_avg:72.42ms +[2025-09-02 06:11:01] [Rank 0] step:3261/10000 train_time:236196ms step_avg:72.43ms +[2025-09-02 06:11:01] [Rank 0] step:3261/10000 train_time:236196ms step_avg:72.43ms +[2025-09-02 06:11:03] [Rank 0] step:3281/10000 train_time:237689ms step_avg:72.44ms +[2025-09-02 06:11:03] [Rank 0] step:3281/10000 train_time:237689ms step_avg:72.44ms +[2025-09-02 06:11:04] [Rank 0] step:3301/10000 train_time:239181ms step_avg:72.46ms +[2025-09-02 06:11:04] [Rank 0] step:3301/10000 train_time:239181ms step_avg:72.46ms +[2025-09-02 06:11:05] [Rank 0] step:3321/10000 train_time:240674ms step_avg:72.47ms +[2025-09-02 06:11:05] [Rank 0] step:3321/10000 train_time:240674ms step_avg:72.47ms +[2025-09-02 06:11:07] [Rank 0] 
step:3341/10000 train_time:242166ms step_avg:72.48ms +[2025-09-02 06:11:07] [Rank 0] step:3341/10000 train_time:242166ms step_avg:72.48ms +[2025-09-02 06:11:08] [Rank 0] step:3361/10000 train_time:243658ms step_avg:72.50ms +[2025-09-02 06:11:08] [Rank 0] step:3361/10000 train_time:243658ms step_avg:72.50ms +[2025-09-02 06:11:10] [Rank 0] step:3381/10000 train_time:245151ms step_avg:72.51ms +[2025-09-02 06:11:10] [Rank 0] step:3381/10000 train_time:245151ms step_avg:72.51ms +[2025-09-02 06:11:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:11:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:11:23] [Rank 0] PRINT: step:3400/10000 val_loss:4.3059 svd_entropy: attn_qk:H=0.7074,top10E=0.33,eRank=139.1,q75/q25=66.08 attn_vo:H=0.7263,top10E=0.23,eRank=197.0,q75/q25=inf mlp_w1:H=0.7123,top10E=0.37,eRank=131.8,q75/q25=10.06 mlp_w2:H=0.8214,top10E=0.18,eRank=241.4,q75/q25=17.29 vo_prod:H=0.5903,top10E=0.34,eRank=73.6,q75/q25=inf train_time:246794ms step_avg:72.59ms +[2025-09-02 06:11:23] [Rank 0] PRINT: step:3400/10000 val_loss:4.3059 svd_entropy: attn_qk:H=0.7074,top10E=0.33,eRank=139.1,q75/q25=66.08 attn_vo:H=0.7263,top10E=0.23,eRank=197.0,q75/q25=inf mlp_w1:H=0.7123,top10E=0.37,eRank=131.8,q75/q25=10.06 mlp_w2:H=0.8214,top10E=0.18,eRank=241.4,q75/q25=17.29 vo_prod:H=0.5903,top10E=0.34,eRank=73.6,q75/q25=inf train_time:246794ms step_avg:72.59ms +[2025-09-02 06:11:23] [Rank 0] step:3401/10000 train_time:246807ms step_avg:72.57ms +[2025-09-02 06:11:23] [Rank 0] step:3401/10000 train_time:246807ms step_avg:72.57ms +[2025-09-02 06:11:25] [Rank 0] step:3421/10000 train_time:248156ms step_avg:72.54ms +[2025-09-02 06:11:25] [Rank 0] step:3421/10000 train_time:248156ms step_avg:72.54ms +[2025-09-02 06:11:26] [Rank 0] step:3441/10000 train_time:249647ms step_avg:72.55ms +[2025-09-02 
06:11:26] [Rank 0] step:3441/10000 train_time:249647ms step_avg:72.55ms +[2025-09-02 06:11:28] [Rank 0] step:3461/10000 train_time:251139ms step_avg:72.56ms +[2025-09-02 06:11:28] [Rank 0] step:3461/10000 train_time:251139ms step_avg:72.56ms +[2025-09-02 06:11:29] [Rank 0] step:3481/10000 train_time:252629ms step_avg:72.57ms +[2025-09-02 06:11:29] [Rank 0] step:3481/10000 train_time:252629ms step_avg:72.57ms +[2025-09-02 06:11:31] [Rank 0] step:3501/10000 train_time:254121ms step_avg:72.59ms +[2025-09-02 06:11:31] [Rank 0] step:3501/10000 train_time:254121ms step_avg:72.59ms +[2025-09-02 06:11:32] [Rank 0] step:3521/10000 train_time:255615ms step_avg:72.60ms +[2025-09-02 06:11:32] [Rank 0] step:3521/10000 train_time:255615ms step_avg:72.60ms +[2025-09-02 06:11:34] [Rank 0] step:3541/10000 train_time:257107ms step_avg:72.61ms +[2025-09-02 06:11:34] [Rank 0] step:3541/10000 train_time:257107ms step_avg:72.61ms +[2025-09-02 06:11:35] [Rank 0] step:3561/10000 train_time:258599ms step_avg:72.62ms +[2025-09-02 06:11:35] [Rank 0] step:3561/10000 train_time:258599ms step_avg:72.62ms +[2025-09-02 06:11:37] [Rank 0] step:3581/10000 train_time:260092ms step_avg:72.63ms +[2025-09-02 06:11:37] [Rank 0] step:3581/10000 train_time:260092ms step_avg:72.63ms +[2025-09-02 06:11:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:11:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:11:50] [Rank 0] PRINT: step:3600/10000 val_loss:4.2901 svd_entropy: attn_qk:H=0.7115,top10E=0.32,eRank=141.8,q75/q25=68.98 attn_vo:H=0.7306,top10E=0.23,eRank=200.9,q75/q25=inf mlp_w1:H=0.7191,top10E=0.36,eRank=137.1,q75/q25=10.48 mlp_w2:H=0.8251,top10E=0.17,eRank=247.3,q75/q25=17.48 vo_prod:H=0.5954,top10E=0.33,eRank=76.2,q75/q25=inf train_time:261737ms step_avg:72.70ms +[2025-09-02 06:11:50] [Rank 0] PRINT: step:3600/10000 val_loss:4.2901 svd_entropy: attn_qk:H=0.7115,top10E=0.32,eRank=141.8,q75/q25=68.98 attn_vo:H=0.7306,top10E=0.23,eRank=200.9,q75/q25=inf mlp_w1:H=0.7191,top10E=0.36,eRank=137.1,q75/q25=10.48 mlp_w2:H=0.8251,top10E=0.17,eRank=247.3,q75/q25=17.48 vo_prod:H=0.5954,top10E=0.33,eRank=76.2,q75/q25=inf train_time:261737ms step_avg:72.70ms +[2025-09-02 06:11:50] [Rank 0] step:3601/10000 train_time:261749ms step_avg:72.69ms +[2025-09-02 06:11:50] [Rank 0] step:3601/10000 train_time:261749ms step_avg:72.69ms +[2025-09-02 06:11:51] [Rank 0] step:3621/10000 train_time:263110ms step_avg:72.66ms +[2025-09-02 06:11:51] [Rank 0] step:3621/10000 train_time:263110ms step_avg:72.66ms +[2025-09-02 06:11:53] [Rank 0] step:3641/10000 train_time:264603ms step_avg:72.67ms +[2025-09-02 06:11:53] [Rank 0] step:3641/10000 train_time:264603ms step_avg:72.67ms +[2025-09-02 06:11:54] [Rank 0] step:3661/10000 train_time:266095ms step_avg:72.68ms +[2025-09-02 06:11:54] [Rank 0] step:3661/10000 train_time:266095ms step_avg:72.68ms +[2025-09-02 06:11:56] [Rank 0] step:3681/10000 train_time:267589ms step_avg:72.69ms +[2025-09-02 06:11:56] [Rank 0] step:3681/10000 train_time:267589ms step_avg:72.69ms +[2025-09-02 06:11:57] [Rank 0] step:3701/10000 train_time:269082ms step_avg:72.71ms +[2025-09-02 06:11:57] [Rank 0] step:3701/10000 train_time:269082ms step_avg:72.71ms +[2025-09-02 06:11:59] [Rank 0] step:3721/10000 train_time:270602ms step_avg:72.72ms +[2025-09-02 06:11:59] [Rank 0] step:3721/10000 train_time:270602ms step_avg:72.72ms +[2025-09-02 06:12:00] [Rank 0] 
step:3741/10000 train_time:272133ms step_avg:72.74ms +[2025-09-02 06:12:00] [Rank 0] step:3741/10000 train_time:272133ms step_avg:72.74ms +[2025-09-02 06:12:02] [Rank 0] step:3761/10000 train_time:273662ms step_avg:72.76ms +[2025-09-02 06:12:02] [Rank 0] step:3761/10000 train_time:273662ms step_avg:72.76ms +[2025-09-02 06:12:03] [Rank 0] step:3781/10000 train_time:275193ms step_avg:72.78ms +[2025-09-02 06:12:03] [Rank 0] step:3781/10000 train_time:275193ms step_avg:72.78ms +[2025-09-02 06:12:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:12:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:12:16] [Rank 0] PRINT: step:3800/10000 val_loss:4.2303 svd_entropy: attn_qk:H=0.7148,top10E=0.32,eRank=143.9,q75/q25=71.67 attn_vo:H=0.7348,top10E=0.22,eRank=204.7,q75/q25=inf mlp_w1:H=0.7253,top10E=0.35,eRank=142.2,q75/q25=10.87 mlp_w2:H=0.8284,top10E=0.17,eRank=252.7,q75/q25=17.71 vo_prod:H=0.6001,top10E=0.32,eRank=78.7,q75/q25=inf train_time:276877ms step_avg:72.86ms +[2025-09-02 06:12:16] [Rank 0] PRINT: step:3800/10000 val_loss:4.2303 svd_entropy: attn_qk:H=0.7148,top10E=0.32,eRank=143.9,q75/q25=71.67 attn_vo:H=0.7348,top10E=0.22,eRank=204.7,q75/q25=inf mlp_w1:H=0.7253,top10E=0.35,eRank=142.2,q75/q25=10.87 mlp_w2:H=0.8284,top10E=0.17,eRank=252.7,q75/q25=17.71 vo_prod:H=0.6001,top10E=0.32,eRank=78.7,q75/q25=inf train_time:276877ms step_avg:72.86ms +[2025-09-02 06:12:17] [Rank 0] step:3801/10000 train_time:276889ms step_avg:72.85ms +[2025-09-02 06:12:17] [Rank 0] step:3801/10000 train_time:276889ms step_avg:72.85ms +[2025-09-02 06:12:18] [Rank 0] step:3821/10000 train_time:278289ms step_avg:72.83ms +[2025-09-02 06:12:18] [Rank 0] step:3821/10000 train_time:278289ms step_avg:72.83ms +[2025-09-02 06:12:20] [Rank 0] step:3841/10000 train_time:279818ms step_avg:72.85ms +[2025-09-02 
06:12:20] [Rank 0] step:3841/10000 train_time:279818ms step_avg:72.85ms +[2025-09-02 06:12:21] [Rank 0] step:3861/10000 train_time:281347ms step_avg:72.87ms +[2025-09-02 06:12:21] [Rank 0] step:3861/10000 train_time:281347ms step_avg:72.87ms +[2025-09-02 06:12:23] [Rank 0] step:3881/10000 train_time:282875ms step_avg:72.89ms +[2025-09-02 06:12:23] [Rank 0] step:3881/10000 train_time:282875ms step_avg:72.89ms +[2025-09-02 06:12:24] [Rank 0] step:3901/10000 train_time:284403ms step_avg:72.91ms +[2025-09-02 06:12:24] [Rank 0] step:3901/10000 train_time:284403ms step_avg:72.91ms +[2025-09-02 06:12:26] [Rank 0] step:3921/10000 train_time:285930ms step_avg:72.92ms +[2025-09-02 06:12:26] [Rank 0] step:3921/10000 train_time:285930ms step_avg:72.92ms +[2025-09-02 06:12:27] [Rank 0] step:3941/10000 train_time:287459ms step_avg:72.94ms +[2025-09-02 06:12:27] [Rank 0] step:3941/10000 train_time:287459ms step_avg:72.94ms +[2025-09-02 06:12:29] [Rank 0] step:3961/10000 train_time:288987ms step_avg:72.96ms +[2025-09-02 06:12:29] [Rank 0] step:3961/10000 train_time:288987ms step_avg:72.96ms +[2025-09-02 06:12:30] [Rank 0] step:3981/10000 train_time:290515ms step_avg:72.98ms +[2025-09-02 06:12:30] [Rank 0] step:3981/10000 train_time:290515ms step_avg:72.98ms +[2025-09-02 06:12:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:12:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:12:43] [Rank 0] PRINT: step:4000/10000 val_loss:4.2021 svd_entropy: attn_qk:H=0.7182,top10E=0.32,eRank=146.1,q75/q25=72.08 attn_vo:H=0.7382,top10E=0.22,eRank=207.8,q75/q25=inf mlp_w1:H=0.7316,top10E=0.34,eRank=147.6,q75/q25=11.21 mlp_w2:H=0.8319,top10E=0.17,eRank=258.4,q75/q25=17.67 vo_prod:H=0.6039,top10E=0.31,eRank=81.0,q75/q25=inf train_time:292194ms step_avg:73.05ms +[2025-09-02 06:12:43] [Rank 0] PRINT: step:4000/10000 val_loss:4.2021 svd_entropy: attn_qk:H=0.7182,top10E=0.32,eRank=146.1,q75/q25=72.08 attn_vo:H=0.7382,top10E=0.22,eRank=207.8,q75/q25=inf mlp_w1:H=0.7316,top10E=0.34,eRank=147.6,q75/q25=11.21 mlp_w2:H=0.8319,top10E=0.17,eRank=258.4,q75/q25=17.67 vo_prod:H=0.6039,top10E=0.31,eRank=81.0,q75/q25=inf train_time:292194ms step_avg:73.05ms +[2025-09-02 06:12:44] [Rank 0] step:4001/10000 train_time:292207ms step_avg:73.03ms +[2025-09-02 06:12:44] [Rank 0] step:4001/10000 train_time:292207ms step_avg:73.03ms +[2025-09-02 06:12:45] [Rank 0] step:4021/10000 train_time:293585ms step_avg:73.01ms +[2025-09-02 06:12:45] [Rank 0] step:4021/10000 train_time:293585ms step_avg:73.01ms +[2025-09-02 06:12:47] [Rank 0] step:4041/10000 train_time:295112ms step_avg:73.03ms +[2025-09-02 06:12:47] [Rank 0] step:4041/10000 train_time:295112ms step_avg:73.03ms +[2025-09-02 06:12:48] [Rank 0] step:4061/10000 train_time:296640ms step_avg:73.05ms +[2025-09-02 06:12:48] [Rank 0] step:4061/10000 train_time:296640ms step_avg:73.05ms +[2025-09-02 06:12:50] [Rank 0] step:4081/10000 train_time:298276ms step_avg:73.09ms +[2025-09-02 06:12:50] [Rank 0] step:4081/10000 train_time:298276ms step_avg:73.09ms +[2025-09-02 06:12:51] [Rank 0] step:4101/10000 train_time:299806ms step_avg:73.11ms +[2025-09-02 06:12:51] [Rank 0] step:4101/10000 train_time:299806ms step_avg:73.11ms +[2025-09-02 06:12:53] [Rank 0] step:4121/10000 train_time:301333ms step_avg:73.12ms +[2025-09-02 06:12:53] [Rank 0] step:4121/10000 train_time:301333ms step_avg:73.12ms +[2025-09-02 06:12:54] [Rank 0] 
step:4141/10000 train_time:302861ms step_avg:73.14ms +[2025-09-02 06:12:54] [Rank 0] step:4141/10000 train_time:302861ms step_avg:73.14ms +[2025-09-02 06:12:56] [Rank 0] step:4161/10000 train_time:304389ms step_avg:73.15ms +[2025-09-02 06:12:56] [Rank 0] step:4161/10000 train_time:304389ms step_avg:73.15ms +[2025-09-02 06:12:57] [Rank 0] step:4181/10000 train_time:305918ms step_avg:73.17ms +[2025-09-02 06:12:57] [Rank 0] step:4181/10000 train_time:305918ms step_avg:73.17ms +[2025-09-02 06:12:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:12:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:13:11] [Rank 0] PRINT: step:4200/10000 val_loss:4.1853 svd_entropy: attn_qk:H=0.7215,top10E=0.31,eRank=148.3,q75/q25=74.09 attn_vo:H=0.7418,top10E=0.21,eRank=211.3,q75/q25=inf mlp_w1:H=0.7370,top10E=0.33,eRank=152.4,q75/q25=11.60 mlp_w2:H=0.8348,top10E=0.16,eRank=263.3,q75/q25=17.74 vo_prod:H=0.6077,top10E=0.31,eRank=83.2,q75/q25=inf train_time:307600ms step_avg:73.24ms +[2025-09-02 06:13:11] [Rank 0] PRINT: step:4200/10000 val_loss:4.1853 svd_entropy: attn_qk:H=0.7215,top10E=0.31,eRank=148.3,q75/q25=74.09 attn_vo:H=0.7418,top10E=0.21,eRank=211.3,q75/q25=inf mlp_w1:H=0.7370,top10E=0.33,eRank=152.4,q75/q25=11.60 mlp_w2:H=0.8348,top10E=0.16,eRank=263.3,q75/q25=17.74 vo_prod:H=0.6077,top10E=0.31,eRank=83.2,q75/q25=inf train_time:307600ms step_avg:73.24ms +[2025-09-02 06:13:11] [Rank 0] step:4201/10000 train_time:307612ms step_avg:73.22ms +[2025-09-02 06:13:11] [Rank 0] step:4201/10000 train_time:307612ms step_avg:73.22ms +[2025-09-02 06:13:12] [Rank 0] step:4221/10000 train_time:309003ms step_avg:73.21ms +[2025-09-02 06:13:12] [Rank 0] step:4221/10000 train_time:309003ms step_avg:73.21ms +[2025-09-02 06:13:14] [Rank 0] step:4241/10000 train_time:310531ms step_avg:73.22ms +[2025-09-02 
06:13:14] [Rank 0] step:4241/10000 train_time:310531ms step_avg:73.22ms +[2025-09-02 06:13:15] [Rank 0] step:4261/10000 train_time:312058ms step_avg:73.24ms +[2025-09-02 06:13:15] [Rank 0] step:4261/10000 train_time:312058ms step_avg:73.24ms +[2025-09-02 06:13:17] [Rank 0] step:4281/10000 train_time:313584ms step_avg:73.25ms +[2025-09-02 06:13:17] [Rank 0] step:4281/10000 train_time:313584ms step_avg:73.25ms +[2025-09-02 06:13:18] [Rank 0] step:4301/10000 train_time:315112ms step_avg:73.26ms +[2025-09-02 06:13:18] [Rank 0] step:4301/10000 train_time:315112ms step_avg:73.26ms +[2025-09-02 06:13:20] [Rank 0] step:4321/10000 train_time:316642ms step_avg:73.28ms +[2025-09-02 06:13:20] [Rank 0] step:4321/10000 train_time:316642ms step_avg:73.28ms +[2025-09-02 06:13:21] [Rank 0] step:4341/10000 train_time:318171ms step_avg:73.29ms +[2025-09-02 06:13:21] [Rank 0] step:4341/10000 train_time:318171ms step_avg:73.29ms +[2025-09-02 06:13:23] [Rank 0] step:4361/10000 train_time:319699ms step_avg:73.31ms +[2025-09-02 06:13:23] [Rank 0] step:4361/10000 train_time:319699ms step_avg:73.31ms +[2025-09-02 06:13:24] [Rank 0] step:4381/10000 train_time:321228ms step_avg:73.32ms +[2025-09-02 06:13:24] [Rank 0] step:4381/10000 train_time:321228ms step_avg:73.32ms +[2025-09-02 06:13:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:13:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:13:38] [Rank 0] PRINT: step:4400/10000 val_loss:4.1601 svd_entropy: attn_qk:H=0.7244,top10E=0.31,eRank=150.3,q75/q25=75.61 attn_vo:H=0.7451,top10E=0.21,eRank=214.5,q75/q25=inf mlp_w1:H=0.7424,top10E=0.33,eRank=157.2,q75/q25=11.95 mlp_w2:H=0.8375,top10E=0.16,eRank=267.9,q75/q25=17.84 vo_prod:H=0.6116,top10E=0.30,eRank=85.5,q75/q25=inf train_time:322910ms step_avg:73.39ms +[2025-09-02 06:13:38] [Rank 0] PRINT: step:4400/10000 val_loss:4.1601 svd_entropy: attn_qk:H=0.7244,top10E=0.31,eRank=150.3,q75/q25=75.61 attn_vo:H=0.7451,top10E=0.21,eRank=214.5,q75/q25=inf mlp_w1:H=0.7424,top10E=0.33,eRank=157.2,q75/q25=11.95 mlp_w2:H=0.8375,top10E=0.16,eRank=267.9,q75/q25=17.84 vo_prod:H=0.6116,top10E=0.30,eRank=85.5,q75/q25=inf train_time:322910ms step_avg:73.39ms +[2025-09-02 06:13:38] [Rank 0] step:4401/10000 train_time:322922ms step_avg:73.37ms +[2025-09-02 06:13:38] [Rank 0] step:4401/10000 train_time:322922ms step_avg:73.37ms +[2025-09-02 06:13:39] [Rank 0] step:4421/10000 train_time:324297ms step_avg:73.35ms +[2025-09-02 06:13:39] [Rank 0] step:4421/10000 train_time:324297ms step_avg:73.35ms +[2025-09-02 06:13:41] [Rank 0] step:4441/10000 train_time:325822ms step_avg:73.37ms +[2025-09-02 06:13:41] [Rank 0] step:4441/10000 train_time:325822ms step_avg:73.37ms +[2025-09-02 06:13:42] [Rank 0] step:4461/10000 train_time:327356ms step_avg:73.38ms +[2025-09-02 06:13:42] [Rank 0] step:4461/10000 train_time:327356ms step_avg:73.38ms +[2025-09-02 06:13:44] [Rank 0] step:4481/10000 train_time:328890ms step_avg:73.40ms +[2025-09-02 06:13:44] [Rank 0] step:4481/10000 train_time:328890ms step_avg:73.40ms +[2025-09-02 06:13:45] [Rank 0] step:4501/10000 train_time:330424ms step_avg:73.41ms +[2025-09-02 06:13:45] [Rank 0] step:4501/10000 train_time:330424ms step_avg:73.41ms +[2025-09-02 06:13:47] [Rank 0] step:4521/10000 train_time:331958ms step_avg:73.43ms +[2025-09-02 06:13:47] [Rank 0] step:4521/10000 train_time:331958ms step_avg:73.43ms +[2025-09-02 06:13:48] [Rank 0] 
step:4541/10000 train_time:333492ms step_avg:73.44ms +[2025-09-02 06:13:48] [Rank 0] step:4541/10000 train_time:333492ms step_avg:73.44ms +[2025-09-02 06:13:50] [Rank 0] step:4561/10000 train_time:335030ms step_avg:73.46ms +[2025-09-02 06:13:50] [Rank 0] step:4561/10000 train_time:335030ms step_avg:73.46ms +[2025-09-02 06:13:51] [Rank 0] step:4581/10000 train_time:336567ms step_avg:73.47ms +[2025-09-02 06:13:51] [Rank 0] step:4581/10000 train_time:336567ms step_avg:73.47ms +[2025-09-02 06:13:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:13:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:14:05] [Rank 0] PRINT: step:4600/10000 val_loss:4.1268 svd_entropy: attn_qk:H=0.7273,top10E=0.30,eRank=152.4,q75/q25=77.52 attn_vo:H=0.7482,top10E=0.20,eRank=217.7,q75/q25=inf mlp_w1:H=0.7474,top10E=0.32,eRank=162.0,q75/q25=12.36 mlp_w2:H=0.8400,top10E=0.16,eRank=272.3,q75/q25=17.92 vo_prod:H=0.6156,top10E=0.30,eRank=87.8,q75/q25=inf train_time:338257ms step_avg:73.53ms +[2025-09-02 06:14:05] [Rank 0] PRINT: step:4600/10000 val_loss:4.1268 svd_entropy: attn_qk:H=0.7273,top10E=0.30,eRank=152.4,q75/q25=77.52 attn_vo:H=0.7482,top10E=0.20,eRank=217.7,q75/q25=inf mlp_w1:H=0.7474,top10E=0.32,eRank=162.0,q75/q25=12.36 mlp_w2:H=0.8400,top10E=0.16,eRank=272.3,q75/q25=17.92 vo_prod:H=0.6156,top10E=0.30,eRank=87.8,q75/q25=inf train_time:338257ms step_avg:73.53ms +[2025-09-02 06:14:05] [Rank 0] step:4601/10000 train_time:338270ms step_avg:73.52ms +[2025-09-02 06:14:05] [Rank 0] step:4601/10000 train_time:338270ms step_avg:73.52ms +[2025-09-02 06:14:06] [Rank 0] step:4621/10000 train_time:339661ms step_avg:73.50ms +[2025-09-02 06:14:06] [Rank 0] step:4621/10000 train_time:339661ms step_avg:73.50ms +[2025-09-02 06:14:08] [Rank 0] step:4641/10000 train_time:341196ms step_avg:73.52ms +[2025-09-02 
06:14:08] [Rank 0] step:4641/10000 train_time:341196ms step_avg:73.52ms +[2025-09-02 06:14:09] [Rank 0] step:4661/10000 train_time:342732ms step_avg:73.53ms +[2025-09-02 06:14:09] [Rank 0] step:4661/10000 train_time:342732ms step_avg:73.53ms +[2025-09-02 06:14:11] [Rank 0] step:4681/10000 train_time:344268ms step_avg:73.55ms +[2025-09-02 06:14:11] [Rank 0] step:4681/10000 train_time:344268ms step_avg:73.55ms +[2025-09-02 06:14:12] [Rank 0] step:4701/10000 train_time:345805ms step_avg:73.56ms +[2025-09-02 06:14:12] [Rank 0] step:4701/10000 train_time:345805ms step_avg:73.56ms +[2025-09-02 06:14:14] [Rank 0] step:4721/10000 train_time:347342ms step_avg:73.57ms +[2025-09-02 06:14:14] [Rank 0] step:4721/10000 train_time:347342ms step_avg:73.57ms +[2025-09-02 06:14:15] [Rank 0] step:4741/10000 train_time:348879ms step_avg:73.59ms +[2025-09-02 06:14:15] [Rank 0] step:4741/10000 train_time:348879ms step_avg:73.59ms +[2025-09-02 06:14:17] [Rank 0] step:4761/10000 train_time:350415ms step_avg:73.60ms +[2025-09-02 06:14:17] [Rank 0] step:4761/10000 train_time:350415ms step_avg:73.60ms +[2025-09-02 06:14:18] [Rank 0] step:4781/10000 train_time:351950ms step_avg:73.61ms +[2025-09-02 06:14:18] [Rank 0] step:4781/10000 train_time:351950ms step_avg:73.61ms +[2025-09-02 06:14:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:14:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:14:32] [Rank 0] PRINT: step:4800/10000 val_loss:4.1132 svd_entropy: attn_qk:H=0.7301,top10E=0.30,eRank=154.4,q75/q25=79.48 attn_vo:H=0.7512,top10E=0.20,eRank=220.7,q75/q25=inf mlp_w1:H=0.7519,top10E=0.31,eRank=166.4,q75/q25=12.72 mlp_w2:H=0.8423,top10E=0.15,eRank=276.4,q75/q25=17.97 vo_prod:H=0.6191,top10E=0.29,eRank=90.1,q75/q25=inf train_time:353642ms step_avg:73.68ms +[2025-09-02 06:14:32] [Rank 0] PRINT: step:4800/10000 val_loss:4.1132 svd_entropy: attn_qk:H=0.7301,top10E=0.30,eRank=154.4,q75/q25=79.48 attn_vo:H=0.7512,top10E=0.20,eRank=220.7,q75/q25=inf mlp_w1:H=0.7519,top10E=0.31,eRank=166.4,q75/q25=12.72 mlp_w2:H=0.8423,top10E=0.15,eRank=276.4,q75/q25=17.97 vo_prod:H=0.6191,top10E=0.29,eRank=90.1,q75/q25=inf train_time:353642ms step_avg:73.68ms +[2025-09-02 06:14:32] [Rank 0] step:4801/10000 train_time:353654ms step_avg:73.66ms +[2025-09-02 06:14:32] [Rank 0] step:4801/10000 train_time:353654ms step_avg:73.66ms +[2025-09-02 06:14:33] [Rank 0] step:4821/10000 train_time:355044ms step_avg:73.65ms +[2025-09-02 06:14:33] [Rank 0] step:4821/10000 train_time:355044ms step_avg:73.65ms +[2025-09-02 06:14:35] [Rank 0] step:4841/10000 train_time:356579ms step_avg:73.66ms +[2025-09-02 06:14:35] [Rank 0] step:4841/10000 train_time:356579ms step_avg:73.66ms +[2025-09-02 06:14:36] [Rank 0] step:4861/10000 train_time:358116ms step_avg:73.67ms +[2025-09-02 06:14:36] [Rank 0] step:4861/10000 train_time:358116ms step_avg:73.67ms +[2025-09-02 06:14:38] [Rank 0] step:4881/10000 train_time:359649ms step_avg:73.68ms +[2025-09-02 06:14:38] [Rank 0] step:4881/10000 train_time:359649ms step_avg:73.68ms +[2025-09-02 06:14:39] [Rank 0] step:4901/10000 train_time:361181ms step_avg:73.70ms +[2025-09-02 06:14:39] [Rank 0] step:4901/10000 train_time:361181ms step_avg:73.70ms +[2025-09-02 06:14:41] [Rank 0] step:4921/10000 train_time:362718ms step_avg:73.71ms +[2025-09-02 06:14:41] [Rank 0] step:4921/10000 train_time:362718ms step_avg:73.71ms +[2025-09-02 06:14:42] [Rank 0] 
step:4941/10000 train_time:364255ms step_avg:73.72ms +[2025-09-02 06:14:42] [Rank 0] step:4941/10000 train_time:364255ms step_avg:73.72ms +[2025-09-02 06:14:44] [Rank 0] step:4961/10000 train_time:365789ms step_avg:73.73ms +[2025-09-02 06:14:44] [Rank 0] step:4961/10000 train_time:365789ms step_avg:73.73ms +[2025-09-02 06:14:46] [Rank 0] step:4981/10000 train_time:367328ms step_avg:73.75ms +[2025-09-02 06:14:46] [Rank 0] step:4981/10000 train_time:367328ms step_avg:73.75ms +[2025-09-02 06:14:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:14:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:14:59] [Rank 0] PRINT: step:5000/10000 val_loss:4.0910 svd_entropy: attn_qk:H=0.7326,top10E=0.29,eRank=156.3,q75/q25=80.06 attn_vo:H=0.7540,top10E=0.19,eRank=223.6,q75/q25=inf mlp_w1:H=0.7560,top10E=0.31,eRank=170.5,q75/q25=13.03 mlp_w2:H=0.8444,top10E=0.15,eRank=280.2,q75/q25=17.95 vo_prod:H=0.6225,top10E=0.29,eRank=92.2,q75/q25=inf train_time:369018ms step_avg:73.80ms +[2025-09-02 06:14:59] [Rank 0] PRINT: step:5000/10000 val_loss:4.0910 svd_entropy: attn_qk:H=0.7326,top10E=0.29,eRank=156.3,q75/q25=80.06 attn_vo:H=0.7540,top10E=0.19,eRank=223.6,q75/q25=inf mlp_w1:H=0.7560,top10E=0.31,eRank=170.5,q75/q25=13.03 mlp_w2:H=0.8444,top10E=0.15,eRank=280.2,q75/q25=17.95 vo_prod:H=0.6225,top10E=0.29,eRank=92.2,q75/q25=inf train_time:369018ms step_avg:73.80ms +[2025-09-02 06:14:59] [Rank 0] step:5001/10000 train_time:369031ms step_avg:73.79ms +[2025-09-02 06:14:59] [Rank 0] step:5001/10000 train_time:369031ms step_avg:73.79ms +[2025-09-02 06:15:00] [Rank 0] step:5021/10000 train_time:370411ms step_avg:73.77ms +[2025-09-02 06:15:00] [Rank 0] step:5021/10000 train_time:370411ms step_avg:73.77ms +[2025-09-02 06:15:02] [Rank 0] step:5041/10000 train_time:371946ms step_avg:73.78ms +[2025-09-02 
06:15:02] [Rank 0] step:5041/10000 train_time:371946ms step_avg:73.78ms +[2025-09-02 06:15:03] [Rank 0] step:5061/10000 train_time:373478ms step_avg:73.80ms +[2025-09-02 06:15:03] [Rank 0] step:5061/10000 train_time:373478ms step_avg:73.80ms +[2025-09-02 06:15:05] [Rank 0] step:5081/10000 train_time:375012ms step_avg:73.81ms +[2025-09-02 06:15:05] [Rank 0] step:5081/10000 train_time:375012ms step_avg:73.81ms +[2025-09-02 06:15:06] [Rank 0] step:5101/10000 train_time:376546ms step_avg:73.82ms +[2025-09-02 06:15:06] [Rank 0] step:5101/10000 train_time:376546ms step_avg:73.82ms +[2025-09-02 06:15:08] [Rank 0] step:5121/10000 train_time:378080ms step_avg:73.83ms +[2025-09-02 06:15:08] [Rank 0] step:5121/10000 train_time:378080ms step_avg:73.83ms +[2025-09-02 06:15:10] [Rank 0] step:5141/10000 train_time:379615ms step_avg:73.84ms +[2025-09-02 06:15:10] [Rank 0] step:5141/10000 train_time:379615ms step_avg:73.84ms +[2025-09-02 06:15:11] [Rank 0] step:5161/10000 train_time:381149ms step_avg:73.85ms +[2025-09-02 06:15:11] [Rank 0] step:5161/10000 train_time:381149ms step_avg:73.85ms +[2025-09-02 06:15:13] [Rank 0] step:5181/10000 train_time:382687ms step_avg:73.86ms +[2025-09-02 06:15:13] [Rank 0] step:5181/10000 train_time:382687ms step_avg:73.86ms +[2025-09-02 06:15:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:15:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:15:26] [Rank 0] PRINT: step:5200/10000 val_loss:4.0681 svd_entropy: attn_qk:H=0.7350,top10E=0.29,eRank=158.1,q75/q25=80.60 attn_vo:H=0.7566,top10E=0.19,eRank=226.3,q75/q25=inf mlp_w1:H=0.7600,top10E=0.30,eRank=174.7,q75/q25=13.35 mlp_w2:H=0.8465,top10E=0.15,eRank=283.9,q75/q25=17.94 vo_prod:H=0.6256,top10E=0.28,eRank=94.2,q75/q25=inf train_time:384400ms step_avg:73.92ms +[2025-09-02 06:15:26] [Rank 0] PRINT: step:5200/10000 val_loss:4.0681 svd_entropy: attn_qk:H=0.7350,top10E=0.29,eRank=158.1,q75/q25=80.60 attn_vo:H=0.7566,top10E=0.19,eRank=226.3,q75/q25=inf mlp_w1:H=0.7600,top10E=0.30,eRank=174.7,q75/q25=13.35 mlp_w2:H=0.8465,top10E=0.15,eRank=283.9,q75/q25=17.94 vo_prod:H=0.6256,top10E=0.28,eRank=94.2,q75/q25=inf train_time:384400ms step_avg:73.92ms +[2025-09-02 06:15:26] [Rank 0] step:5201/10000 train_time:384413ms step_avg:73.91ms +[2025-09-02 06:15:26] [Rank 0] step:5201/10000 train_time:384413ms step_avg:73.91ms +[2025-09-02 06:15:27] [Rank 0] step:5221/10000 train_time:385826ms step_avg:73.90ms +[2025-09-02 06:15:27] [Rank 0] step:5221/10000 train_time:385826ms step_avg:73.90ms +[2025-09-02 06:15:29] [Rank 0] step:5241/10000 train_time:387391ms step_avg:73.92ms +[2025-09-02 06:15:29] [Rank 0] step:5241/10000 train_time:387391ms step_avg:73.92ms +[2025-09-02 06:15:31] [Rank 0] step:5261/10000 train_time:388956ms step_avg:73.93ms +[2025-09-02 06:15:31] [Rank 0] step:5261/10000 train_time:388956ms step_avg:73.93ms +[2025-09-02 06:15:32] [Rank 0] step:5281/10000 train_time:390523ms step_avg:73.95ms +[2025-09-02 06:15:32] [Rank 0] step:5281/10000 train_time:390523ms step_avg:73.95ms +[2025-09-02 06:15:34] [Rank 0] step:5301/10000 train_time:392100ms step_avg:73.97ms +[2025-09-02 06:15:34] [Rank 0] step:5301/10000 train_time:392100ms step_avg:73.97ms +[2025-09-02 06:15:35] [Rank 0] step:5321/10000 train_time:393663ms step_avg:73.98ms +[2025-09-02 06:15:35] [Rank 0] step:5321/10000 train_time:393663ms step_avg:73.98ms +[2025-09-02 06:15:37] [Rank 0] 
step:5341/10000 train_time:395230ms step_avg:74.00ms +[2025-09-02 06:15:37] [Rank 0] step:5341/10000 train_time:395230ms step_avg:74.00ms +[2025-09-02 06:15:38] [Rank 0] step:5361/10000 train_time:396800ms step_avg:74.02ms +[2025-09-02 06:15:38] [Rank 0] step:5361/10000 train_time:396800ms step_avg:74.02ms +[2025-09-02 06:15:40] [Rank 0] step:5381/10000 train_time:398371ms step_avg:74.03ms +[2025-09-02 06:15:40] [Rank 0] step:5381/10000 train_time:398371ms step_avg:74.03ms +[2025-09-02 06:15:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:15:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:15:53] [Rank 0] PRINT: step:5400/10000 val_loss:4.0487 svd_entropy: attn_qk:H=0.7373,top10E=0.29,eRank=159.8,q75/q25=81.24 attn_vo:H=0.7590,top10E=0.19,eRank=228.9,q75/q25=inf mlp_w1:H=0.7638,top10E=0.30,eRank=178.7,q75/q25=13.65 mlp_w2:H=0.8484,top10E=0.15,eRank=287.4,q75/q25=18.04 vo_prod:H=0.6288,top10E=0.28,eRank=96.2,q75/q25=inf train_time:400095ms step_avg:74.09ms +[2025-09-02 06:15:53] [Rank 0] PRINT: step:5400/10000 val_loss:4.0487 svd_entropy: attn_qk:H=0.7373,top10E=0.29,eRank=159.8,q75/q25=81.24 attn_vo:H=0.7590,top10E=0.19,eRank=228.9,q75/q25=inf mlp_w1:H=0.7638,top10E=0.30,eRank=178.7,q75/q25=13.65 mlp_w2:H=0.8484,top10E=0.15,eRank=287.4,q75/q25=18.04 vo_prod:H=0.6288,top10E=0.28,eRank=96.2,q75/q25=inf train_time:400095ms step_avg:74.09ms +[2025-09-02 06:15:53] [Rank 0] step:5401/10000 train_time:400108ms step_avg:74.08ms +[2025-09-02 06:15:53] [Rank 0] step:5401/10000 train_time:400108ms step_avg:74.08ms +[2025-09-02 06:15:55] [Rank 0] step:5421/10000 train_time:401541ms step_avg:74.07ms +[2025-09-02 06:15:55] [Rank 0] step:5421/10000 train_time:401541ms step_avg:74.07ms +[2025-09-02 06:15:57] [Rank 0] step:5441/10000 train_time:403102ms step_avg:74.09ms +[2025-09-02 
06:15:57] [Rank 0] step:5441/10000 train_time:403102ms step_avg:74.09ms +[2025-09-02 06:15:58] [Rank 0] step:5461/10000 train_time:404670ms step_avg:74.10ms +[2025-09-02 06:15:58] [Rank 0] step:5461/10000 train_time:404670ms step_avg:74.10ms +[2025-09-02 06:16:00] [Rank 0] step:5481/10000 train_time:406239ms step_avg:74.12ms +[2025-09-02 06:16:00] [Rank 0] step:5481/10000 train_time:406239ms step_avg:74.12ms +[2025-09-02 06:16:01] [Rank 0] step:5501/10000 train_time:407811ms step_avg:74.13ms +[2025-09-02 06:16:01] [Rank 0] step:5501/10000 train_time:407811ms step_avg:74.13ms +[2025-09-02 06:16:03] [Rank 0] step:5521/10000 train_time:409385ms step_avg:74.15ms +[2025-09-02 06:16:03] [Rank 0] step:5521/10000 train_time:409385ms step_avg:74.15ms +[2025-09-02 06:16:04] [Rank 0] step:5541/10000 train_time:410952ms step_avg:74.17ms +[2025-09-02 06:16:04] [Rank 0] step:5541/10000 train_time:410952ms step_avg:74.17ms +[2025-09-02 06:16:06] [Rank 0] step:5561/10000 train_time:412521ms step_avg:74.18ms +[2025-09-02 06:16:06] [Rank 0] step:5561/10000 train_time:412521ms step_avg:74.18ms +[2025-09-02 06:16:07] [Rank 0] step:5581/10000 train_time:414092ms step_avg:74.20ms +[2025-09-02 06:16:07] [Rank 0] step:5581/10000 train_time:414092ms step_avg:74.20ms +[2025-09-02 06:16:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:16:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:16:21] [Rank 0] PRINT: step:5600/10000 val_loss:4.0330 svd_entropy: attn_qk:H=0.7394,top10E=0.28,eRank=161.5,q75/q25=81.88 attn_vo:H=0.7613,top10E=0.18,eRank=231.4,q75/q25=inf mlp_w1:H=0.7674,top10E=0.29,eRank=182.5,q75/q25=14.00 mlp_w2:H=0.8501,top10E=0.15,eRank=290.6,q75/q25=18.03 vo_prod:H=0.6316,top10E=0.27,eRank=98.1,q75/q25=inf train_time:415820ms step_avg:74.25ms +[2025-09-02 06:16:21] [Rank 0] PRINT: step:5600/10000 val_loss:4.0330 svd_entropy: attn_qk:H=0.7394,top10E=0.28,eRank=161.5,q75/q25=81.88 attn_vo:H=0.7613,top10E=0.18,eRank=231.4,q75/q25=inf mlp_w1:H=0.7674,top10E=0.29,eRank=182.5,q75/q25=14.00 mlp_w2:H=0.8501,top10E=0.15,eRank=290.6,q75/q25=18.03 vo_prod:H=0.6316,top10E=0.27,eRank=98.1,q75/q25=inf train_time:415820ms step_avg:74.25ms +[2025-09-02 06:16:21] [Rank 0] step:5601/10000 train_time:415832ms step_avg:74.24ms +[2025-09-02 06:16:21] [Rank 0] step:5601/10000 train_time:415832ms step_avg:74.24ms +[2025-09-02 06:16:22] [Rank 0] step:5621/10000 train_time:417245ms step_avg:74.23ms +[2025-09-02 06:16:22] [Rank 0] step:5621/10000 train_time:417245ms step_avg:74.23ms +[2025-09-02 06:16:24] [Rank 0] step:5641/10000 train_time:418813ms step_avg:74.24ms +[2025-09-02 06:16:24] [Rank 0] step:5641/10000 train_time:418813ms step_avg:74.24ms +[2025-09-02 06:16:25] [Rank 0] step:5661/10000 train_time:420378ms step_avg:74.26ms +[2025-09-02 06:16:25] [Rank 0] step:5661/10000 train_time:420378ms step_avg:74.26ms +[2025-09-02 06:16:27] [Rank 0] step:5681/10000 train_time:421950ms step_avg:74.27ms +[2025-09-02 06:16:27] [Rank 0] step:5681/10000 train_time:421950ms step_avg:74.27ms +[2025-09-02 06:16:29] [Rank 0] step:5701/10000 train_time:423518ms step_avg:74.29ms +[2025-09-02 06:16:29] [Rank 0] step:5701/10000 train_time:423518ms step_avg:74.29ms +[2025-09-02 06:16:30] [Rank 0] step:5721/10000 train_time:425087ms step_avg:74.30ms +[2025-09-02 06:16:30] [Rank 0] step:5721/10000 train_time:425087ms step_avg:74.30ms +[2025-09-02 06:16:32] [Rank 0] 
step:5741/10000 train_time:426656ms step_avg:74.32ms +[2025-09-02 06:16:32] [Rank 0] step:5741/10000 train_time:426656ms step_avg:74.32ms +[2025-09-02 06:16:33] [Rank 0] step:5761/10000 train_time:428226ms step_avg:74.33ms +[2025-09-02 06:16:33] [Rank 0] step:5761/10000 train_time:428226ms step_avg:74.33ms +[2025-09-02 06:16:35] [Rank 0] step:5781/10000 train_time:429798ms step_avg:74.35ms +[2025-09-02 06:16:35] [Rank 0] step:5781/10000 train_time:429798ms step_avg:74.35ms +[2025-09-02 06:16:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:16:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:16:48] [Rank 0] PRINT: step:5800/10000 val_loss:4.0210 svd_entropy: attn_qk:H=0.7416,top10E=0.28,eRank=163.3,q75/q25=82.36 attn_vo:H=0.7635,top10E=0.18,eRank=233.8,q75/q25=inf mlp_w1:H=0.7707,top10E=0.29,eRank=186.2,q75/q25=14.31 mlp_w2:H=0.8517,top10E=0.15,eRank=293.6,q75/q25=18.20 vo_prod:H=0.6342,top10E=0.27,eRank=99.8,q75/q25=inf train_time:431529ms step_avg:74.40ms +[2025-09-02 06:16:48] [Rank 0] PRINT: step:5800/10000 val_loss:4.0210 svd_entropy: attn_qk:H=0.7416,top10E=0.28,eRank=163.3,q75/q25=82.36 attn_vo:H=0.7635,top10E=0.18,eRank=233.8,q75/q25=inf mlp_w1:H=0.7707,top10E=0.29,eRank=186.2,q75/q25=14.31 mlp_w2:H=0.8517,top10E=0.15,eRank=293.6,q75/q25=18.20 vo_prod:H=0.6342,top10E=0.27,eRank=99.8,q75/q25=inf train_time:431529ms step_avg:74.40ms +[2025-09-02 06:16:48] [Rank 0] step:5801/10000 train_time:431541ms step_avg:74.39ms +[2025-09-02 06:16:48] [Rank 0] step:5801/10000 train_time:431541ms step_avg:74.39ms +[2025-09-02 06:16:50] [Rank 0] step:5821/10000 train_time:432959ms step_avg:74.38ms +[2025-09-02 06:16:50] [Rank 0] step:5821/10000 train_time:432959ms step_avg:74.38ms +[2025-09-02 06:16:51] [Rank 0] step:5841/10000 train_time:434524ms step_avg:74.39ms +[2025-09-02 
06:16:51] [Rank 0] step:5841/10000 train_time:434524ms step_avg:74.39ms +[2025-09-02 06:16:53] [Rank 0] step:5861/10000 train_time:436093ms step_avg:74.41ms +[2025-09-02 06:16:53] [Rank 0] step:5861/10000 train_time:436093ms step_avg:74.41ms +[2025-09-02 06:16:54] [Rank 0] step:5881/10000 train_time:437661ms step_avg:74.42ms +[2025-09-02 06:16:54] [Rank 0] step:5881/10000 train_time:437661ms step_avg:74.42ms +[2025-09-02 06:16:56] [Rank 0] step:5901/10000 train_time:439229ms step_avg:74.43ms +[2025-09-02 06:16:56] [Rank 0] step:5901/10000 train_time:439229ms step_avg:74.43ms +[2025-09-02 06:16:58] [Rank 0] step:5921/10000 train_time:440797ms step_avg:74.45ms +[2025-09-02 06:16:58] [Rank 0] step:5921/10000 train_time:440797ms step_avg:74.45ms +[2025-09-02 06:16:59] [Rank 0] step:5941/10000 train_time:442368ms step_avg:74.46ms +[2025-09-02 06:16:59] [Rank 0] step:5941/10000 train_time:442368ms step_avg:74.46ms +[2025-09-02 06:17:01] [Rank 0] step:5961/10000 train_time:443940ms step_avg:74.47ms +[2025-09-02 06:17:01] [Rank 0] step:5961/10000 train_time:443940ms step_avg:74.47ms +[2025-09-02 06:17:02] [Rank 0] step:5981/10000 train_time:445511ms step_avg:74.49ms +[2025-09-02 06:17:02] [Rank 0] step:5981/10000 train_time:445511ms step_avg:74.49ms +[2025-09-02 06:17:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:17:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:17:15] [Rank 0] PRINT: step:6000/10000 val_loss:4.0012 svd_entropy: attn_qk:H=0.7436,top10E=0.28,eRank=164.9,q75/q25=83.29 attn_vo:H=0.7656,top10E=0.18,eRank=236.1,q75/q25=inf mlp_w1:H=0.7740,top10E=0.28,eRank=189.8,q75/q25=14.54 mlp_w2:H=0.8533,top10E=0.14,eRank=296.6,q75/q25=18.15 vo_prod:H=0.6367,top10E=0.26,eRank=101.6,q75/q25=inf train_time:447237ms step_avg:74.54ms +[2025-09-02 06:17:15] [Rank 0] PRINT: step:6000/10000 val_loss:4.0012 svd_entropy: attn_qk:H=0.7436,top10E=0.28,eRank=164.9,q75/q25=83.29 attn_vo:H=0.7656,top10E=0.18,eRank=236.1,q75/q25=inf mlp_w1:H=0.7740,top10E=0.28,eRank=189.8,q75/q25=14.54 mlp_w2:H=0.8533,top10E=0.14,eRank=296.6,q75/q25=18.15 vo_prod:H=0.6367,top10E=0.26,eRank=101.6,q75/q25=inf train_time:447237ms step_avg:74.54ms +[2025-09-02 06:17:16] [Rank 0] step:6001/10000 train_time:447250ms step_avg:74.53ms +[2025-09-02 06:17:16] [Rank 0] step:6001/10000 train_time:447250ms step_avg:74.53ms +[2025-09-02 06:17:17] [Rank 0] step:6021/10000 train_time:448684ms step_avg:74.52ms +[2025-09-02 06:17:17] [Rank 0] step:6021/10000 train_time:448684ms step_avg:74.52ms +[2025-09-02 06:17:19] [Rank 0] step:6041/10000 train_time:450253ms step_avg:74.53ms +[2025-09-02 06:17:19] [Rank 0] step:6041/10000 train_time:450253ms step_avg:74.53ms +[2025-09-02 06:17:20] [Rank 0] step:6061/10000 train_time:451830ms step_avg:74.55ms +[2025-09-02 06:17:20] [Rank 0] step:6061/10000 train_time:451830ms step_avg:74.55ms +[2025-09-02 06:17:22] [Rank 0] step:6081/10000 train_time:453400ms step_avg:74.56ms +[2025-09-02 06:17:22] [Rank 0] step:6081/10000 train_time:453400ms step_avg:74.56ms +[2025-09-02 06:17:23] [Rank 0] step:6101/10000 train_time:454977ms step_avg:74.57ms +[2025-09-02 06:17:23] [Rank 0] step:6101/10000 train_time:454977ms step_avg:74.57ms +[2025-09-02 06:17:25] [Rank 0] step:6121/10000 train_time:456614ms step_avg:74.60ms +[2025-09-02 06:17:25] [Rank 0] step:6121/10000 train_time:456614ms step_avg:74.60ms +[2025-09-02 06:17:27] [Rank 
0] step:6141/10000 train_time:458193ms step_avg:74.61ms +[2025-09-02 06:17:27] [Rank 0] step:6141/10000 train_time:458193ms step_avg:74.61ms +[2025-09-02 06:17:28] [Rank 0] step:6161/10000 train_time:459765ms step_avg:74.63ms +[2025-09-02 06:17:28] [Rank 0] step:6161/10000 train_time:459765ms step_avg:74.63ms +[2025-09-02 06:17:30] [Rank 0] step:6181/10000 train_time:461334ms step_avg:74.64ms +[2025-09-02 06:17:30] [Rank 0] step:6181/10000 train_time:461334ms step_avg:74.64ms +[2025-09-02 06:17:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:17:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:17:43] [Rank 0] PRINT: step:6200/10000 val_loss:3.9841 svd_entropy: attn_qk:H=0.7456,top10E=0.28,eRank=166.5,q75/q25=83.78 attn_vo:H=0.7676,top10E=0.18,eRank=238.4,q75/q25=inf mlp_w1:H=0.7769,top10E=0.28,eRank=193.1,q75/q25=14.88 mlp_w2:H=0.8547,top10E=0.14,eRank=299.4,q75/q25=18.16 vo_prod:H=0.6392,top10E=0.26,eRank=103.3,q75/q25=inf train_time:463066ms step_avg:74.69ms +[2025-09-02 06:17:43] [Rank 0] PRINT: step:6200/10000 val_loss:3.9841 svd_entropy: attn_qk:H=0.7456,top10E=0.28,eRank=166.5,q75/q25=83.78 attn_vo:H=0.7676,top10E=0.18,eRank=238.4,q75/q25=inf mlp_w1:H=0.7769,top10E=0.28,eRank=193.1,q75/q25=14.88 mlp_w2:H=0.8547,top10E=0.14,eRank=299.4,q75/q25=18.16 vo_prod:H=0.6392,top10E=0.26,eRank=103.3,q75/q25=inf train_time:463066ms step_avg:74.69ms +[2025-09-02 06:17:43] [Rank 0] step:6201/10000 train_time:463078ms step_avg:74.68ms +[2025-09-02 06:17:43] [Rank 0] step:6201/10000 train_time:463078ms step_avg:74.68ms +[2025-09-02 06:17:45] [Rank 0] step:6221/10000 train_time:464507ms step_avg:74.67ms +[2025-09-02 06:17:45] [Rank 0] step:6221/10000 train_time:464507ms step_avg:74.67ms +[2025-09-02 06:17:46] [Rank 0] step:6241/10000 train_time:466077ms step_avg:74.68ms +[2025-09-02 
06:17:46] [Rank 0] step:6241/10000 train_time:466077ms step_avg:74.68ms +[2025-09-02 06:17:48] [Rank 0] step:6261/10000 train_time:467649ms step_avg:74.69ms +[2025-09-02 06:17:48] [Rank 0] step:6261/10000 train_time:467649ms step_avg:74.69ms +[2025-09-02 06:17:49] [Rank 0] step:6281/10000 train_time:469226ms step_avg:74.71ms +[2025-09-02 06:17:49] [Rank 0] step:6281/10000 train_time:469226ms step_avg:74.71ms +[2025-09-02 06:17:51] [Rank 0] step:6301/10000 train_time:470802ms step_avg:74.72ms +[2025-09-02 06:17:51] [Rank 0] step:6301/10000 train_time:470802ms step_avg:74.72ms +[2025-09-02 06:17:53] [Rank 0] step:6321/10000 train_time:472374ms step_avg:74.73ms +[2025-09-02 06:17:53] [Rank 0] step:6321/10000 train_time:472374ms step_avg:74.73ms +[2025-09-02 06:17:54] [Rank 0] step:6341/10000 train_time:473950ms step_avg:74.74ms +[2025-09-02 06:17:54] [Rank 0] step:6341/10000 train_time:473950ms step_avg:74.74ms +[2025-09-02 06:17:56] [Rank 0] step:6361/10000 train_time:475526ms step_avg:74.76ms +[2025-09-02 06:17:56] [Rank 0] step:6361/10000 train_time:475526ms step_avg:74.76ms +[2025-09-02 06:17:57] [Rank 0] step:6381/10000 train_time:477104ms step_avg:74.77ms +[2025-09-02 06:17:57] [Rank 0] step:6381/10000 train_time:477104ms step_avg:74.77ms +[2025-09-02 06:17:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:17:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:18:10] [Rank 0] PRINT: step:6400/10000 val_loss:3.9666 svd_entropy: attn_qk:H=0.7474,top10E=0.27,eRank=167.9,q75/q25=83.98 attn_vo:H=0.7694,top10E=0.17,eRank=240.5,q75/q25=inf mlp_w1:H=0.7795,top10E=0.27,eRank=196.2,q75/q25=15.07 mlp_w2:H=0.8561,top10E=0.14,eRank=302.0,q75/q25=18.16 vo_prod:H=0.6417,top10E=0.26,eRank=105.0,q75/q25=inf train_time:478837ms step_avg:74.82ms +[2025-09-02 06:18:10] [Rank 0] PRINT: step:6400/10000 val_loss:3.9666 svd_entropy: attn_qk:H=0.7474,top10E=0.27,eRank=167.9,q75/q25=83.98 attn_vo:H=0.7694,top10E=0.17,eRank=240.5,q75/q25=inf mlp_w1:H=0.7795,top10E=0.27,eRank=196.2,q75/q25=15.07 mlp_w2:H=0.8561,top10E=0.14,eRank=302.0,q75/q25=18.16 vo_prod:H=0.6417,top10E=0.26,eRank=105.0,q75/q25=inf train_time:478837ms step_avg:74.82ms +[2025-09-02 06:18:11] [Rank 0] step:6401/10000 train_time:478849ms step_avg:74.81ms +[2025-09-02 06:18:11] [Rank 0] step:6401/10000 train_time:478849ms step_avg:74.81ms +[2025-09-02 06:18:12] [Rank 0] step:6421/10000 train_time:480270ms step_avg:74.80ms +[2025-09-02 06:18:12] [Rank 0] step:6421/10000 train_time:480270ms step_avg:74.80ms +[2025-09-02 06:18:14] [Rank 0] step:6441/10000 train_time:481843ms step_avg:74.81ms +[2025-09-02 06:18:14] [Rank 0] step:6441/10000 train_time:481843ms step_avg:74.81ms +[2025-09-02 06:18:15] [Rank 0] step:6461/10000 train_time:483419ms step_avg:74.82ms +[2025-09-02 06:18:15] [Rank 0] step:6461/10000 train_time:483419ms step_avg:74.82ms +[2025-09-02 06:18:17] [Rank 0] step:6481/10000 train_time:485001ms step_avg:74.83ms +[2025-09-02 06:18:17] [Rank 0] step:6481/10000 train_time:485001ms step_avg:74.83ms +[2025-09-02 06:18:18] [Rank 0] step:6501/10000 train_time:486572ms step_avg:74.85ms +[2025-09-02 06:18:18] [Rank 0] step:6501/10000 train_time:486572ms step_avg:74.85ms +[2025-09-02 06:18:20] [Rank 0] step:6521/10000 train_time:488142ms step_avg:74.86ms +[2025-09-02 06:18:20] [Rank 0] step:6521/10000 train_time:488142ms step_avg:74.86ms +[2025-09-02 06:18:22] [Rank 
0] step:6541/10000 train_time:489717ms step_avg:74.87ms +[2025-09-02 06:18:22] [Rank 0] step:6541/10000 train_time:489717ms step_avg:74.87ms +[2025-09-02 06:18:23] [Rank 0] step:6561/10000 train_time:491293ms step_avg:74.88ms +[2025-09-02 06:18:23] [Rank 0] step:6561/10000 train_time:491293ms step_avg:74.88ms +[2025-09-02 06:18:25] [Rank 0] step:6581/10000 train_time:492865ms step_avg:74.89ms +[2025-09-02 06:18:25] [Rank 0] step:6581/10000 train_time:492865ms step_avg:74.89ms +[2025-09-02 06:18:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:18:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:18:38] [Rank 0] PRINT: step:6600/10000 val_loss:3.9549 svd_entropy: attn_qk:H=0.7490,top10E=0.27,eRank=169.3,q75/q25=84.59 attn_vo:H=0.7710,top10E=0.17,eRank=242.4,q75/q25=inf mlp_w1:H=0.7819,top10E=0.27,eRank=198.9,q75/q25=15.35 mlp_w2:H=0.8573,top10E=0.14,eRank=304.4,q75/q25=18.17 vo_prod:H=0.6438,top10E=0.26,eRank=106.5,q75/q25=inf train_time:494600ms step_avg:74.94ms +[2025-09-02 06:18:38] [Rank 0] PRINT: step:6600/10000 val_loss:3.9549 svd_entropy: attn_qk:H=0.7490,top10E=0.27,eRank=169.3,q75/q25=84.59 attn_vo:H=0.7710,top10E=0.17,eRank=242.4,q75/q25=inf mlp_w1:H=0.7819,top10E=0.27,eRank=198.9,q75/q25=15.35 mlp_w2:H=0.8573,top10E=0.14,eRank=304.4,q75/q25=18.17 vo_prod:H=0.6438,top10E=0.26,eRank=106.5,q75/q25=inf train_time:494600ms step_avg:74.94ms +[2025-09-02 06:18:38] [Rank 0] step:6601/10000 train_time:494613ms step_avg:74.93ms +[2025-09-02 06:18:38] [Rank 0] step:6601/10000 train_time:494613ms step_avg:74.93ms +[2025-09-02 06:18:40] [Rank 0] step:6621/10000 train_time:496043ms step_avg:74.92ms +[2025-09-02 06:18:40] [Rank 0] step:6621/10000 train_time:496043ms step_avg:74.92ms +[2025-09-02 06:18:41] [Rank 0] step:6641/10000 train_time:497620ms step_avg:74.93ms +[2025-09-02 
06:18:41] [Rank 0] step:6641/10000 train_time:497620ms step_avg:74.93ms +[2025-09-02 06:18:43] [Rank 0] step:6661/10000 train_time:499194ms step_avg:74.94ms +[2025-09-02 06:18:43] [Rank 0] step:6661/10000 train_time:499194ms step_avg:74.94ms +[2025-09-02 06:18:44] [Rank 0] step:6681/10000 train_time:500787ms step_avg:74.96ms +[2025-09-02 06:18:44] [Rank 0] step:6681/10000 train_time:500787ms step_avg:74.96ms +[2025-09-02 06:18:46] [Rank 0] step:6701/10000 train_time:502398ms step_avg:74.97ms +[2025-09-02 06:18:46] [Rank 0] step:6701/10000 train_time:502398ms step_avg:74.97ms +[2025-09-02 06:18:47] [Rank 0] step:6721/10000 train_time:504003ms step_avg:74.99ms +[2025-09-02 06:18:47] [Rank 0] step:6721/10000 train_time:504003ms step_avg:74.99ms +[2025-09-02 06:18:49] [Rank 0] step:6741/10000 train_time:505604ms step_avg:75.00ms +[2025-09-02 06:18:49] [Rank 0] step:6741/10000 train_time:505604ms step_avg:75.00ms +[2025-09-02 06:18:51] [Rank 0] step:6761/10000 train_time:507210ms step_avg:75.02ms +[2025-09-02 06:18:51] [Rank 0] step:6761/10000 train_time:507210ms step_avg:75.02ms +[2025-09-02 06:18:52] [Rank 0] step:6781/10000 train_time:508920ms step_avg:75.05ms +[2025-09-02 06:18:52] [Rank 0] step:6781/10000 train_time:508920ms step_avg:75.05ms +[2025-09-02 06:18:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:18:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:19:06] [Rank 0] PRINT: step:6800/10000 val_loss:3.9381 svd_entropy: attn_qk:H=0.7504,top10E=0.27,eRank=170.6,q75/q25=84.81 attn_vo:H=0.7725,top10E=0.17,eRank=244.1,q75/q25=inf mlp_w1:H=0.7840,top10E=0.27,eRank=201.5,q75/q25=15.55 mlp_w2:H=0.8584,top10E=0.14,eRank=306.6,q75/q25=18.14 vo_prod:H=0.6457,top10E=0.25,eRank=107.9,q75/q25=inf train_time:510689ms step_avg:75.10ms +[2025-09-02 06:19:06] [Rank 0] PRINT: step:6800/10000 val_loss:3.9381 svd_entropy: attn_qk:H=0.7504,top10E=0.27,eRank=170.6,q75/q25=84.81 attn_vo:H=0.7725,top10E=0.17,eRank=244.1,q75/q25=inf mlp_w1:H=0.7840,top10E=0.27,eRank=201.5,q75/q25=15.55 mlp_w2:H=0.8584,top10E=0.14,eRank=306.6,q75/q25=18.14 vo_prod:H=0.6457,top10E=0.25,eRank=107.9,q75/q25=inf train_time:510689ms step_avg:75.10ms +[2025-09-02 06:19:06] [Rank 0] step:6801/10000 train_time:510702ms step_avg:75.09ms +[2025-09-02 06:19:06] [Rank 0] step:6801/10000 train_time:510702ms step_avg:75.09ms +[2025-09-02 06:19:08] [Rank 0] step:6821/10000 train_time:512164ms step_avg:75.09ms +[2025-09-02 06:19:08] [Rank 0] step:6821/10000 train_time:512164ms step_avg:75.09ms +[2025-09-02 06:19:09] [Rank 0] step:6841/10000 train_time:513761ms step_avg:75.10ms +[2025-09-02 06:19:09] [Rank 0] step:6841/10000 train_time:513761ms step_avg:75.10ms +[2025-09-02 06:19:11] [Rank 0] step:6861/10000 train_time:515361ms step_avg:75.11ms +[2025-09-02 06:19:11] [Rank 0] step:6861/10000 train_time:515361ms step_avg:75.11ms +[2025-09-02 06:19:12] [Rank 0] step:6881/10000 train_time:516958ms step_avg:75.13ms +[2025-09-02 06:19:12] [Rank 0] step:6881/10000 train_time:516958ms step_avg:75.13ms +[2025-09-02 06:19:14] [Rank 0] step:6901/10000 train_time:518557ms step_avg:75.14ms +[2025-09-02 06:19:14] [Rank 0] step:6901/10000 train_time:518557ms step_avg:75.14ms +[2025-09-02 06:19:16] [Rank 0] step:6921/10000 train_time:520154ms step_avg:75.16ms +[2025-09-02 06:19:16] [Rank 0] step:6921/10000 train_time:520154ms step_avg:75.16ms +[2025-09-02 06:19:17] [Rank 
0] step:6941/10000 train_time:521761ms step_avg:75.17ms +[2025-09-02 06:19:17] [Rank 0] step:6941/10000 train_time:521761ms step_avg:75.17ms +[2025-09-02 06:19:19] [Rank 0] step:6961/10000 train_time:523375ms step_avg:75.19ms +[2025-09-02 06:19:19] [Rank 0] step:6961/10000 train_time:523375ms step_avg:75.19ms +[2025-09-02 06:19:20] [Rank 0] step:6981/10000 train_time:524980ms step_avg:75.20ms +[2025-09-02 06:19:20] [Rank 0] step:6981/10000 train_time:524980ms step_avg:75.20ms +[2025-09-02 06:19:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:19:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:19:34] [Rank 0] PRINT: step:7000/10000 val_loss:3.9254 svd_entropy: attn_qk:H=0.7518,top10E=0.27,eRank=171.7,q75/q25=84.98 attn_vo:H=0.7739,top10E=0.17,eRank=245.8,q75/q25=inf mlp_w1:H=0.7860,top10E=0.27,eRank=203.8,q75/q25=15.70 mlp_w2:H=0.8595,top10E=0.14,eRank=308.8,q75/q25=18.08 vo_prod:H=0.6476,top10E=0.25,eRank=109.4,q75/q25=inf train_time:526746ms step_avg:75.25ms +[2025-09-02 06:19:34] [Rank 0] PRINT: step:7000/10000 val_loss:3.9254 svd_entropy: attn_qk:H=0.7518,top10E=0.27,eRank=171.7,q75/q25=84.98 attn_vo:H=0.7739,top10E=0.17,eRank=245.8,q75/q25=inf mlp_w1:H=0.7860,top10E=0.27,eRank=203.8,q75/q25=15.70 mlp_w2:H=0.8595,top10E=0.14,eRank=308.8,q75/q25=18.08 vo_prod:H=0.6476,top10E=0.25,eRank=109.4,q75/q25=inf train_time:526746ms step_avg:75.25ms +[2025-09-02 06:19:34] [Rank 0] step:7001/10000 train_time:526759ms step_avg:75.24ms +[2025-09-02 06:19:34] [Rank 0] step:7001/10000 train_time:526759ms step_avg:75.24ms +[2025-09-02 06:19:35] [Rank 0] step:7021/10000 train_time:528208ms step_avg:75.23ms +[2025-09-02 06:19:35] [Rank 0] step:7021/10000 train_time:528208ms step_avg:75.23ms +[2025-09-02 06:19:37] [Rank 0] step:7041/10000 train_time:529809ms step_avg:75.25ms +[2025-09-02 
06:19:37] [Rank 0] step:7041/10000 train_time:529809ms step_avg:75.25ms +[2025-09-02 06:19:39] [Rank 0] step:7061/10000 train_time:531408ms step_avg:75.26ms +[2025-09-02 06:19:39] [Rank 0] step:7061/10000 train_time:531408ms step_avg:75.26ms +[2025-09-02 06:19:40] [Rank 0] step:7081/10000 train_time:533007ms step_avg:75.27ms +[2025-09-02 06:19:40] [Rank 0] step:7081/10000 train_time:533007ms step_avg:75.27ms +[2025-09-02 06:19:42] [Rank 0] step:7101/10000 train_time:534608ms step_avg:75.29ms +[2025-09-02 06:19:42] [Rank 0] step:7101/10000 train_time:534608ms step_avg:75.29ms +[2025-09-02 06:19:43] [Rank 0] step:7121/10000 train_time:536208ms step_avg:75.30ms +[2025-09-02 06:19:43] [Rank 0] step:7121/10000 train_time:536208ms step_avg:75.30ms +[2025-09-02 06:19:45] [Rank 0] step:7141/10000 train_time:537808ms step_avg:75.31ms +[2025-09-02 06:19:45] [Rank 0] step:7141/10000 train_time:537808ms step_avg:75.31ms +[2025-09-02 06:19:47] [Rank 0] step:7161/10000 train_time:539409ms step_avg:75.33ms +[2025-09-02 06:19:47] [Rank 0] step:7161/10000 train_time:539409ms step_avg:75.33ms +[2025-09-02 06:19:48] [Rank 0] step:7181/10000 train_time:541012ms step_avg:75.34ms +[2025-09-02 06:19:48] [Rank 0] step:7181/10000 train_time:541012ms step_avg:75.34ms +[2025-09-02 06:19:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:19:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:20:02] [Rank 0] PRINT: step:7200/10000 val_loss:3.9109 svd_entropy: attn_qk:H=0.7530,top10E=0.27,eRank=172.8,q75/q25=85.12 attn_vo:H=0.7752,top10E=0.17,eRank=247.3,q75/q25=inf mlp_w1:H=0.7877,top10E=0.26,eRank=205.9,q75/q25=15.85 mlp_w2:H=0.8606,top10E=0.14,eRank=310.8,q75/q25=18.11 vo_prod:H=0.6494,top10E=0.25,eRank=110.7,q75/q25=inf train_time:542777ms step_avg:75.39ms +[2025-09-02 06:20:02] [Rank 0] PRINT: step:7200/10000 val_loss:3.9109 svd_entropy: attn_qk:H=0.7530,top10E=0.27,eRank=172.8,q75/q25=85.12 attn_vo:H=0.7752,top10E=0.17,eRank=247.3,q75/q25=inf mlp_w1:H=0.7877,top10E=0.26,eRank=205.9,q75/q25=15.85 mlp_w2:H=0.8606,top10E=0.14,eRank=310.8,q75/q25=18.11 vo_prod:H=0.6494,top10E=0.25,eRank=110.7,q75/q25=inf train_time:542777ms step_avg:75.39ms +[2025-09-02 06:20:02] [Rank 0] step:7201/10000 train_time:542789ms step_avg:75.38ms +[2025-09-02 06:20:02] [Rank 0] step:7201/10000 train_time:542789ms step_avg:75.38ms +[2025-09-02 06:20:03] [Rank 0] step:7221/10000 train_time:544252ms step_avg:75.37ms +[2025-09-02 06:20:03] [Rank 0] step:7221/10000 train_time:544252ms step_avg:75.37ms +[2025-09-02 06:20:05] [Rank 0] step:7241/10000 train_time:545850ms step_avg:75.38ms +[2025-09-02 06:20:05] [Rank 0] step:7241/10000 train_time:545850ms step_avg:75.38ms +[2025-09-02 06:20:07] [Rank 0] step:7261/10000 train_time:547447ms step_avg:75.40ms +[2025-09-02 06:20:07] [Rank 0] step:7261/10000 train_time:547447ms step_avg:75.40ms +[2025-09-02 06:20:08] [Rank 0] step:7281/10000 train_time:549054ms step_avg:75.41ms +[2025-09-02 06:20:08] [Rank 0] step:7281/10000 train_time:549054ms step_avg:75.41ms +[2025-09-02 06:20:10] [Rank 0] step:7301/10000 train_time:550654ms step_avg:75.42ms +[2025-09-02 06:20:10] [Rank 0] step:7301/10000 train_time:550654ms step_avg:75.42ms +[2025-09-02 06:20:11] [Rank 0] step:7321/10000 train_time:552260ms step_avg:75.44ms +[2025-09-02 06:20:11] [Rank 0] step:7321/10000 train_time:552260ms step_avg:75.44ms +[2025-09-02 06:20:13] [Rank 
0] step:7341/10000 train_time:553867ms step_avg:75.45ms +[2025-09-02 06:20:13] [Rank 0] step:7341/10000 train_time:553867ms step_avg:75.45ms +[2025-09-02 06:20:15] [Rank 0] step:7361/10000 train_time:555477ms step_avg:75.46ms +[2025-09-02 06:20:15] [Rank 0] step:7361/10000 train_time:555477ms step_avg:75.46ms +[2025-09-02 06:20:16] [Rank 0] step:7381/10000 train_time:557085ms step_avg:75.48ms +[2025-09-02 06:20:16] [Rank 0] step:7381/10000 train_time:557085ms step_avg:75.48ms +[2025-09-02 06:20:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:20:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:20:29] [Rank 0] PRINT: step:7400/10000 val_loss:3.8929 svd_entropy: attn_qk:H=0.7541,top10E=0.26,eRank=173.8,q75/q25=85.40 attn_vo:H=0.7763,top10E=0.16,eRank=248.6,q75/q25=inf mlp_w1:H=0.7892,top10E=0.26,eRank=207.7,q75/q25=16.00 mlp_w2:H=0.8615,top10E=0.14,eRank=312.7,q75/q25=18.08 vo_prod:H=0.6509,top10E=0.25,eRank=111.8,q75/q25=inf train_time:558836ms step_avg:75.52ms +[2025-09-02 06:20:29] [Rank 0] PRINT: step:7400/10000 val_loss:3.8929 svd_entropy: attn_qk:H=0.7541,top10E=0.26,eRank=173.8,q75/q25=85.40 attn_vo:H=0.7763,top10E=0.16,eRank=248.6,q75/q25=inf mlp_w1:H=0.7892,top10E=0.26,eRank=207.7,q75/q25=16.00 mlp_w2:H=0.8615,top10E=0.14,eRank=312.7,q75/q25=18.08 vo_prod:H=0.6509,top10E=0.25,eRank=111.8,q75/q25=inf train_time:558836ms step_avg:75.52ms +[2025-09-02 06:20:30] [Rank 0] step:7401/10000 train_time:558848ms step_avg:75.51ms +[2025-09-02 06:20:30] [Rank 0] step:7401/10000 train_time:558848ms step_avg:75.51ms +[2025-09-02 06:20:31] [Rank 0] step:7421/10000 train_time:560302ms step_avg:75.50ms +[2025-09-02 06:20:31] [Rank 0] step:7421/10000 train_time:560302ms step_avg:75.50ms +[2025-09-02 06:20:33] [Rank 0] step:7441/10000 train_time:561903ms step_avg:75.51ms +[2025-09-02 
06:20:33] [Rank 0] step:7441/10000 train_time:561903ms step_avg:75.51ms +[2025-09-02 06:20:34] [Rank 0] step:7461/10000 train_time:563537ms step_avg:75.53ms +[2025-09-02 06:20:34] [Rank 0] step:7461/10000 train_time:563537ms step_avg:75.53ms +[2025-09-02 06:20:36] [Rank 0] step:7481/10000 train_time:565146ms step_avg:75.54ms +[2025-09-02 06:20:36] [Rank 0] step:7481/10000 train_time:565146ms step_avg:75.54ms +[2025-09-02 06:20:38] [Rank 0] step:7501/10000 train_time:566754ms step_avg:75.56ms +[2025-09-02 06:20:38] [Rank 0] step:7501/10000 train_time:566754ms step_avg:75.56ms +[2025-09-02 06:20:39] [Rank 0] step:7521/10000 train_time:568361ms step_avg:75.57ms +[2025-09-02 06:20:39] [Rank 0] step:7521/10000 train_time:568361ms step_avg:75.57ms +[2025-09-02 06:20:41] [Rank 0] step:7541/10000 train_time:569980ms step_avg:75.58ms +[2025-09-02 06:20:41] [Rank 0] step:7541/10000 train_time:569980ms step_avg:75.58ms +[2025-09-02 06:20:42] [Rank 0] step:7561/10000 train_time:571576ms step_avg:75.60ms +[2025-09-02 06:20:42] [Rank 0] step:7561/10000 train_time:571576ms step_avg:75.60ms +[2025-09-02 06:20:44] [Rank 0] step:7581/10000 train_time:573189ms step_avg:75.61ms +[2025-09-02 06:20:44] [Rank 0] step:7581/10000 train_time:573189ms step_avg:75.61ms +[2025-09-02 06:20:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:20:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:20:57] [Rank 0] PRINT: step:7600/10000 val_loss:3.8925 svd_entropy: attn_qk:H=0.7552,top10E=0.26,eRank=174.8,q75/q25=84.64 attn_vo:H=0.7773,top10E=0.16,eRank=249.9,q75/q25=inf mlp_w1:H=0.7906,top10E=0.26,eRank=209.4,q75/q25=16.09 mlp_w2:H=0.8623,top10E=0.13,eRank=314.3,q75/q25=18.04 vo_prod:H=0.6523,top10E=0.24,eRank=113.0,q75/q25=inf train_time:574968ms step_avg:75.65ms +[2025-09-02 06:20:57] [Rank 0] PRINT: step:7600/10000 val_loss:3.8925 svd_entropy: attn_qk:H=0.7552,top10E=0.26,eRank=174.8,q75/q25=84.64 attn_vo:H=0.7773,top10E=0.16,eRank=249.9,q75/q25=inf mlp_w1:H=0.7906,top10E=0.26,eRank=209.4,q75/q25=16.09 mlp_w2:H=0.8623,top10E=0.13,eRank=314.3,q75/q25=18.04 vo_prod:H=0.6523,top10E=0.24,eRank=113.0,q75/q25=inf train_time:574968ms step_avg:75.65ms +[2025-09-02 06:20:57] [Rank 0] step:7601/10000 train_time:574980ms step_avg:75.65ms +[2025-09-02 06:20:57] [Rank 0] step:7601/10000 train_time:574980ms step_avg:75.65ms +[2025-09-02 06:20:59] [Rank 0] step:7621/10000 train_time:576438ms step_avg:75.64ms +[2025-09-02 06:20:59] [Rank 0] step:7621/10000 train_time:576438ms step_avg:75.64ms +[2025-09-02 06:21:01] [Rank 0] step:7641/10000 train_time:578044ms step_avg:75.65ms +[2025-09-02 06:21:01] [Rank 0] step:7641/10000 train_time:578044ms step_avg:75.65ms +[2025-09-02 06:21:02] [Rank 0] step:7661/10000 train_time:579653ms step_avg:75.66ms +[2025-09-02 06:21:02] [Rank 0] step:7661/10000 train_time:579653ms step_avg:75.66ms +[2025-09-02 06:21:04] [Rank 0] step:7681/10000 train_time:581255ms step_avg:75.67ms +[2025-09-02 06:21:04] [Rank 0] step:7681/10000 train_time:581255ms step_avg:75.67ms +[2025-09-02 06:21:05] [Rank 0] step:7701/10000 train_time:582858ms step_avg:75.69ms +[2025-09-02 06:21:05] [Rank 0] step:7701/10000 train_time:582858ms step_avg:75.69ms +[2025-09-02 06:21:07] [Rank 0] step:7721/10000 train_time:584477ms step_avg:75.70ms +[2025-09-02 06:21:07] [Rank 0] step:7721/10000 train_time:584477ms step_avg:75.70ms +[2025-09-02 06:21:09] [Rank 
0] step:7741/10000 train_time:586085ms step_avg:75.71ms +[2025-09-02 06:21:09] [Rank 0] step:7741/10000 train_time:586085ms step_avg:75.71ms +[2025-09-02 06:21:10] [Rank 0] step:7761/10000 train_time:587704ms step_avg:75.73ms +[2025-09-02 06:21:10] [Rank 0] step:7761/10000 train_time:587704ms step_avg:75.73ms +[2025-09-02 06:21:12] [Rank 0] step:7781/10000 train_time:589320ms step_avg:75.74ms +[2025-09-02 06:21:12] [Rank 0] step:7781/10000 train_time:589320ms step_avg:75.74ms +[2025-09-02 06:21:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:21:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:21:25] [Rank 0] PRINT: step:7800/10000 val_loss:3.8734 svd_entropy: attn_qk:H=0.7561,top10E=0.26,eRank=175.6,q75/q25=84.09 attn_vo:H=0.7782,top10E=0.16,eRank=251.0,q75/q25=inf mlp_w1:H=0.7919,top10E=0.26,eRank=211.0,q75/q25=16.18 mlp_w2:H=0.8631,top10E=0.13,eRank=315.9,q75/q25=17.98 vo_prod:H=0.6535,top10E=0.24,eRank=113.9,q75/q25=inf train_time:591096ms step_avg:75.78ms +[2025-09-02 06:21:25] [Rank 0] PRINT: step:7800/10000 val_loss:3.8734 svd_entropy: attn_qk:H=0.7561,top10E=0.26,eRank=175.6,q75/q25=84.09 attn_vo:H=0.7782,top10E=0.16,eRank=251.0,q75/q25=inf mlp_w1:H=0.7919,top10E=0.26,eRank=211.0,q75/q25=16.18 mlp_w2:H=0.8631,top10E=0.13,eRank=315.9,q75/q25=17.98 vo_prod:H=0.6535,top10E=0.24,eRank=113.9,q75/q25=inf train_time:591096ms step_avg:75.78ms +[2025-09-02 06:21:25] [Rank 0] step:7801/10000 train_time:591108ms step_avg:75.77ms +[2025-09-02 06:21:25] [Rank 0] step:7801/10000 train_time:591108ms step_avg:75.77ms +[2025-09-02 06:21:27] [Rank 0] step:7821/10000 train_time:592570ms step_avg:75.77ms +[2025-09-02 06:21:27] [Rank 0] step:7821/10000 train_time:592570ms step_avg:75.77ms +[2025-09-02 06:21:28] [Rank 0] step:7841/10000 train_time:594174ms step_avg:75.78ms +[2025-09-02 
06:21:28] [Rank 0] step:7841/10000 train_time:594174ms step_avg:75.78ms +[2025-09-02 06:21:30] [Rank 0] step:7861/10000 train_time:595785ms step_avg:75.79ms +[2025-09-02 06:21:30] [Rank 0] step:7861/10000 train_time:595785ms step_avg:75.79ms +[2025-09-02 06:21:32] [Rank 0] step:7881/10000 train_time:597396ms step_avg:75.80ms +[2025-09-02 06:21:32] [Rank 0] step:7881/10000 train_time:597396ms step_avg:75.80ms +[2025-09-02 06:21:33] [Rank 0] step:7901/10000 train_time:599004ms step_avg:75.81ms +[2025-09-02 06:21:33] [Rank 0] step:7901/10000 train_time:599004ms step_avg:75.81ms +[2025-09-02 06:21:35] [Rank 0] step:7921/10000 train_time:600615ms step_avg:75.83ms +[2025-09-02 06:21:35] [Rank 0] step:7921/10000 train_time:600615ms step_avg:75.83ms +[2025-09-02 06:21:37] [Rank 0] step:7941/10000 train_time:602227ms step_avg:75.84ms +[2025-09-02 06:21:37] [Rank 0] step:7941/10000 train_time:602227ms step_avg:75.84ms +[2025-09-02 06:21:38] [Rank 0] step:7961/10000 train_time:603839ms step_avg:75.85ms +[2025-09-02 06:21:38] [Rank 0] step:7961/10000 train_time:603839ms step_avg:75.85ms +[2025-09-02 06:21:40] [Rank 0] step:7981/10000 train_time:605445ms step_avg:75.86ms +[2025-09-02 06:21:40] [Rank 0] step:7981/10000 train_time:605445ms step_avg:75.86ms +[2025-09-02 06:21:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:21:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:21:53] [Rank 0] PRINT: step:8000/10000 val_loss:3.8584 svd_entropy: attn_qk:H=0.7570,top10E=0.26,eRank=176.4,q75/q25=84.09 attn_vo:H=0.7791,top10E=0.16,eRank=252.1,q75/q25=inf mlp_w1:H=0.7930,top10E=0.26,eRank=212.4,q75/q25=16.23 mlp_w2:H=0.8638,top10E=0.13,eRank=317.4,q75/q25=17.91 vo_prod:H=0.6548,top10E=0.24,eRank=115.0,q75/q25=inf train_time:607219ms step_avg:75.90ms +[2025-09-02 06:21:53] [Rank 0] PRINT: step:8000/10000 val_loss:3.8584 svd_entropy: attn_qk:H=0.7570,top10E=0.26,eRank=176.4,q75/q25=84.09 attn_vo:H=0.7791,top10E=0.16,eRank=252.1,q75/q25=inf mlp_w1:H=0.7930,top10E=0.26,eRank=212.4,q75/q25=16.23 mlp_w2:H=0.8638,top10E=0.13,eRank=317.4,q75/q25=17.91 vo_prod:H=0.6548,top10E=0.24,eRank=115.0,q75/q25=inf train_time:607219ms step_avg:75.90ms +[2025-09-02 06:21:53] [Rank 0] step:8001/10000 train_time:607231ms step_avg:75.89ms +[2025-09-02 06:21:53] [Rank 0] step:8001/10000 train_time:607231ms step_avg:75.89ms +[2025-09-02 06:21:55] [Rank 0] step:8021/10000 train_time:608690ms step_avg:75.89ms +[2025-09-02 06:21:55] [Rank 0] step:8021/10000 train_time:608690ms step_avg:75.89ms +[2025-09-02 06:21:56] [Rank 0] step:8041/10000 train_time:610306ms step_avg:75.90ms +[2025-09-02 06:21:56] [Rank 0] step:8041/10000 train_time:610306ms step_avg:75.90ms +[2025-09-02 06:21:58] [Rank 0] step:8061/10000 train_time:611912ms step_avg:75.91ms +[2025-09-02 06:21:58] [Rank 0] step:8061/10000 train_time:611912ms step_avg:75.91ms +[2025-09-02 06:21:59] [Rank 0] step:8081/10000 train_time:613509ms step_avg:75.92ms +[2025-09-02 06:21:59] [Rank 0] step:8081/10000 train_time:613509ms step_avg:75.92ms +[2025-09-02 06:22:01] [Rank 0] step:8101/10000 train_time:615123ms step_avg:75.93ms +[2025-09-02 06:22:01] [Rank 0] step:8101/10000 train_time:615123ms step_avg:75.93ms +[2025-09-02 06:22:03] [Rank 0] step:8121/10000 train_time:616726ms step_avg:75.94ms +[2025-09-02 06:22:03] [Rank 0] step:8121/10000 train_time:616726ms step_avg:75.94ms +[2025-09-02 06:22:04] [Rank 
0] step:8141/10000 train_time:618433ms step_avg:75.97ms +[2025-09-02 06:22:04] [Rank 0] step:8141/10000 train_time:618433ms step_avg:75.97ms +[2025-09-02 06:22:06] [Rank 0] step:8161/10000 train_time:620052ms step_avg:75.98ms +[2025-09-02 06:22:06] [Rank 0] step:8161/10000 train_time:620052ms step_avg:75.98ms +[2025-09-02 06:22:08] [Rank 0] step:8181/10000 train_time:621690ms step_avg:75.99ms +[2025-09-02 06:22:08] [Rank 0] step:8181/10000 train_time:621690ms step_avg:75.99ms +[2025-09-02 06:22:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:22:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:22:21] [Rank 0] PRINT: step:8200/10000 val_loss:3.8510 svd_entropy: attn_qk:H=0.7578,top10E=0.26,eRank=177.1,q75/q25=84.17 attn_vo:H=0.7798,top10E=0.16,eRank=253.0,q75/q25=inf mlp_w1:H=0.7941,top10E=0.26,eRank=213.7,q75/q25=16.30 mlp_w2:H=0.8645,top10E=0.13,eRank=318.8,q75/q25=17.84 vo_prod:H=0.6559,top10E=0.24,eRank=115.8,q75/q25=inf train_time:623514ms step_avg:76.04ms +[2025-09-02 06:22:21] [Rank 0] PRINT: step:8200/10000 val_loss:3.8510 svd_entropy: attn_qk:H=0.7578,top10E=0.26,eRank=177.1,q75/q25=84.17 attn_vo:H=0.7798,top10E=0.16,eRank=253.0,q75/q25=inf mlp_w1:H=0.7941,top10E=0.26,eRank=213.7,q75/q25=16.30 mlp_w2:H=0.8645,top10E=0.13,eRank=318.8,q75/q25=17.84 vo_prod:H=0.6559,top10E=0.24,eRank=115.8,q75/q25=inf train_time:623514ms step_avg:76.04ms +[2025-09-02 06:22:21] [Rank 0] step:8201/10000 train_time:623526ms step_avg:76.03ms +[2025-09-02 06:22:21] [Rank 0] step:8201/10000 train_time:623526ms step_avg:76.03ms +[2025-09-02 06:22:23] [Rank 0] step:8221/10000 train_time:625012ms step_avg:76.03ms +[2025-09-02 06:22:23] [Rank 0] step:8221/10000 train_time:625012ms step_avg:76.03ms +[2025-09-02 06:22:24] [Rank 0] step:8241/10000 train_time:626655ms step_avg:76.04ms +[2025-09-02 
06:22:24] [Rank 0] step:8241/10000 train_time:626655ms step_avg:76.04ms +[2025-09-02 06:22:26] [Rank 0] step:8261/10000 train_time:628290ms step_avg:76.05ms +[2025-09-02 06:22:26] [Rank 0] step:8261/10000 train_time:628290ms step_avg:76.05ms +[2025-09-02 06:22:28] [Rank 0] step:8281/10000 train_time:629929ms step_avg:76.07ms +[2025-09-02 06:22:28] [Rank 0] step:8281/10000 train_time:629929ms step_avg:76.07ms +[2025-09-02 06:22:29] [Rank 0] step:8301/10000 train_time:631564ms step_avg:76.08ms +[2025-09-02 06:22:29] [Rank 0] step:8301/10000 train_time:631564ms step_avg:76.08ms +[2025-09-02 06:22:31] [Rank 0] step:8321/10000 train_time:633193ms step_avg:76.10ms +[2025-09-02 06:22:31] [Rank 0] step:8321/10000 train_time:633193ms step_avg:76.10ms +[2025-09-02 06:22:33] [Rank 0] step:8341/10000 train_time:634831ms step_avg:76.11ms +[2025-09-02 06:22:33] [Rank 0] step:8341/10000 train_time:634831ms step_avg:76.11ms +[2025-09-02 06:22:34] [Rank 0] step:8361/10000 train_time:636468ms step_avg:76.12ms +[2025-09-02 06:22:34] [Rank 0] step:8361/10000 train_time:636468ms step_avg:76.12ms +[2025-09-02 06:22:36] [Rank 0] step:8381/10000 train_time:638101ms step_avg:76.14ms +[2025-09-02 06:22:36] [Rank 0] step:8381/10000 train_time:638101ms step_avg:76.14ms +[2025-09-02 06:22:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:22:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:22:49] [Rank 0] PRINT: step:8400/10000 val_loss:3.8391 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=177.7,q75/q25=84.29 attn_vo:H=0.7805,top10E=0.16,eRank=253.9,q75/q25=inf mlp_w1:H=0.7950,top10E=0.25,eRank=214.9,q75/q25=16.30 mlp_w2:H=0.8651,top10E=0.13,eRank=320.1,q75/q25=17.78 vo_prod:H=0.6569,top10E=0.24,eRank=116.6,q75/q25=inf train_time:639897ms step_avg:76.18ms +[2025-09-02 06:22:49] [Rank 0] PRINT: step:8400/10000 val_loss:3.8391 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=177.7,q75/q25=84.29 attn_vo:H=0.7805,top10E=0.16,eRank=253.9,q75/q25=inf mlp_w1:H=0.7950,top10E=0.25,eRank=214.9,q75/q25=16.30 mlp_w2:H=0.8651,top10E=0.13,eRank=320.1,q75/q25=17.78 vo_prod:H=0.6569,top10E=0.24,eRank=116.6,q75/q25=inf train_time:639897ms step_avg:76.18ms +[2025-09-02 06:22:49] [Rank 0] step:8401/10000 train_time:639910ms step_avg:76.17ms +[2025-09-02 06:22:49] [Rank 0] step:8401/10000 train_time:639910ms step_avg:76.17ms +[2025-09-02 06:22:51] [Rank 0] step:8421/10000 train_time:641379ms step_avg:76.16ms +[2025-09-02 06:22:51] [Rank 0] step:8421/10000 train_time:641379ms step_avg:76.16ms +[2025-09-02 06:22:52] [Rank 0] step:8441/10000 train_time:643014ms step_avg:76.18ms +[2025-09-02 06:22:52] [Rank 0] step:8441/10000 train_time:643014ms step_avg:76.18ms +[2025-09-02 06:22:54] [Rank 0] step:8461/10000 train_time:644642ms step_avg:76.19ms +[2025-09-02 06:22:54] [Rank 0] step:8461/10000 train_time:644642ms step_avg:76.19ms +[2025-09-02 06:22:56] [Rank 0] step:8481/10000 train_time:646281ms step_avg:76.20ms +[2025-09-02 06:22:56] [Rank 0] step:8481/10000 train_time:646281ms step_avg:76.20ms +[2025-09-02 06:22:57] [Rank 0] step:8501/10000 train_time:647939ms step_avg:76.22ms +[2025-09-02 06:22:57] [Rank 0] step:8501/10000 train_time:647939ms step_avg:76.22ms +[2025-09-02 06:22:59] [Rank 0] step:8521/10000 train_time:649582ms step_avg:76.23ms +[2025-09-02 06:22:59] [Rank 0] step:8521/10000 train_time:649582ms step_avg:76.23ms +[2025-09-02 06:23:01] [Rank 
0] step:8541/10000 train_time:651231ms step_avg:76.25ms +[2025-09-02 06:23:01] [Rank 0] step:8541/10000 train_time:651231ms step_avg:76.25ms +[2025-09-02 06:23:02] [Rank 0] step:8561/10000 train_time:652869ms step_avg:76.26ms +[2025-09-02 06:23:02] [Rank 0] step:8561/10000 train_time:652869ms step_avg:76.26ms +[2025-09-02 06:23:04] [Rank 0] step:8581/10000 train_time:654506ms step_avg:76.27ms +[2025-09-02 06:23:04] [Rank 0] step:8581/10000 train_time:654506ms step_avg:76.27ms +[2025-09-02 06:23:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:23:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:23:17] [Rank 0] PRINT: step:8600/10000 val_loss:3.8299 svd_entropy: attn_qk:H=0.7590,top10E=0.26,eRank=178.3,q75/q25=84.06 attn_vo:H=0.7811,top10E=0.16,eRank=254.6,q75/q25=inf mlp_w1:H=0.7958,top10E=0.25,eRank=215.9,q75/q25=16.31 mlp_w2:H=0.8657,top10E=0.13,eRank=321.3,q75/q25=17.73 vo_prod:H=0.6578,top10E=0.24,eRank=117.3,q75/q25=inf train_time:656293ms step_avg:76.31ms +[2025-09-02 06:23:17] [Rank 0] PRINT: step:8600/10000 val_loss:3.8299 svd_entropy: attn_qk:H=0.7590,top10E=0.26,eRank=178.3,q75/q25=84.06 attn_vo:H=0.7811,top10E=0.16,eRank=254.6,q75/q25=inf mlp_w1:H=0.7958,top10E=0.25,eRank=215.9,q75/q25=16.31 mlp_w2:H=0.8657,top10E=0.13,eRank=321.3,q75/q25=17.73 vo_prod:H=0.6578,top10E=0.24,eRank=117.3,q75/q25=inf train_time:656293ms step_avg:76.31ms +[2025-09-02 06:23:17] [Rank 0] step:8601/10000 train_time:656305ms step_avg:76.31ms +[2025-09-02 06:23:17] [Rank 0] step:8601/10000 train_time:656305ms step_avg:76.31ms +[2025-09-02 06:23:19] [Rank 0] step:8621/10000 train_time:657807ms step_avg:76.30ms +[2025-09-02 06:23:19] [Rank 0] step:8621/10000 train_time:657807ms step_avg:76.30ms +[2025-09-02 06:23:21] [Rank 0] step:8641/10000 train_time:659441ms step_avg:76.32ms +[2025-09-02 
06:23:21] [Rank 0] step:8641/10000 train_time:659441ms step_avg:76.32ms +[2025-09-02 06:23:22] [Rank 0] step:8661/10000 train_time:661080ms step_avg:76.33ms +[2025-09-02 06:23:22] [Rank 0] step:8661/10000 train_time:661080ms step_avg:76.33ms +[2025-09-02 06:23:24] [Rank 0] step:8681/10000 train_time:662715ms step_avg:76.34ms +[2025-09-02 06:23:24] [Rank 0] step:8681/10000 train_time:662715ms step_avg:76.34ms +[2025-09-02 06:23:25] [Rank 0] step:8701/10000 train_time:664346ms step_avg:76.35ms +[2025-09-02 06:23:25] [Rank 0] step:8701/10000 train_time:664346ms step_avg:76.35ms +[2025-09-02 06:23:27] [Rank 0] step:8721/10000 train_time:665988ms step_avg:76.37ms +[2025-09-02 06:23:27] [Rank 0] step:8721/10000 train_time:665988ms step_avg:76.37ms +[2025-09-02 06:23:29] [Rank 0] step:8741/10000 train_time:667614ms step_avg:76.38ms +[2025-09-02 06:23:29] [Rank 0] step:8741/10000 train_time:667614ms step_avg:76.38ms +[2025-09-02 06:23:30] [Rank 0] step:8761/10000 train_time:669244ms step_avg:76.39ms +[2025-09-02 06:23:30] [Rank 0] step:8761/10000 train_time:669244ms step_avg:76.39ms +[2025-09-02 06:23:32] [Rank 0] step:8781/10000 train_time:670889ms step_avg:76.40ms +[2025-09-02 06:23:32] [Rank 0] step:8781/10000 train_time:670889ms step_avg:76.40ms +[2025-09-02 06:23:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:23:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:23:45] [Rank 0] PRINT: step:8800/10000 val_loss:3.8202 svd_entropy: attn_qk:H=0.7596,top10E=0.26,eRank=178.8,q75/q25=84.38 attn_vo:H=0.7816,top10E=0.16,eRank=255.3,q75/q25=inf mlp_w1:H=0.7965,top10E=0.25,eRank=216.8,q75/q25=16.30 mlp_w2:H=0.8662,top10E=0.13,eRank=322.4,q75/q25=17.67 vo_prod:H=0.6586,top10E=0.24,eRank=117.9,q75/q25=inf train_time:672690ms step_avg:76.44ms +[2025-09-02 06:23:45] [Rank 0] PRINT: step:8800/10000 val_loss:3.8202 svd_entropy: attn_qk:H=0.7596,top10E=0.26,eRank=178.8,q75/q25=84.38 attn_vo:H=0.7816,top10E=0.16,eRank=255.3,q75/q25=inf mlp_w1:H=0.7965,top10E=0.25,eRank=216.8,q75/q25=16.30 mlp_w2:H=0.8662,top10E=0.13,eRank=322.4,q75/q25=17.67 vo_prod:H=0.6586,top10E=0.24,eRank=117.9,q75/q25=inf train_time:672690ms step_avg:76.44ms +[2025-09-02 06:23:45] [Rank 0] step:8801/10000 train_time:672703ms step_avg:76.43ms +[2025-09-02 06:23:45] [Rank 0] step:8801/10000 train_time:672703ms step_avg:76.43ms +[2025-09-02 06:23:47] [Rank 0] step:8821/10000 train_time:674179ms step_avg:76.43ms +[2025-09-02 06:23:47] [Rank 0] step:8821/10000 train_time:674179ms step_avg:76.43ms +[2025-09-02 06:23:49] [Rank 0] step:8841/10000 train_time:675832ms step_avg:76.44ms +[2025-09-02 06:23:49] [Rank 0] step:8841/10000 train_time:675832ms step_avg:76.44ms +[2025-09-02 06:23:50] [Rank 0] step:8861/10000 train_time:677468ms step_avg:76.46ms +[2025-09-02 06:23:50] [Rank 0] step:8861/10000 train_time:677468ms step_avg:76.46ms +[2025-09-02 06:23:52] [Rank 0] step:8881/10000 train_time:679107ms step_avg:76.47ms +[2025-09-02 06:23:52] [Rank 0] step:8881/10000 train_time:679107ms step_avg:76.47ms +[2025-09-02 06:23:54] [Rank 0] step:8901/10000 train_time:680750ms step_avg:76.48ms +[2025-09-02 06:23:54] [Rank 0] step:8901/10000 train_time:680750ms step_avg:76.48ms +[2025-09-02 06:23:55] [Rank 0] step:8921/10000 train_time:682396ms step_avg:76.49ms +[2025-09-02 06:23:55] [Rank 0] step:8921/10000 train_time:682396ms step_avg:76.49ms +[2025-09-02 06:23:57] [Rank 
0] step:8941/10000 train_time:684047ms step_avg:76.51ms +[2025-09-02 06:23:57] [Rank 0] step:8941/10000 train_time:684047ms step_avg:76.51ms +[2025-09-02 06:23:58] [Rank 0] step:8961/10000 train_time:685680ms step_avg:76.52ms +[2025-09-02 06:23:58] [Rank 0] step:8961/10000 train_time:685680ms step_avg:76.52ms +[2025-09-02 06:24:00] [Rank 0] step:8981/10000 train_time:687314ms step_avg:76.53ms +[2025-09-02 06:24:00] [Rank 0] step:8981/10000 train_time:687314ms step_avg:76.53ms +[2025-09-02 06:24:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:24:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:24:13] [Rank 0] PRINT: step:9000/10000 val_loss:3.8119 svd_entropy: attn_qk:H=0.7600,top10E=0.26,eRank=179.2,q75/q25=84.10 attn_vo:H=0.7821,top10E=0.16,eRank=255.9,q75/q25=inf mlp_w1:H=0.7971,top10E=0.25,eRank=217.5,q75/q25=16.28 mlp_w2:H=0.8666,top10E=0.13,eRank=323.3,q75/q25=17.57 vo_prod:H=0.6593,top10E=0.23,eRank=118.5,q75/q25=inf train_time:689113ms step_avg:76.57ms +[2025-09-02 06:24:13] [Rank 0] PRINT: step:9000/10000 val_loss:3.8119 svd_entropy: attn_qk:H=0.7600,top10E=0.26,eRank=179.2,q75/q25=84.10 attn_vo:H=0.7821,top10E=0.16,eRank=255.9,q75/q25=inf mlp_w1:H=0.7971,top10E=0.25,eRank=217.5,q75/q25=16.28 mlp_w2:H=0.8666,top10E=0.13,eRank=323.3,q75/q25=17.57 vo_prod:H=0.6593,top10E=0.23,eRank=118.5,q75/q25=inf train_time:689113ms step_avg:76.57ms +[2025-09-02 06:24:13] [Rank 0] step:9001/10000 train_time:689126ms step_avg:76.56ms +[2025-09-02 06:24:13] [Rank 0] step:9001/10000 train_time:689126ms step_avg:76.56ms +[2025-09-02 06:24:15] [Rank 0] step:9021/10000 train_time:690604ms step_avg:76.56ms +[2025-09-02 06:24:15] [Rank 0] step:9021/10000 train_time:690604ms step_avg:76.56ms +[2025-09-02 06:24:17] [Rank 0] step:9041/10000 train_time:692234ms step_avg:76.57ms +[2025-09-02 
06:24:17] [Rank 0] step:9041/10000 train_time:692234ms step_avg:76.57ms +[2025-09-02 06:24:18] [Rank 0] step:9061/10000 train_time:693882ms step_avg:76.58ms +[2025-09-02 06:24:18] [Rank 0] step:9061/10000 train_time:693882ms step_avg:76.58ms +[2025-09-02 06:24:20] [Rank 0] step:9081/10000 train_time:695524ms step_avg:76.59ms +[2025-09-02 06:24:20] [Rank 0] step:9081/10000 train_time:695524ms step_avg:76.59ms +[2025-09-02 06:24:22] [Rank 0] step:9101/10000 train_time:697180ms step_avg:76.60ms +[2025-09-02 06:24:22] [Rank 0] step:9101/10000 train_time:697180ms step_avg:76.60ms +[2025-09-02 06:24:23] [Rank 0] step:9121/10000 train_time:698820ms step_avg:76.62ms +[2025-09-02 06:24:23] [Rank 0] step:9121/10000 train_time:698820ms step_avg:76.62ms +[2025-09-02 06:24:25] [Rank 0] step:9141/10000 train_time:700447ms step_avg:76.63ms +[2025-09-02 06:24:25] [Rank 0] step:9141/10000 train_time:700447ms step_avg:76.63ms +[2025-09-02 06:24:27] [Rank 0] step:9161/10000 train_time:702073ms step_avg:76.64ms +[2025-09-02 06:24:27] [Rank 0] step:9161/10000 train_time:702073ms step_avg:76.64ms +[2025-09-02 06:24:28] [Rank 0] step:9181/10000 train_time:703740ms step_avg:76.65ms +[2025-09-02 06:24:28] [Rank 0] step:9181/10000 train_time:703740ms step_avg:76.65ms +[2025-09-02 06:24:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:24:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:24:42] [Rank 0] PRINT: step:9200/10000 val_loss:3.8041 svd_entropy: attn_qk:H=0.7604,top10E=0.26,eRank=179.6,q75/q25=84.24 attn_vo:H=0.7825,top10E=0.16,eRank=256.4,q75/q25=inf mlp_w1:H=0.7976,top10E=0.25,eRank=218.2,q75/q25=16.28 mlp_w2:H=0.8671,top10E=0.13,eRank=324.2,q75/q25=17.54 vo_prod:H=0.6600,top10E=0.23,eRank=119.1,q75/q25=inf train_time:705542ms step_avg:76.69ms +[2025-09-02 06:24:42] [Rank 0] PRINT: step:9200/10000 val_loss:3.8041 svd_entropy: attn_qk:H=0.7604,top10E=0.26,eRank=179.6,q75/q25=84.24 attn_vo:H=0.7825,top10E=0.16,eRank=256.4,q75/q25=inf mlp_w1:H=0.7976,top10E=0.25,eRank=218.2,q75/q25=16.28 mlp_w2:H=0.8671,top10E=0.13,eRank=324.2,q75/q25=17.54 vo_prod:H=0.6600,top10E=0.23,eRank=119.1,q75/q25=inf train_time:705542ms step_avg:76.69ms +[2025-09-02 06:24:42] [Rank 0] step:9201/10000 train_time:705554ms step_avg:76.68ms +[2025-09-02 06:24:42] [Rank 0] step:9201/10000 train_time:705554ms step_avg:76.68ms +[2025-09-02 06:24:43] [Rank 0] step:9221/10000 train_time:707055ms step_avg:76.68ms +[2025-09-02 06:24:43] [Rank 0] step:9221/10000 train_time:707055ms step_avg:76.68ms +[2025-09-02 06:24:45] [Rank 0] step:9241/10000 train_time:708702ms step_avg:76.69ms +[2025-09-02 06:24:45] [Rank 0] step:9241/10000 train_time:708702ms step_avg:76.69ms +[2025-09-02 06:24:47] [Rank 0] step:9261/10000 train_time:710349ms step_avg:76.70ms +[2025-09-02 06:24:47] [Rank 0] step:9261/10000 train_time:710349ms step_avg:76.70ms +[2025-09-02 06:24:48] [Rank 0] step:9281/10000 train_time:711979ms step_avg:76.71ms +[2025-09-02 06:24:48] [Rank 0] step:9281/10000 train_time:711979ms step_avg:76.71ms +[2025-09-02 06:24:50] [Rank 0] step:9301/10000 train_time:713616ms step_avg:76.72ms +[2025-09-02 06:24:50] [Rank 0] step:9301/10000 train_time:713616ms step_avg:76.72ms +[2025-09-02 06:24:51] [Rank 0] step:9321/10000 train_time:715256ms step_avg:76.74ms +[2025-09-02 06:24:51] [Rank 0] step:9321/10000 train_time:715256ms step_avg:76.74ms +[2025-09-02 06:24:53] [Rank 
0] step:9341/10000 train_time:716896ms step_avg:76.75ms +[2025-09-02 06:24:53] [Rank 0] step:9341/10000 train_time:716896ms step_avg:76.75ms +[2025-09-02 06:24:55] [Rank 0] step:9361/10000 train_time:718544ms step_avg:76.76ms +[2025-09-02 06:24:55] [Rank 0] step:9361/10000 train_time:718544ms step_avg:76.76ms +[2025-09-02 06:24:56] [Rank 0] step:9381/10000 train_time:720194ms step_avg:76.77ms +[2025-09-02 06:24:56] [Rank 0] step:9381/10000 train_time:720194ms step_avg:76.77ms +[2025-09-02 06:24:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:24:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:25:10] [Rank 0] PRINT: step:9400/10000 val_loss:3.7976 svd_entropy: attn_qk:H=0.7607,top10E=0.26,eRank=179.9,q75/q25=83.95 attn_vo:H=0.7828,top10E=0.16,eRank=256.9,q75/q25=inf mlp_w1:H=0.7980,top10E=0.25,eRank=218.7,q75/q25=16.24 mlp_w2:H=0.8674,top10E=0.13,eRank=324.9,q75/q25=17.48 vo_prod:H=0.6605,top10E=0.23,eRank=119.5,q75/q25=inf train_time:722004ms step_avg:76.81ms +[2025-09-02 06:25:10] [Rank 0] PRINT: step:9400/10000 val_loss:3.7976 svd_entropy: attn_qk:H=0.7607,top10E=0.26,eRank=179.9,q75/q25=83.95 attn_vo:H=0.7828,top10E=0.16,eRank=256.9,q75/q25=inf mlp_w1:H=0.7980,top10E=0.25,eRank=218.7,q75/q25=16.24 mlp_w2:H=0.8674,top10E=0.13,eRank=324.9,q75/q25=17.48 vo_prod:H=0.6605,top10E=0.23,eRank=119.5,q75/q25=inf train_time:722004ms step_avg:76.81ms +[2025-09-02 06:25:10] [Rank 0] step:9401/10000 train_time:722015ms step_avg:76.80ms +[2025-09-02 06:25:10] [Rank 0] step:9401/10000 train_time:722015ms step_avg:76.80ms +[2025-09-02 06:25:11] [Rank 0] step:9421/10000 train_time:723494ms step_avg:76.80ms +[2025-09-02 06:25:11] [Rank 0] step:9421/10000 train_time:723494ms step_avg:76.80ms +[2025-09-02 06:25:13] [Rank 0] step:9441/10000 train_time:725132ms step_avg:76.81ms +[2025-09-02 
06:25:13] [Rank 0] step:9441/10000 train_time:725132ms step_avg:76.81ms +[2025-09-02 06:25:15] [Rank 0] step:9461/10000 train_time:726778ms step_avg:76.82ms +[2025-09-02 06:25:15] [Rank 0] step:9461/10000 train_time:726778ms step_avg:76.82ms +[2025-09-02 06:25:16] [Rank 0] step:9481/10000 train_time:728418ms step_avg:76.83ms +[2025-09-02 06:25:16] [Rank 0] step:9481/10000 train_time:728418ms step_avg:76.83ms +[2025-09-02 06:25:18] [Rank 0] step:9501/10000 train_time:730072ms step_avg:76.84ms +[2025-09-02 06:25:18] [Rank 0] step:9501/10000 train_time:730072ms step_avg:76.84ms +[2025-09-02 06:25:20] [Rank 0] step:9521/10000 train_time:731700ms step_avg:76.85ms +[2025-09-02 06:25:20] [Rank 0] step:9521/10000 train_time:731700ms step_avg:76.85ms +[2025-09-02 06:25:21] [Rank 0] step:9541/10000 train_time:733338ms step_avg:76.86ms +[2025-09-02 06:25:21] [Rank 0] step:9541/10000 train_time:733338ms step_avg:76.86ms +[2025-09-02 06:25:23] [Rank 0] step:9561/10000 train_time:734971ms step_avg:76.87ms +[2025-09-02 06:25:23] [Rank 0] step:9561/10000 train_time:734971ms step_avg:76.87ms +[2025-09-02 06:25:25] [Rank 0] step:9581/10000 train_time:736608ms step_avg:76.88ms +[2025-09-02 06:25:25] [Rank 0] step:9581/10000 train_time:736608ms step_avg:76.88ms +[2025-09-02 06:25:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:25:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:25:38] [Rank 0] PRINT: step:9600/10000 val_loss:3.7921 svd_entropy: attn_qk:H=0.7610,top10E=0.26,eRank=180.1,q75/q25=84.13 attn_vo:H=0.7831,top10E=0.16,eRank=257.3,q75/q25=inf mlp_w1:H=0.7984,top10E=0.25,eRank=219.2,q75/q25=16.25 mlp_w2:H=0.8677,top10E=0.13,eRank=325.6,q75/q25=17.43 vo_prod:H=0.6609,top10E=0.23,eRank=119.9,q75/q25=inf train_time:738420ms step_avg:76.92ms +[2025-09-02 06:25:38] [Rank 0] PRINT: step:9600/10000 val_loss:3.7921 svd_entropy: attn_qk:H=0.7610,top10E=0.26,eRank=180.1,q75/q25=84.13 attn_vo:H=0.7831,top10E=0.16,eRank=257.3,q75/q25=inf mlp_w1:H=0.7984,top10E=0.25,eRank=219.2,q75/q25=16.25 mlp_w2:H=0.8677,top10E=0.13,eRank=325.6,q75/q25=17.43 vo_prod:H=0.6609,top10E=0.23,eRank=119.9,q75/q25=inf train_time:738420ms step_avg:76.92ms +[2025-09-02 06:25:38] [Rank 0] step:9601/10000 train_time:738431ms step_avg:76.91ms +[2025-09-02 06:25:38] [Rank 0] step:9601/10000 train_time:738431ms step_avg:76.91ms +[2025-09-02 06:25:40] [Rank 0] step:9621/10000 train_time:739932ms step_avg:76.91ms +[2025-09-02 06:25:40] [Rank 0] step:9621/10000 train_time:739932ms step_avg:76.91ms +[2025-09-02 06:25:41] [Rank 0] step:9641/10000 train_time:741574ms step_avg:76.92ms +[2025-09-02 06:25:41] [Rank 0] step:9641/10000 train_time:741574ms step_avg:76.92ms +[2025-09-02 06:25:43] [Rank 0] step:9661/10000 train_time:743241ms step_avg:76.93ms +[2025-09-02 06:25:43] [Rank 0] step:9661/10000 train_time:743241ms step_avg:76.93ms +[2025-09-02 06:25:45] [Rank 0] step:9681/10000 train_time:744899ms step_avg:76.94ms +[2025-09-02 06:25:45] [Rank 0] step:9681/10000 train_time:744899ms step_avg:76.94ms +[2025-09-02 06:25:46] [Rank 0] step:9701/10000 train_time:746578ms step_avg:76.96ms +[2025-09-02 06:25:46] [Rank 0] step:9701/10000 train_time:746578ms step_avg:76.96ms +[2025-09-02 06:25:48] [Rank 0] step:9721/10000 train_time:748236ms step_avg:76.97ms +[2025-09-02 06:25:48] [Rank 0] step:9721/10000 train_time:748236ms step_avg:76.97ms +[2025-09-02 06:25:50] [Rank 
0] step:9741/10000 train_time:749922ms step_avg:76.99ms +[2025-09-02 06:25:50] [Rank 0] step:9741/10000 train_time:749922ms step_avg:76.99ms +[2025-09-02 06:25:51] [Rank 0] step:9761/10000 train_time:751585ms step_avg:77.00ms +[2025-09-02 06:25:51] [Rank 0] step:9761/10000 train_time:751585ms step_avg:77.00ms +[2025-09-02 06:25:53] [Rank 0] step:9781/10000 train_time:753263ms step_avg:77.01ms +[2025-09-02 06:25:53] [Rank 0] step:9781/10000 train_time:753263ms step_avg:77.01ms +[2025-09-02 06:25:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:25:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:26:06] [Rank 0] PRINT: step:9800/10000 val_loss:3.7863 svd_entropy: attn_qk:H=0.7612,top10E=0.26,eRank=180.3,q75/q25=84.20 attn_vo:H=0.7833,top10E=0.16,eRank=257.5,q75/q25=inf mlp_w1:H=0.7987,top10E=0.25,eRank=219.5,q75/q25=16.22 mlp_w2:H=0.8679,top10E=0.13,eRank=326.1,q75/q25=17.41 vo_prod:H=0.6613,top10E=0.23,eRank=120.2,q75/q25=inf train_time:755113ms step_avg:77.05ms +[2025-09-02 06:26:06] [Rank 0] PRINT: step:9800/10000 val_loss:3.7863 svd_entropy: attn_qk:H=0.7612,top10E=0.26,eRank=180.3,q75/q25=84.20 attn_vo:H=0.7833,top10E=0.16,eRank=257.5,q75/q25=inf mlp_w1:H=0.7987,top10E=0.25,eRank=219.5,q75/q25=16.22 mlp_w2:H=0.8679,top10E=0.13,eRank=326.1,q75/q25=17.41 vo_prod:H=0.6613,top10E=0.23,eRank=120.2,q75/q25=inf train_time:755113ms step_avg:77.05ms +[2025-09-02 06:26:07] [Rank 0] step:9801/10000 train_time:755124ms step_avg:77.05ms +[2025-09-02 06:26:07] [Rank 0] step:9801/10000 train_time:755124ms step_avg:77.05ms +[2025-09-02 06:26:08] [Rank 0] step:9821/10000 train_time:756618ms step_avg:77.04ms +[2025-09-02 06:26:08] [Rank 0] step:9821/10000 train_time:756618ms step_avg:77.04ms +[2025-09-02 06:26:10] [Rank 0] step:9841/10000 train_time:758299ms step_avg:77.06ms +[2025-09-02 
06:26:10] [Rank 0] step:9841/10000 train_time:758299ms step_avg:77.06ms +[2025-09-02 06:26:12] [Rank 0] step:9861/10000 train_time:759958ms step_avg:77.07ms +[2025-09-02 06:26:12] [Rank 0] step:9861/10000 train_time:759958ms step_avg:77.07ms +[2025-09-02 06:26:13] [Rank 0] step:9881/10000 train_time:761612ms step_avg:77.08ms +[2025-09-02 06:26:13] [Rank 0] step:9881/10000 train_time:761612ms step_avg:77.08ms +[2025-09-02 06:26:15] [Rank 0] step:9901/10000 train_time:763282ms step_avg:77.09ms +[2025-09-02 06:26:15] [Rank 0] step:9901/10000 train_time:763282ms step_avg:77.09ms +[2025-09-02 06:26:17] [Rank 0] step:9921/10000 train_time:764945ms step_avg:77.10ms +[2025-09-02 06:26:17] [Rank 0] step:9921/10000 train_time:764945ms step_avg:77.10ms +[2025-09-02 06:26:18] [Rank 0] step:9941/10000 train_time:766616ms step_avg:77.12ms +[2025-09-02 06:26:18] [Rank 0] step:9941/10000 train_time:766616ms step_avg:77.12ms +[2025-09-02 06:26:20] [Rank 0] step:9961/10000 train_time:768283ms step_avg:77.13ms +[2025-09-02 06:26:20] [Rank 0] step:9961/10000 train_time:768283ms step_avg:77.13ms +[2025-09-02 06:26:22] [Rank 0] step:9981/10000 train_time:769946ms step_avg:77.14ms +[2025-09-02 06:26:22] [Rank 0] step:9981/10000 train_time:769946ms step_avg:77.14ms +[2025-09-02 06:26:23] [Rank 0] step:10000/10000 train_time:771537ms step_avg:77.15ms +[2025-09-02 06:26:23] [Rank 0] step:10000/10000 train_time:771537ms step_avg:77.15ms +[2025-09-02 06:26:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:26:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:26:35] [Rank 0] PRINT: step:10000/10000 val_loss:3.7799 svd_entropy: attn_qk:H=0.7613,top10E=0.26,eRank=180.4,q75/q25=84.31 attn_vo:H=0.7835,top10E=0.16,eRank=257.7,q75/q25=inf mlp_w1:H=0.7989,top10E=0.25,eRank=219.8,q75/q25=16.18 mlp_w2:H=0.8681,top10E=0.13,eRank=326.4,q75/q25=17.36 vo_prod:H=0.6616,top10E=0.23,eRank=120.4,q75/q25=inf train_time:771792ms step_avg:77.18ms +[2025-09-02 06:26:35] [Rank 0] PRINT: step:10000/10000 val_loss:3.7799 svd_entropy: attn_qk:H=0.7613,top10E=0.26,eRank=180.4,q75/q25=84.31 attn_vo:H=0.7835,top10E=0.16,eRank=257.7,q75/q25=inf mlp_w1:H=0.7989,top10E=0.25,eRank=219.8,q75/q25=16.18 mlp_w2:H=0.8681,top10E=0.13,eRank=326.4,q75/q25=17.36 vo_prod:H=0.6616,top10E=0.23,eRank=120.4,q75/q25=inf train_time:771792ms step_avg:77.18ms +[2025-09-02 06:26:35] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 06:26:35 2025 --- +[2025-09-02 06:26:35] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 06:26:35 2025 --- +[2025-09-02 06:26:35] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 06:26:35] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_43/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..930a8a8a8acd5d4838a0a1d2c5830086f76041a4 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "73696ef0-55a5-42cd-a6cb-89d21d9d7f5d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_43/training_log_73696ef0-55a5-42cd-a6cb-89d21d9d7f5d.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_43/training_log_73696ef0-55a5-42cd-a6cb-89d21d9d7f5d.txt new file mode 100644 index 0000000000000000000000000000000000000000..dc9fb2df763ee2e2241830575129307b5ebe5dc6 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_43/training_log_73696ef0-55a5-42cd-a6cb-89d21d9d7f5d.txt @@ -0,0 +1,2984 @@ +[2025-09-02 07:16:21] [Rank 0] PRINT: --- Script Start: Tue Sep 2 07:16:21 2025 --- +[2025-09-02 07:16:21] [Rank 0] PRINT: --- Script Start: Tue Sep 2 07:16:21 2025 --- +[2025-09-02 07:16:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 07:16:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 07:16:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 07:16:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 07:16:21] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-02 07:16:21] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-02 07:16:21] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_43 +[2025-09-02 07:16:21] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_43 +[2025-09-02 07:16:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 07:16:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 07:16:22] [Rank 0] PRINT: Constructing model... +[2025-09-02 07:16:22] [Rank 0] PRINT: Constructing model... +[2025-09-02 07:16:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 07:16:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 07:16:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 07:16:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 07:16:23] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 07:16:23] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 07:16:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 07:16:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 07:16:23] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 07:16:23] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 07:16:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 07:16:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 07:16:23] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 07:16:23] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 07:16:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 07:16:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 07:16:23] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 07:16:23] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 07:16:23] [Rank 0] PRINT: Starting warmup... +[2025-09-02 07:16:23] [Rank 0] PRINT: Starting warmup... +[2025-09-02 07:17:07] [Rank 0] PRINT: Warmup complete. +[2025-09-02 07:17:07] [Rank 0] PRINT: Warmup complete. +[2025-09-02 07:17:07] [Rank 0] PRINT: Starting training... +[2025-09-02 07:17:07] [Rank 0] PRINT: Starting training... 
+[2025-09-02 07:17:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:17:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:17:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 07:17:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 07:17:24] [Rank 0] step:21/10000 train_time:1309ms step_avg:62.31ms +[2025-09-02 07:17:24] [Rank 0] step:21/10000 train_time:1309ms step_avg:62.31ms +[2025-09-02 07:17:26] [Rank 0] step:41/10000 train_time:2718ms step_avg:66.29ms +[2025-09-02 07:17:26] [Rank 0] step:41/10000 train_time:2718ms step_avg:66.29ms +[2025-09-02 07:17:27] [Rank 0] step:61/10000 train_time:4131ms step_avg:67.72ms +[2025-09-02 07:17:27] [Rank 0] step:61/10000 train_time:4131ms step_avg:67.72ms +[2025-09-02 07:17:29] [Rank 0] step:81/10000 train_time:5545ms step_avg:68.45ms +[2025-09-02 07:17:29] [Rank 0] step:81/10000 train_time:5545ms step_avg:68.45ms +[2025-09-02 07:17:30] [Rank 0] step:101/10000 train_time:6960ms step_avg:68.91ms +[2025-09-02 07:17:30] [Rank 0] step:101/10000 train_time:6960ms step_avg:68.91ms +[2025-09-02 07:17:31] [Rank 0] step:121/10000 train_time:8376ms step_avg:69.22ms +[2025-09-02 07:17:31] [Rank 0] step:121/10000 
train_time:8376ms step_avg:69.22ms +[2025-09-02 07:17:33] [Rank 0] step:141/10000 train_time:9793ms step_avg:69.45ms +[2025-09-02 07:17:33] [Rank 0] step:141/10000 train_time:9793ms step_avg:69.45ms +[2025-09-02 07:17:34] [Rank 0] step:161/10000 train_time:11211ms step_avg:69.63ms +[2025-09-02 07:17:34] [Rank 0] step:161/10000 train_time:11211ms step_avg:69.63ms +[2025-09-02 07:17:36] [Rank 0] step:181/10000 train_time:12629ms step_avg:69.77ms +[2025-09-02 07:17:36] [Rank 0] step:181/10000 train_time:12629ms step_avg:69.77ms +[2025-09-02 07:17:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:17:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:17:49] [Rank 0] PRINT: step:200/10000 val_loss:6.5070 svd_entropy: attn_qk:H=0.4934,top10E=0.73,eRank=73.4,q75/q25=12.05 attn_vo:H=0.4585,top10E=0.65,eRank=65.2,q75/q25=inf mlp_w1:H=0.4415,top10E=0.74,eRank=23.4,q75/q25=2.66 mlp_w2:H=0.1568,top10E=0.96,eRank=4.0,q75/q25=199.26 vo_prod:H=0.2305,top10E=0.86,eRank=8.6,q75/q25=inf train_time:14189ms step_avg:70.95ms +[2025-09-02 07:17:49] [Rank 0] PRINT: step:200/10000 val_loss:6.5070 svd_entropy: attn_qk:H=0.4934,top10E=0.73,eRank=73.4,q75/q25=12.05 attn_vo:H=0.4585,top10E=0.65,eRank=65.2,q75/q25=inf mlp_w1:H=0.4415,top10E=0.74,eRank=23.4,q75/q25=2.66 mlp_w2:H=0.1568,top10E=0.96,eRank=4.0,q75/q25=199.26 vo_prod:H=0.2305,top10E=0.86,eRank=8.6,q75/q25=inf train_time:14189ms step_avg:70.95ms +[2025-09-02 07:17:49] [Rank 0] step:201/10000 train_time:14200ms step_avg:70.65ms +[2025-09-02 07:17:49] [Rank 0] step:201/10000 train_time:14200ms step_avg:70.65ms +[2025-09-02 07:17:51] [Rank 0] step:221/10000 train_time:15491ms step_avg:70.10ms +[2025-09-02 07:17:51] [Rank 0] step:221/10000 train_time:15491ms step_avg:70.10ms +[2025-09-02 07:17:52] [Rank 0] step:241/10000 train_time:16908ms 
step_avg:70.16ms +[2025-09-02 07:17:52] [Rank 0] step:241/10000 train_time:16908ms step_avg:70.16ms +[2025-09-02 07:17:53] [Rank 0] step:261/10000 train_time:18325ms step_avg:70.21ms +[2025-09-02 07:17:53] [Rank 0] step:261/10000 train_time:18325ms step_avg:70.21ms +[2025-09-02 07:17:55] [Rank 0] step:281/10000 train_time:19742ms step_avg:70.26ms +[2025-09-02 07:17:55] [Rank 0] step:281/10000 train_time:19742ms step_avg:70.26ms +[2025-09-02 07:17:56] [Rank 0] step:301/10000 train_time:21160ms step_avg:70.30ms +[2025-09-02 07:17:56] [Rank 0] step:301/10000 train_time:21160ms step_avg:70.30ms +[2025-09-02 07:17:58] [Rank 0] step:321/10000 train_time:22579ms step_avg:70.34ms +[2025-09-02 07:17:58] [Rank 0] step:321/10000 train_time:22579ms step_avg:70.34ms +[2025-09-02 07:17:59] [Rank 0] step:341/10000 train_time:23997ms step_avg:70.37ms +[2025-09-02 07:17:59] [Rank 0] step:341/10000 train_time:23997ms step_avg:70.37ms +[2025-09-02 07:18:00] [Rank 0] step:361/10000 train_time:25416ms step_avg:70.41ms +[2025-09-02 07:18:00] [Rank 0] step:361/10000 train_time:25416ms step_avg:70.41ms +[2025-09-02 07:18:02] [Rank 0] step:381/10000 train_time:26834ms step_avg:70.43ms +[2025-09-02 07:18:02] [Rank 0] step:381/10000 train_time:26834ms step_avg:70.43ms +[2025-09-02 07:18:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:18:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:18:15] [Rank 0] PRINT: step:400/10000 val_loss:5.9834 svd_entropy: attn_qk:H=0.5427,top10E=0.64,eRank=81.5,q75/q25=13.33 attn_vo:H=0.5279,top10E=0.53,eRank=78.7,q75/q25=inf mlp_w1:H=0.4608,top10E=0.68,eRank=33.9,q75/q25=3.29 mlp_w2:H=0.5455,top10E=0.60,eRank=37.9,q75/q25=13.64 vo_prod:H=0.3537,top10E=0.77,eRank=16.0,q75/q25=inf train_time:28395ms step_avg:70.99ms +[2025-09-02 07:18:15] [Rank 0] PRINT: step:400/10000 val_loss:5.9834 svd_entropy: attn_qk:H=0.5427,top10E=0.64,eRank=81.5,q75/q25=13.33 attn_vo:H=0.5279,top10E=0.53,eRank=78.7,q75/q25=inf mlp_w1:H=0.4608,top10E=0.68,eRank=33.9,q75/q25=3.29 mlp_w2:H=0.5455,top10E=0.60,eRank=37.9,q75/q25=13.64 vo_prod:H=0.3537,top10E=0.77,eRank=16.0,q75/q25=inf train_time:28395ms step_avg:70.99ms +[2025-09-02 07:18:15] [Rank 0] step:401/10000 train_time:28406ms step_avg:70.84ms +[2025-09-02 07:18:15] [Rank 0] step:401/10000 train_time:28406ms step_avg:70.84ms +[2025-09-02 07:18:17] [Rank 0] step:421/10000 train_time:29701ms step_avg:70.55ms +[2025-09-02 07:18:17] [Rank 0] step:421/10000 train_time:29701ms step_avg:70.55ms +[2025-09-02 07:18:18] [Rank 0] step:441/10000 train_time:31118ms step_avg:70.56ms +[2025-09-02 07:18:18] [Rank 0] step:441/10000 train_time:31118ms step_avg:70.56ms +[2025-09-02 07:18:19] [Rank 0] step:461/10000 train_time:32538ms step_avg:70.58ms +[2025-09-02 07:18:19] [Rank 0] step:461/10000 train_time:32538ms step_avg:70.58ms +[2025-09-02 07:18:21] [Rank 0] step:481/10000 train_time:33958ms step_avg:70.60ms +[2025-09-02 07:18:21] [Rank 0] step:481/10000 train_time:33958ms step_avg:70.60ms +[2025-09-02 07:18:22] [Rank 0] step:501/10000 train_time:35376ms step_avg:70.61ms +[2025-09-02 07:18:22] [Rank 0] step:501/10000 train_time:35376ms step_avg:70.61ms +[2025-09-02 07:18:24] [Rank 0] step:521/10000 train_time:36796ms step_avg:70.63ms +[2025-09-02 07:18:24] [Rank 0] step:521/10000 train_time:36796ms step_avg:70.63ms +[2025-09-02 07:18:25] [Rank 0] step:541/10000 train_time:38216ms 
step_avg:70.64ms +[2025-09-02 07:18:25] [Rank 0] step:541/10000 train_time:38216ms step_avg:70.64ms +[2025-09-02 07:18:27] [Rank 0] step:561/10000 train_time:39636ms step_avg:70.65ms +[2025-09-02 07:18:27] [Rank 0] step:561/10000 train_time:39636ms step_avg:70.65ms +[2025-09-02 07:18:28] [Rank 0] step:581/10000 train_time:41056ms step_avg:70.66ms +[2025-09-02 07:18:28] [Rank 0] step:581/10000 train_time:41056ms step_avg:70.66ms +[2025-09-02 07:18:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:18:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:18:41] [Rank 0] PRINT: step:600/10000 val_loss:5.6916 svd_entropy: attn_qk:H=0.5733,top10E=0.57,eRank=88.0,q75/q25=14.80 attn_vo:H=0.5681,top10E=0.46,eRank=92.1,q75/q25=inf mlp_w1:H=0.5018,top10E=0.63,eRank=44.2,q75/q25=3.74 mlp_w2:H=0.6297,top10E=0.46,eRank=66.4,q75/q25=10.97 vo_prod:H=0.4130,top10E=0.68,eRank=22.2,q75/q25=inf train_time:42619ms step_avg:71.03ms +[2025-09-02 07:18:41] [Rank 0] PRINT: step:600/10000 val_loss:5.6916 svd_entropy: attn_qk:H=0.5733,top10E=0.57,eRank=88.0,q75/q25=14.80 attn_vo:H=0.5681,top10E=0.46,eRank=92.1,q75/q25=inf mlp_w1:H=0.5018,top10E=0.63,eRank=44.2,q75/q25=3.74 mlp_w2:H=0.6297,top10E=0.46,eRank=66.4,q75/q25=10.97 vo_prod:H=0.4130,top10E=0.68,eRank=22.2,q75/q25=inf train_time:42619ms step_avg:71.03ms +[2025-09-02 07:18:41] [Rank 0] step:601/10000 train_time:42630ms step_avg:70.93ms +[2025-09-02 07:18:41] [Rank 0] step:601/10000 train_time:42630ms step_avg:70.93ms +[2025-09-02 07:18:43] [Rank 0] step:621/10000 train_time:43916ms step_avg:70.72ms +[2025-09-02 07:18:43] [Rank 0] step:621/10000 train_time:43916ms step_avg:70.72ms +[2025-09-02 07:18:44] [Rank 0] step:641/10000 train_time:45335ms step_avg:70.72ms +[2025-09-02 07:18:44] [Rank 0] step:641/10000 train_time:45335ms step_avg:70.72ms 
+[2025-09-02 07:18:46] [Rank 0] step:661/10000 train_time:46753ms step_avg:70.73ms +[2025-09-02 07:18:46] [Rank 0] step:661/10000 train_time:46753ms step_avg:70.73ms +[2025-09-02 07:18:47] [Rank 0] step:681/10000 train_time:48172ms step_avg:70.74ms +[2025-09-02 07:18:47] [Rank 0] step:681/10000 train_time:48172ms step_avg:70.74ms +[2025-09-02 07:18:48] [Rank 0] step:701/10000 train_time:49591ms step_avg:70.74ms +[2025-09-02 07:18:48] [Rank 0] step:701/10000 train_time:49591ms step_avg:70.74ms +[2025-09-02 07:18:50] [Rank 0] step:721/10000 train_time:51012ms step_avg:70.75ms +[2025-09-02 07:18:50] [Rank 0] step:721/10000 train_time:51012ms step_avg:70.75ms +[2025-09-02 07:18:51] [Rank 0] step:741/10000 train_time:52432ms step_avg:70.76ms +[2025-09-02 07:18:51] [Rank 0] step:741/10000 train_time:52432ms step_avg:70.76ms +[2025-09-02 07:18:53] [Rank 0] step:761/10000 train_time:53864ms step_avg:70.78ms +[2025-09-02 07:18:53] [Rank 0] step:761/10000 train_time:53864ms step_avg:70.78ms +[2025-09-02 07:18:54] [Rank 0] step:781/10000 train_time:55296ms step_avg:70.80ms +[2025-09-02 07:18:54] [Rank 0] step:781/10000 train_time:55296ms step_avg:70.80ms +[2025-09-02 07:18:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:18:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:19:07] [Rank 0] PRINT: step:800/10000 val_loss:5.4642 svd_entropy: attn_qk:H=0.5971,top10E=0.52,eRank=93.7,q75/q25=16.65 attn_vo:H=0.5979,top10E=0.41,eRank=104.8,q75/q25=inf mlp_w1:H=0.5357,top10E=0.58,eRank=52.9,q75/q25=4.14 mlp_w2:H=0.6779,top10E=0.37,eRank=91.3,q75/q25=10.32 vo_prod:H=0.4515,top10E=0.60,eRank=28.1,q75/q25=inf train_time:56873ms step_avg:71.09ms +[2025-09-02 07:19:07] [Rank 0] PRINT: step:800/10000 val_loss:5.4642 svd_entropy: attn_qk:H=0.5971,top10E=0.52,eRank=93.7,q75/q25=16.65 attn_vo:H=0.5979,top10E=0.41,eRank=104.8,q75/q25=inf mlp_w1:H=0.5357,top10E=0.58,eRank=52.9,q75/q25=4.14 mlp_w2:H=0.6779,top10E=0.37,eRank=91.3,q75/q25=10.32 vo_prod:H=0.4515,top10E=0.60,eRank=28.1,q75/q25=inf train_time:56873ms step_avg:71.09ms +[2025-09-02 07:19:07] [Rank 0] step:801/10000 train_time:56884ms step_avg:71.02ms +[2025-09-02 07:19:07] [Rank 0] step:801/10000 train_time:56884ms step_avg:71.02ms +[2025-09-02 07:19:09] [Rank 0] step:821/10000 train_time:58197ms step_avg:70.89ms +[2025-09-02 07:19:09] [Rank 0] step:821/10000 train_time:58197ms step_avg:70.89ms +[2025-09-02 07:19:10] [Rank 0] step:841/10000 train_time:59628ms step_avg:70.90ms +[2025-09-02 07:19:10] [Rank 0] step:841/10000 train_time:59628ms step_avg:70.90ms +[2025-09-02 07:19:12] [Rank 0] step:861/10000 train_time:61058ms step_avg:70.92ms +[2025-09-02 07:19:12] [Rank 0] step:861/10000 train_time:61058ms step_avg:70.92ms +[2025-09-02 07:19:13] [Rank 0] step:881/10000 train_time:62490ms step_avg:70.93ms +[2025-09-02 07:19:13] [Rank 0] step:881/10000 train_time:62490ms step_avg:70.93ms +[2025-09-02 07:19:15] [Rank 0] step:901/10000 train_time:63924ms step_avg:70.95ms +[2025-09-02 07:19:15] [Rank 0] step:901/10000 train_time:63924ms step_avg:70.95ms +[2025-09-02 07:19:16] [Rank 0] step:921/10000 train_time:65356ms step_avg:70.96ms +[2025-09-02 07:19:16] [Rank 0] step:921/10000 train_time:65356ms step_avg:70.96ms +[2025-09-02 07:19:17] [Rank 0] step:941/10000 train_time:66789ms 
step_avg:70.98ms +[2025-09-02 07:19:17] [Rank 0] step:941/10000 train_time:66789ms step_avg:70.98ms +[2025-09-02 07:19:19] [Rank 0] step:961/10000 train_time:68221ms step_avg:70.99ms +[2025-09-02 07:19:19] [Rank 0] step:961/10000 train_time:68221ms step_avg:70.99ms +[2025-09-02 07:19:20] [Rank 0] step:981/10000 train_time:69656ms step_avg:71.00ms +[2025-09-02 07:19:20] [Rank 0] step:981/10000 train_time:69656ms step_avg:71.00ms +[2025-09-02 07:19:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:19:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:19:33] [Rank 0] PRINT: step:1000/10000 val_loss:5.3050 svd_entropy: attn_qk:H=0.6161,top10E=0.48,eRank=99.0,q75/q25=18.88 attn_vo:H=0.6212,top10E=0.38,eRank=117.2,q75/q25=inf mlp_w1:H=0.5631,top10E=0.55,eRank=60.5,q75/q25=4.60 mlp_w2:H=0.7074,top10E=0.32,eRank=110.9,q75/q25=11.23 vo_prod:H=0.4780,top10E=0.55,eRank=33.2,q75/q25=inf train_time:71233ms step_avg:71.23ms +[2025-09-02 07:19:33] [Rank 0] PRINT: step:1000/10000 val_loss:5.3050 svd_entropy: attn_qk:H=0.6161,top10E=0.48,eRank=99.0,q75/q25=18.88 attn_vo:H=0.6212,top10E=0.38,eRank=117.2,q75/q25=inf mlp_w1:H=0.5631,top10E=0.55,eRank=60.5,q75/q25=4.60 mlp_w2:H=0.7074,top10E=0.32,eRank=110.9,q75/q25=11.23 vo_prod:H=0.4780,top10E=0.55,eRank=33.2,q75/q25=inf train_time:71233ms step_avg:71.23ms +[2025-09-02 07:19:34] [Rank 0] step:1001/10000 train_time:71243ms step_avg:71.17ms +[2025-09-02 07:19:34] [Rank 0] step:1001/10000 train_time:71243ms step_avg:71.17ms +[2025-09-02 07:19:35] [Rank 0] step:1021/10000 train_time:72567ms step_avg:71.07ms +[2025-09-02 07:19:35] [Rank 0] step:1021/10000 train_time:72567ms step_avg:71.07ms +[2025-09-02 07:19:36] [Rank 0] step:1041/10000 train_time:73999ms step_avg:71.08ms +[2025-09-02 07:19:36] [Rank 0] step:1041/10000 train_time:73999ms 
step_avg:71.08ms +[2025-09-02 07:19:38] [Rank 0] step:1061/10000 train_time:75434ms step_avg:71.10ms +[2025-09-02 07:19:38] [Rank 0] step:1061/10000 train_time:75434ms step_avg:71.10ms +[2025-09-02 07:19:39] [Rank 0] step:1081/10000 train_time:76867ms step_avg:71.11ms +[2025-09-02 07:19:39] [Rank 0] step:1081/10000 train_time:76867ms step_avg:71.11ms +[2025-09-02 07:19:41] [Rank 0] step:1101/10000 train_time:78301ms step_avg:71.12ms +[2025-09-02 07:19:41] [Rank 0] step:1101/10000 train_time:78301ms step_avg:71.12ms +[2025-09-02 07:19:42] [Rank 0] step:1121/10000 train_time:79735ms step_avg:71.13ms +[2025-09-02 07:19:42] [Rank 0] step:1121/10000 train_time:79735ms step_avg:71.13ms +[2025-09-02 07:19:44] [Rank 0] step:1141/10000 train_time:81170ms step_avg:71.14ms +[2025-09-02 07:19:44] [Rank 0] step:1141/10000 train_time:81170ms step_avg:71.14ms +[2025-09-02 07:19:45] [Rank 0] step:1161/10000 train_time:82605ms step_avg:71.15ms +[2025-09-02 07:19:45] [Rank 0] step:1161/10000 train_time:82605ms step_avg:71.15ms +[2025-09-02 07:19:46] [Rank 0] step:1181/10000 train_time:84041ms step_avg:71.16ms +[2025-09-02 07:19:46] [Rank 0] step:1181/10000 train_time:84041ms step_avg:71.16ms +[2025-09-02 07:19:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:19:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:20:00] [Rank 0] PRINT: step:1200/10000 val_loss:5.1522 svd_entropy: attn_qk:H=0.6321,top10E=0.45,eRank=104.4,q75/q25=21.66 attn_vo:H=0.6414,top10E=0.35,eRank=130.1,q75/q25=inf mlp_w1:H=0.5854,top10E=0.52,eRank=67.5,q75/q25=5.09 mlp_w2:H=0.7293,top10E=0.29,eRank=128.4,q75/q25=13.07 vo_prod:H=0.4997,top10E=0.51,eRank=38.3,q75/q25=inf train_time:85620ms step_avg:71.35ms +[2025-09-02 07:20:00] [Rank 0] PRINT: step:1200/10000 val_loss:5.1522 svd_entropy: attn_qk:H=0.6321,top10E=0.45,eRank=104.4,q75/q25=21.66 attn_vo:H=0.6414,top10E=0.35,eRank=130.1,q75/q25=inf mlp_w1:H=0.5854,top10E=0.52,eRank=67.5,q75/q25=5.09 mlp_w2:H=0.7293,top10E=0.29,eRank=128.4,q75/q25=13.07 vo_prod:H=0.4997,top10E=0.51,eRank=38.3,q75/q25=inf train_time:85620ms step_avg:71.35ms +[2025-09-02 07:20:00] [Rank 0] step:1201/10000 train_time:85631ms step_avg:71.30ms +[2025-09-02 07:20:00] [Rank 0] step:1201/10000 train_time:85631ms step_avg:71.30ms +[2025-09-02 07:20:01] [Rank 0] step:1221/10000 train_time:86922ms step_avg:71.19ms +[2025-09-02 07:20:01] [Rank 0] step:1221/10000 train_time:86922ms step_avg:71.19ms +[2025-09-02 07:20:03] [Rank 0] step:1241/10000 train_time:88356ms step_avg:71.20ms +[2025-09-02 07:20:03] [Rank 0] step:1241/10000 train_time:88356ms step_avg:71.20ms +[2025-09-02 07:20:04] [Rank 0] step:1261/10000 train_time:89790ms step_avg:71.21ms +[2025-09-02 07:20:04] [Rank 0] step:1261/10000 train_time:89790ms step_avg:71.21ms +[2025-09-02 07:20:06] [Rank 0] step:1281/10000 train_time:91224ms step_avg:71.21ms +[2025-09-02 07:20:06] [Rank 0] step:1281/10000 train_time:91224ms step_avg:71.21ms +[2025-09-02 07:20:07] [Rank 0] step:1301/10000 train_time:92658ms step_avg:71.22ms +[2025-09-02 07:20:07] [Rank 0] step:1301/10000 train_time:92658ms step_avg:71.22ms +[2025-09-02 07:20:08] [Rank 0] step:1321/10000 train_time:94096ms step_avg:71.23ms +[2025-09-02 07:20:08] [Rank 0] step:1321/10000 train_time:94096ms step_avg:71.23ms +[2025-09-02 07:20:10] [Rank 0] step:1341/10000 
train_time:95529ms step_avg:71.24ms +[2025-09-02 07:20:10] [Rank 0] step:1341/10000 train_time:95529ms step_avg:71.24ms +[2025-09-02 07:20:11] [Rank 0] step:1361/10000 train_time:96963ms step_avg:71.24ms +[2025-09-02 07:20:11] [Rank 0] step:1361/10000 train_time:96963ms step_avg:71.24ms +[2025-09-02 07:20:13] [Rank 0] step:1381/10000 train_time:98398ms step_avg:71.25ms +[2025-09-02 07:20:13] [Rank 0] step:1381/10000 train_time:98398ms step_avg:71.25ms +[2025-09-02 07:20:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:20:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:20:26] [Rank 0] PRINT: step:1400/10000 val_loss:5.0117 svd_entropy: attn_qk:H=0.6452,top10E=0.43,eRank=109.3,q75/q25=25.62 attn_vo:H=0.6587,top10E=0.33,eRank=142.1,q75/q25=inf mlp_w1:H=0.6057,top10E=0.50,eRank=74.6,q75/q25=5.70 mlp_w2:H=0.7471,top10E=0.26,eRank=144.7,q75/q25=15.24 vo_prod:H=0.5174,top10E=0.47,eRank=43.0,q75/q25=inf train_time:99975ms step_avg:71.41ms +[2025-09-02 07:20:26] [Rank 0] PRINT: step:1400/10000 val_loss:5.0117 svd_entropy: attn_qk:H=0.6452,top10E=0.43,eRank=109.3,q75/q25=25.62 attn_vo:H=0.6587,top10E=0.33,eRank=142.1,q75/q25=inf mlp_w1:H=0.6057,top10E=0.50,eRank=74.6,q75/q25=5.70 mlp_w2:H=0.7471,top10E=0.26,eRank=144.7,q75/q25=15.24 vo_prod:H=0.5174,top10E=0.47,eRank=43.0,q75/q25=inf train_time:99975ms step_avg:71.41ms +[2025-09-02 07:20:26] [Rank 0] step:1401/10000 train_time:99986ms step_avg:71.37ms +[2025-09-02 07:20:26] [Rank 0] step:1401/10000 train_time:99986ms step_avg:71.37ms +[2025-09-02 07:20:27] [Rank 0] step:1421/10000 train_time:101286ms step_avg:71.28ms +[2025-09-02 07:20:27] [Rank 0] step:1421/10000 train_time:101286ms step_avg:71.28ms +[2025-09-02 07:20:29] [Rank 0] step:1441/10000 train_time:102718ms step_avg:71.28ms +[2025-09-02 07:20:29] [Rank 0] step:1441/10000 
train_time:102718ms step_avg:71.28ms +[2025-09-02 07:20:30] [Rank 0] step:1461/10000 train_time:104151ms step_avg:71.29ms +[2025-09-02 07:20:30] [Rank 0] step:1461/10000 train_time:104151ms step_avg:71.29ms +[2025-09-02 07:20:32] [Rank 0] step:1481/10000 train_time:105585ms step_avg:71.29ms +[2025-09-02 07:20:32] [Rank 0] step:1481/10000 train_time:105585ms step_avg:71.29ms +[2025-09-02 07:20:33] [Rank 0] step:1501/10000 train_time:107025ms step_avg:71.30ms +[2025-09-02 07:20:33] [Rank 0] step:1501/10000 train_time:107025ms step_avg:71.30ms +[2025-09-02 07:20:35] [Rank 0] step:1521/10000 train_time:108472ms step_avg:71.32ms +[2025-09-02 07:20:35] [Rank 0] step:1521/10000 train_time:108472ms step_avg:71.32ms +[2025-09-02 07:20:36] [Rank 0] step:1541/10000 train_time:109916ms step_avg:71.33ms +[2025-09-02 07:20:36] [Rank 0] step:1541/10000 train_time:109916ms step_avg:71.33ms +[2025-09-02 07:20:37] [Rank 0] step:1561/10000 train_time:111361ms step_avg:71.34ms +[2025-09-02 07:20:37] [Rank 0] step:1561/10000 train_time:111361ms step_avg:71.34ms +[2025-09-02 07:20:39] [Rank 0] step:1581/10000 train_time:112804ms step_avg:71.35ms +[2025-09-02 07:20:39] [Rank 0] step:1581/10000 train_time:112804ms step_avg:71.35ms +[2025-09-02 07:20:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:20:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:20:52] [Rank 0] PRINT: step:1600/10000 val_loss:4.8648 svd_entropy: attn_qk:H=0.6561,top10E=0.41,eRank=113.6,q75/q25=30.50 attn_vo:H=0.6732,top10E=0.31,eRank=152.7,q75/q25=inf mlp_w1:H=0.6231,top10E=0.47,eRank=81.5,q75/q25=6.37 mlp_w2:H=0.7611,top10E=0.24,eRank=159.0,q75/q25=17.62 vo_prod:H=0.5324,top10E=0.44,eRank=47.5,q75/q25=inf train_time:114402ms step_avg:71.50ms +[2025-09-02 07:20:52] [Rank 0] PRINT: step:1600/10000 val_loss:4.8648 svd_entropy: attn_qk:H=0.6561,top10E=0.41,eRank=113.6,q75/q25=30.50 attn_vo:H=0.6732,top10E=0.31,eRank=152.7,q75/q25=inf mlp_w1:H=0.6231,top10E=0.47,eRank=81.5,q75/q25=6.37 mlp_w2:H=0.7611,top10E=0.24,eRank=159.0,q75/q25=17.62 vo_prod:H=0.5324,top10E=0.44,eRank=47.5,q75/q25=inf train_time:114402ms step_avg:71.50ms +[2025-09-02 07:20:52] [Rank 0] step:1601/10000 train_time:114413ms step_avg:71.46ms +[2025-09-02 07:20:52] [Rank 0] step:1601/10000 train_time:114413ms step_avg:71.46ms +[2025-09-02 07:20:54] [Rank 0] step:1621/10000 train_time:115717ms step_avg:71.39ms +[2025-09-02 07:20:54] [Rank 0] step:1621/10000 train_time:115717ms step_avg:71.39ms +[2025-09-02 07:20:55] [Rank 0] step:1641/10000 train_time:117161ms step_avg:71.40ms +[2025-09-02 07:20:55] [Rank 0] step:1641/10000 train_time:117161ms step_avg:71.40ms +[2025-09-02 07:20:57] [Rank 0] step:1661/10000 train_time:118605ms step_avg:71.41ms +[2025-09-02 07:20:57] [Rank 0] step:1661/10000 train_time:118605ms step_avg:71.41ms +[2025-09-02 07:20:58] [Rank 0] step:1681/10000 train_time:120049ms step_avg:71.42ms +[2025-09-02 07:20:58] [Rank 0] step:1681/10000 train_time:120049ms step_avg:71.42ms +[2025-09-02 07:20:59] [Rank 0] step:1701/10000 train_time:121493ms step_avg:71.42ms +[2025-09-02 07:20:59] [Rank 0] step:1701/10000 train_time:121493ms step_avg:71.42ms +[2025-09-02 07:21:01] [Rank 0] step:1721/10000 train_time:122938ms step_avg:71.43ms +[2025-09-02 07:21:01] [Rank 0] step:1721/10000 train_time:122938ms step_avg:71.43ms +[2025-09-02 07:21:02] [Rank 0] 
step:1741/10000 train_time:124383ms step_avg:71.44ms +[2025-09-02 07:21:02] [Rank 0] step:1741/10000 train_time:124383ms step_avg:71.44ms +[2025-09-02 07:21:04] [Rank 0] step:1761/10000 train_time:125829ms step_avg:71.45ms +[2025-09-02 07:21:04] [Rank 0] step:1761/10000 train_time:125829ms step_avg:71.45ms +[2025-09-02 07:21:05] [Rank 0] step:1781/10000 train_time:127274ms step_avg:71.46ms +[2025-09-02 07:21:05] [Rank 0] step:1781/10000 train_time:127274ms step_avg:71.46ms +[2025-09-02 07:21:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:21:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:21:18] [Rank 0] PRINT: step:1800/10000 val_loss:4.7562 svd_entropy: attn_qk:H=0.6655,top10E=0.40,eRank=117.6,q75/q25=35.41 attn_vo:H=0.6854,top10E=0.29,eRank=161.9,q75/q25=inf mlp_w1:H=0.6399,top10E=0.45,eRank=88.8,q75/q25=7.04 mlp_w2:H=0.7730,top10E=0.22,eRank=172.4,q75/q25=19.93 vo_prod:H=0.5450,top10E=0.41,eRank=51.7,q75/q25=inf train_time:128865ms step_avg:71.59ms +[2025-09-02 07:21:18] [Rank 0] PRINT: step:1800/10000 val_loss:4.7562 svd_entropy: attn_qk:H=0.6655,top10E=0.40,eRank=117.6,q75/q25=35.41 attn_vo:H=0.6854,top10E=0.29,eRank=161.9,q75/q25=inf mlp_w1:H=0.6399,top10E=0.45,eRank=88.8,q75/q25=7.04 mlp_w2:H=0.7730,top10E=0.22,eRank=172.4,q75/q25=19.93 vo_prod:H=0.5450,top10E=0.41,eRank=51.7,q75/q25=inf train_time:128865ms step_avg:71.59ms +[2025-09-02 07:21:19] [Rank 0] step:1801/10000 train_time:128876ms step_avg:71.56ms +[2025-09-02 07:21:19] [Rank 0] step:1801/10000 train_time:128876ms step_avg:71.56ms +[2025-09-02 07:21:20] [Rank 0] step:1821/10000 train_time:130201ms step_avg:71.50ms +[2025-09-02 07:21:20] [Rank 0] step:1821/10000 train_time:130201ms step_avg:71.50ms +[2025-09-02 07:21:21] [Rank 0] step:1841/10000 train_time:131644ms step_avg:71.51ms +[2025-09-02 07:21:21] 
[Rank 0] step:1841/10000 train_time:131644ms step_avg:71.51ms +[2025-09-02 07:21:23] [Rank 0] step:1861/10000 train_time:133088ms step_avg:71.51ms +[2025-09-02 07:21:23] [Rank 0] step:1861/10000 train_time:133088ms step_avg:71.51ms +[2025-09-02 07:21:24] [Rank 0] step:1881/10000 train_time:134534ms step_avg:71.52ms +[2025-09-02 07:21:24] [Rank 0] step:1881/10000 train_time:134534ms step_avg:71.52ms +[2025-09-02 07:21:26] [Rank 0] step:1901/10000 train_time:135979ms step_avg:71.53ms +[2025-09-02 07:21:26] [Rank 0] step:1901/10000 train_time:135979ms step_avg:71.53ms +[2025-09-02 07:21:27] [Rank 0] step:1921/10000 train_time:137425ms step_avg:71.54ms +[2025-09-02 07:21:27] [Rank 0] step:1921/10000 train_time:137425ms step_avg:71.54ms +[2025-09-02 07:21:29] [Rank 0] step:1941/10000 train_time:138873ms step_avg:71.55ms +[2025-09-02 07:21:29] [Rank 0] step:1941/10000 train_time:138873ms step_avg:71.55ms +[2025-09-02 07:21:30] [Rank 0] step:1961/10000 train_time:140321ms step_avg:71.56ms +[2025-09-02 07:21:30] [Rank 0] step:1961/10000 train_time:140321ms step_avg:71.56ms +[2025-09-02 07:21:32] [Rank 0] step:1981/10000 train_time:141768ms step_avg:71.56ms +[2025-09-02 07:21:32] [Rank 0] step:1981/10000 train_time:141768ms step_avg:71.56ms +[2025-09-02 07:21:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:21:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:21:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.6812 svd_entropy: attn_qk:H=0.6738,top10E=0.38,eRank=121.4,q75/q25=40.88 attn_vo:H=0.6958,top10E=0.28,eRank=169.8,q75/q25=inf mlp_w1:H=0.6533,top10E=0.44,eRank=95.3,q75/q25=7.86 mlp_w2:H=0.7819,top10E=0.21,eRank=183.2,q75/q25=22.61 vo_prod:H=0.5560,top10E=0.39,eRank=55.8,q75/q25=inf train_time:143361ms step_avg:71.68ms +[2025-09-02 07:21:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.6812 svd_entropy: attn_qk:H=0.6738,top10E=0.38,eRank=121.4,q75/q25=40.88 attn_vo:H=0.6958,top10E=0.28,eRank=169.8,q75/q25=inf mlp_w1:H=0.6533,top10E=0.44,eRank=95.3,q75/q25=7.86 mlp_w2:H=0.7819,top10E=0.21,eRank=183.2,q75/q25=22.61 vo_prod:H=0.5560,top10E=0.39,eRank=55.8,q75/q25=inf train_time:143361ms step_avg:71.68ms +[2025-09-02 07:21:45] [Rank 0] step:2001/10000 train_time:143371ms step_avg:71.65ms +[2025-09-02 07:21:45] [Rank 0] step:2001/10000 train_time:143371ms step_avg:71.65ms +[2025-09-02 07:21:46] [Rank 0] step:2021/10000 train_time:144683ms step_avg:71.59ms +[2025-09-02 07:21:46] [Rank 0] step:2021/10000 train_time:144683ms step_avg:71.59ms +[2025-09-02 07:21:48] [Rank 0] step:2041/10000 train_time:146246ms step_avg:71.65ms +[2025-09-02 07:21:48] [Rank 0] step:2041/10000 train_time:146246ms step_avg:71.65ms +[2025-09-02 07:21:49] [Rank 0] step:2061/10000 train_time:147690ms step_avg:71.66ms +[2025-09-02 07:21:49] [Rank 0] step:2061/10000 train_time:147690ms step_avg:71.66ms +[2025-09-02 07:21:51] [Rank 0] step:2081/10000 train_time:149134ms step_avg:71.66ms +[2025-09-02 07:21:51] [Rank 0] step:2081/10000 train_time:149134ms step_avg:71.66ms +[2025-09-02 07:21:52] [Rank 0] step:2101/10000 train_time:150578ms step_avg:71.67ms +[2025-09-02 07:21:52] [Rank 0] step:2101/10000 train_time:150578ms step_avg:71.67ms +[2025-09-02 07:21:54] [Rank 0] step:2121/10000 train_time:152022ms step_avg:71.67ms +[2025-09-02 07:21:54] [Rank 0] step:2121/10000 train_time:152022ms step_avg:71.67ms +[2025-09-02 07:21:55] [Rank 0] 
step:2141/10000 train_time:153467ms step_avg:71.68ms +[2025-09-02 07:21:55] [Rank 0] step:2141/10000 train_time:153467ms step_avg:71.68ms +[2025-09-02 07:21:57] [Rank 0] step:2161/10000 train_time:154912ms step_avg:71.69ms +[2025-09-02 07:21:57] [Rank 0] step:2161/10000 train_time:154912ms step_avg:71.69ms +[2025-09-02 07:21:58] [Rank 0] step:2181/10000 train_time:156358ms step_avg:71.69ms +[2025-09-02 07:21:58] [Rank 0] step:2181/10000 train_time:156358ms step_avg:71.69ms +[2025-09-02 07:21:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:21:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:22:11] [Rank 0] PRINT: step:2200/10000 val_loss:4.5983 svd_entropy: attn_qk:H=0.6810,top10E=0.37,eRank=124.9,q75/q25=45.71 attn_vo:H=0.7043,top10E=0.26,eRank=176.7,q75/q25=inf mlp_w1:H=0.6652,top10E=0.42,eRank=101.6,q75/q25=8.54 mlp_w2:H=0.7892,top10E=0.20,eRank=192.6,q75/q25=24.64 vo_prod:H=0.5652,top10E=0.38,eRank=59.5,q75/q25=inf train_time:157947ms step_avg:71.79ms +[2025-09-02 07:22:11] [Rank 0] PRINT: step:2200/10000 val_loss:4.5983 svd_entropy: attn_qk:H=0.6810,top10E=0.37,eRank=124.9,q75/q25=45.71 attn_vo:H=0.7043,top10E=0.26,eRank=176.7,q75/q25=inf mlp_w1:H=0.6652,top10E=0.42,eRank=101.6,q75/q25=8.54 mlp_w2:H=0.7892,top10E=0.20,eRank=192.6,q75/q25=24.64 vo_prod:H=0.5652,top10E=0.38,eRank=59.5,q75/q25=inf train_time:157947ms step_avg:71.79ms +[2025-09-02 07:22:11] [Rank 0] step:2201/10000 train_time:157957ms step_avg:71.77ms +[2025-09-02 07:22:11] [Rank 0] step:2201/10000 train_time:157957ms step_avg:71.77ms +[2025-09-02 07:22:13] [Rank 0] step:2221/10000 train_time:159270ms step_avg:71.71ms +[2025-09-02 07:22:13] [Rank 0] step:2221/10000 train_time:159270ms step_avg:71.71ms +[2025-09-02 07:22:14] [Rank 0] step:2241/10000 train_time:160744ms step_avg:71.73ms +[2025-09-02 
07:22:14] [Rank 0] step:2241/10000 train_time:160744ms step_avg:71.73ms +[2025-09-02 07:22:16] [Rank 0] step:2261/10000 train_time:162231ms step_avg:71.75ms +[2025-09-02 07:22:16] [Rank 0] step:2261/10000 train_time:162231ms step_avg:71.75ms +[2025-09-02 07:22:17] [Rank 0] step:2281/10000 train_time:163718ms step_avg:71.77ms +[2025-09-02 07:22:17] [Rank 0] step:2281/10000 train_time:163718ms step_avg:71.77ms +[2025-09-02 07:22:19] [Rank 0] step:2301/10000 train_time:165206ms step_avg:71.80ms +[2025-09-02 07:22:19] [Rank 0] step:2301/10000 train_time:165206ms step_avg:71.80ms +[2025-09-02 07:22:20] [Rank 0] step:2321/10000 train_time:166692ms step_avg:71.82ms +[2025-09-02 07:22:20] [Rank 0] step:2321/10000 train_time:166692ms step_avg:71.82ms +[2025-09-02 07:22:22] [Rank 0] step:2341/10000 train_time:168179ms step_avg:71.84ms +[2025-09-02 07:22:22] [Rank 0] step:2341/10000 train_time:168179ms step_avg:71.84ms +[2025-09-02 07:22:23] [Rank 0] step:2361/10000 train_time:169669ms step_avg:71.86ms +[2025-09-02 07:22:23] [Rank 0] step:2361/10000 train_time:169669ms step_avg:71.86ms +[2025-09-02 07:22:25] [Rank 0] step:2381/10000 train_time:171158ms step_avg:71.88ms +[2025-09-02 07:22:25] [Rank 0] step:2381/10000 train_time:171158ms step_avg:71.88ms +[2025-09-02 07:22:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:22:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:22:38] [Rank 0] PRINT: step:2400/10000 val_loss:4.5200 svd_entropy: attn_qk:H=0.6871,top10E=0.36,eRank=128.0,q75/q25=50.72 attn_vo:H=0.7119,top10E=0.25,eRank=182.8,q75/q25=inf mlp_w1:H=0.6762,top10E=0.41,eRank=107.9,q75/q25=9.29 mlp_w2:H=0.7962,top10E=0.19,eRank=201.9,q75/q25=26.48 vo_prod:H=0.5735,top10E=0.36,eRank=63.1,q75/q25=inf train_time:172796ms step_avg:72.00ms +[2025-09-02 07:22:38] [Rank 0] PRINT: step:2400/10000 val_loss:4.5200 svd_entropy: attn_qk:H=0.6871,top10E=0.36,eRank=128.0,q75/q25=50.72 attn_vo:H=0.7119,top10E=0.25,eRank=182.8,q75/q25=inf mlp_w1:H=0.6762,top10E=0.41,eRank=107.9,q75/q25=9.29 mlp_w2:H=0.7962,top10E=0.19,eRank=201.9,q75/q25=26.48 vo_prod:H=0.5735,top10E=0.36,eRank=63.1,q75/q25=inf train_time:172796ms step_avg:72.00ms +[2025-09-02 07:22:38] [Rank 0] step:2401/10000 train_time:172807ms step_avg:71.97ms +[2025-09-02 07:22:38] [Rank 0] step:2401/10000 train_time:172807ms step_avg:71.97ms +[2025-09-02 07:22:39] [Rank 0] step:2421/10000 train_time:174154ms step_avg:71.93ms +[2025-09-02 07:22:39] [Rank 0] step:2421/10000 train_time:174154ms step_avg:71.93ms +[2025-09-02 07:22:41] [Rank 0] step:2441/10000 train_time:175641ms step_avg:71.95ms +[2025-09-02 07:22:41] [Rank 0] step:2441/10000 train_time:175641ms step_avg:71.95ms +[2025-09-02 07:22:42] [Rank 0] step:2461/10000 train_time:177128ms step_avg:71.97ms +[2025-09-02 07:22:42] [Rank 0] step:2461/10000 train_time:177128ms step_avg:71.97ms +[2025-09-02 07:22:44] [Rank 0] step:2481/10000 train_time:178616ms step_avg:71.99ms +[2025-09-02 07:22:44] [Rank 0] step:2481/10000 train_time:178616ms step_avg:71.99ms +[2025-09-02 07:22:45] [Rank 0] step:2501/10000 train_time:180103ms step_avg:72.01ms +[2025-09-02 07:22:45] [Rank 0] step:2501/10000 train_time:180103ms step_avg:72.01ms +[2025-09-02 07:22:47] [Rank 0] step:2521/10000 train_time:181590ms step_avg:72.03ms +[2025-09-02 07:22:47] [Rank 0] step:2521/10000 train_time:181590ms step_avg:72.03ms +[2025-09-02 07:22:48] [Rank 0] 
step:2541/10000 train_time:183079ms step_avg:72.05ms +[2025-09-02 07:22:48] [Rank 0] step:2541/10000 train_time:183079ms step_avg:72.05ms +[2025-09-02 07:22:50] [Rank 0] step:2561/10000 train_time:184570ms step_avg:72.07ms +[2025-09-02 07:22:50] [Rank 0] step:2561/10000 train_time:184570ms step_avg:72.07ms +[2025-09-02 07:22:51] [Rank 0] step:2581/10000 train_time:186060ms step_avg:72.09ms +[2025-09-02 07:22:51] [Rank 0] step:2581/10000 train_time:186060ms step_avg:72.09ms +[2025-09-02 07:22:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:22:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:23:05] [Rank 0] PRINT: step:2600/10000 val_loss:4.4579 svd_entropy: attn_qk:H=0.6932,top10E=0.35,eRank=131.2,q75/q25=54.84 attn_vo:H=0.7187,top10E=0.24,eRank=188.4,q75/q25=inf mlp_w1:H=0.6860,top10E=0.39,eRank=114.0,q75/q25=9.89 mlp_w2:H=0.8026,top10E=0.19,eRank=210.8,q75/q25=27.19 vo_prod:H=0.5817,top10E=0.35,eRank=66.7,q75/q25=inf train_time:187700ms step_avg:72.19ms +[2025-09-02 07:23:05] [Rank 0] PRINT: step:2600/10000 val_loss:4.4579 svd_entropy: attn_qk:H=0.6932,top10E=0.35,eRank=131.2,q75/q25=54.84 attn_vo:H=0.7187,top10E=0.24,eRank=188.4,q75/q25=inf mlp_w1:H=0.6860,top10E=0.39,eRank=114.0,q75/q25=9.89 mlp_w2:H=0.8026,top10E=0.19,eRank=210.8,q75/q25=27.19 vo_prod:H=0.5817,top10E=0.35,eRank=66.7,q75/q25=inf train_time:187700ms step_avg:72.19ms +[2025-09-02 07:23:05] [Rank 0] step:2601/10000 train_time:187711ms step_avg:72.17ms +[2025-09-02 07:23:05] [Rank 0] step:2601/10000 train_time:187711ms step_avg:72.17ms +[2025-09-02 07:23:06] [Rank 0] step:2621/10000 train_time:189078ms step_avg:72.14ms +[2025-09-02 07:23:06] [Rank 0] step:2621/10000 train_time:189078ms step_avg:72.14ms +[2025-09-02 07:23:08] [Rank 0] step:2641/10000 train_time:190567ms step_avg:72.16ms +[2025-09-02 
07:23:08] [Rank 0] step:2641/10000 train_time:190567ms step_avg:72.16ms +[2025-09-02 07:23:09] [Rank 0] step:2661/10000 train_time:192057ms step_avg:72.17ms +[2025-09-02 07:23:09] [Rank 0] step:2661/10000 train_time:192057ms step_avg:72.17ms +[2025-09-02 07:23:11] [Rank 0] step:2681/10000 train_time:193546ms step_avg:72.19ms +[2025-09-02 07:23:11] [Rank 0] step:2681/10000 train_time:193546ms step_avg:72.19ms +[2025-09-02 07:23:12] [Rank 0] step:2701/10000 train_time:195036ms step_avg:72.21ms +[2025-09-02 07:23:12] [Rank 0] step:2701/10000 train_time:195036ms step_avg:72.21ms +[2025-09-02 07:23:14] [Rank 0] step:2721/10000 train_time:196527ms step_avg:72.23ms +[2025-09-02 07:23:14] [Rank 0] step:2721/10000 train_time:196527ms step_avg:72.23ms +[2025-09-02 07:23:15] [Rank 0] step:2741/10000 train_time:198018ms step_avg:72.24ms +[2025-09-02 07:23:15] [Rank 0] step:2741/10000 train_time:198018ms step_avg:72.24ms +[2025-09-02 07:23:17] [Rank 0] step:2761/10000 train_time:199509ms step_avg:72.26ms +[2025-09-02 07:23:17] [Rank 0] step:2761/10000 train_time:199509ms step_avg:72.26ms +[2025-09-02 07:23:18] [Rank 0] step:2781/10000 train_time:201001ms step_avg:72.28ms +[2025-09-02 07:23:18] [Rank 0] step:2781/10000 train_time:201001ms step_avg:72.28ms +[2025-09-02 07:23:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:23:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:23:31] [Rank 0] PRINT: step:2800/10000 val_loss:4.4153 svd_entropy: attn_qk:H=0.6987,top10E=0.35,eRank=134.2,q75/q25=59.30 attn_vo:H=0.7249,top10E=0.23,eRank=193.6,q75/q25=inf mlp_w1:H=0.6953,top10E=0.38,eRank=120.2,q75/q25=10.43 mlp_w2:H=0.8086,top10E=0.18,eRank=219.5,q75/q25=27.46 vo_prod:H=0.5889,top10E=0.33,eRank=70.1,q75/q25=inf train_time:202642ms step_avg:72.37ms +[2025-09-02 07:23:31] [Rank 0] PRINT: step:2800/10000 val_loss:4.4153 svd_entropy: attn_qk:H=0.6987,top10E=0.35,eRank=134.2,q75/q25=59.30 attn_vo:H=0.7249,top10E=0.23,eRank=193.6,q75/q25=inf mlp_w1:H=0.6953,top10E=0.38,eRank=120.2,q75/q25=10.43 mlp_w2:H=0.8086,top10E=0.18,eRank=219.5,q75/q25=27.46 vo_prod:H=0.5889,top10E=0.33,eRank=70.1,q75/q25=inf train_time:202642ms step_avg:72.37ms +[2025-09-02 07:23:32] [Rank 0] step:2801/10000 train_time:202653ms step_avg:72.35ms +[2025-09-02 07:23:32] [Rank 0] step:2801/10000 train_time:202653ms step_avg:72.35ms +[2025-09-02 07:23:33] [Rank 0] step:2821/10000 train_time:204014ms step_avg:72.32ms +[2025-09-02 07:23:33] [Rank 0] step:2821/10000 train_time:204014ms step_avg:72.32ms +[2025-09-02 07:23:35] [Rank 0] step:2841/10000 train_time:205506ms step_avg:72.34ms +[2025-09-02 07:23:35] [Rank 0] step:2841/10000 train_time:205506ms step_avg:72.34ms +[2025-09-02 07:23:36] [Rank 0] step:2861/10000 train_time:206994ms step_avg:72.35ms +[2025-09-02 07:23:36] [Rank 0] step:2861/10000 train_time:206994ms step_avg:72.35ms +[2025-09-02 07:23:37] [Rank 0] step:2881/10000 train_time:208484ms step_avg:72.37ms +[2025-09-02 07:23:37] [Rank 0] step:2881/10000 train_time:208484ms step_avg:72.37ms +[2025-09-02 07:23:39] [Rank 0] step:2901/10000 train_time:209973ms step_avg:72.38ms +[2025-09-02 07:23:39] [Rank 0] step:2901/10000 train_time:209973ms step_avg:72.38ms +[2025-09-02 07:23:40] [Rank 0] step:2921/10000 train_time:211463ms step_avg:72.39ms +[2025-09-02 07:23:40] [Rank 0] step:2921/10000 train_time:211463ms step_avg:72.39ms +[2025-09-02 07:23:42] [Rank 0] 
step:2941/10000 train_time:212954ms step_avg:72.41ms +[2025-09-02 07:23:42] [Rank 0] step:2941/10000 train_time:212954ms step_avg:72.41ms +[2025-09-02 07:23:43] [Rank 0] step:2961/10000 train_time:214444ms step_avg:72.42ms +[2025-09-02 07:23:43] [Rank 0] step:2961/10000 train_time:214444ms step_avg:72.42ms +[2025-09-02 07:23:45] [Rank 0] step:2981/10000 train_time:215940ms step_avg:72.44ms +[2025-09-02 07:23:45] [Rank 0] step:2981/10000 train_time:215940ms step_avg:72.44ms +[2025-09-02 07:23:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:23:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:23:58] [Rank 0] PRINT: step:3000/10000 val_loss:4.3684 svd_entropy: attn_qk:H=0.7035,top10E=0.34,eRank=137.0,q75/q25=62.94 attn_vo:H=0.7303,top10E=0.22,eRank=198.4,q75/q25=inf mlp_w1:H=0.7031,top10E=0.37,eRank=125.6,q75/q25=11.14 mlp_w2:H=0.8130,top10E=0.17,eRank=226.2,q75/q25=28.56 vo_prod:H=0.5948,top10E=0.32,eRank=73.0,q75/q25=inf train_time:217588ms step_avg:72.53ms +[2025-09-02 07:23:58] [Rank 0] PRINT: step:3000/10000 val_loss:4.3684 svd_entropy: attn_qk:H=0.7035,top10E=0.34,eRank=137.0,q75/q25=62.94 attn_vo:H=0.7303,top10E=0.22,eRank=198.4,q75/q25=inf mlp_w1:H=0.7031,top10E=0.37,eRank=125.6,q75/q25=11.14 mlp_w2:H=0.8130,top10E=0.17,eRank=226.2,q75/q25=28.56 vo_prod:H=0.5948,top10E=0.32,eRank=73.0,q75/q25=inf train_time:217588ms step_avg:72.53ms +[2025-09-02 07:23:58] [Rank 0] step:3001/10000 train_time:217599ms step_avg:72.51ms +[2025-09-02 07:23:58] [Rank 0] step:3001/10000 train_time:217599ms step_avg:72.51ms +[2025-09-02 07:24:00] [Rank 0] step:3021/10000 train_time:218955ms step_avg:72.48ms +[2025-09-02 07:24:00] [Rank 0] step:3021/10000 train_time:218955ms step_avg:72.48ms +[2025-09-02 07:24:01] [Rank 0] step:3041/10000 train_time:220449ms step_avg:72.49ms +[2025-09-02 
07:24:01] [Rank 0] step:3041/10000 train_time:220449ms step_avg:72.49ms +[2025-09-02 07:24:03] [Rank 0] step:3061/10000 train_time:221947ms step_avg:72.51ms +[2025-09-02 07:24:03] [Rank 0] step:3061/10000 train_time:221947ms step_avg:72.51ms +[2025-09-02 07:24:04] [Rank 0] step:3081/10000 train_time:223444ms step_avg:72.52ms +[2025-09-02 07:24:04] [Rank 0] step:3081/10000 train_time:223444ms step_avg:72.52ms +[2025-09-02 07:24:06] [Rank 0] step:3101/10000 train_time:224941ms step_avg:72.54ms +[2025-09-02 07:24:06] [Rank 0] step:3101/10000 train_time:224941ms step_avg:72.54ms +[2025-09-02 07:24:07] [Rank 0] step:3121/10000 train_time:226437ms step_avg:72.55ms +[2025-09-02 07:24:07] [Rank 0] step:3121/10000 train_time:226437ms step_avg:72.55ms +[2025-09-02 07:24:09] [Rank 0] step:3141/10000 train_time:227934ms step_avg:72.57ms +[2025-09-02 07:24:09] [Rank 0] step:3141/10000 train_time:227934ms step_avg:72.57ms +[2025-09-02 07:24:10] [Rank 0] step:3161/10000 train_time:229431ms step_avg:72.58ms +[2025-09-02 07:24:10] [Rank 0] step:3161/10000 train_time:229431ms step_avg:72.58ms +[2025-09-02 07:24:12] [Rank 0] step:3181/10000 train_time:230928ms step_avg:72.60ms +[2025-09-02 07:24:12] [Rank 0] step:3181/10000 train_time:230928ms step_avg:72.60ms +[2025-09-02 07:24:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:24:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:24:25] [Rank 0] PRINT: step:3200/10000 val_loss:4.3336 svd_entropy: attn_qk:H=0.7078,top10E=0.33,eRank=139.6,q75/q25=66.70 attn_vo:H=0.7351,top10E=0.22,eRank=202.8,q75/q25=inf mlp_w1:H=0.7106,top10E=0.36,eRank=131.2,q75/q25=11.72 mlp_w2:H=0.8172,top10E=0.17,eRank=232.6,q75/q25=28.95 vo_prod:H=0.6003,top10E=0.32,eRank=75.9,q75/q25=inf train_time:232575ms step_avg:72.68ms +[2025-09-02 07:24:25] [Rank 0] PRINT: step:3200/10000 val_loss:4.3336 svd_entropy: attn_qk:H=0.7078,top10E=0.33,eRank=139.6,q75/q25=66.70 attn_vo:H=0.7351,top10E=0.22,eRank=202.8,q75/q25=inf mlp_w1:H=0.7106,top10E=0.36,eRank=131.2,q75/q25=11.72 mlp_w2:H=0.8172,top10E=0.17,eRank=232.6,q75/q25=28.95 vo_prod:H=0.6003,top10E=0.32,eRank=75.9,q75/q25=inf train_time:232575ms step_avg:72.68ms +[2025-09-02 07:24:25] [Rank 0] step:3201/10000 train_time:232586ms step_avg:72.66ms +[2025-09-02 07:24:25] [Rank 0] step:3201/10000 train_time:232586ms step_avg:72.66ms +[2025-09-02 07:24:27] [Rank 0] step:3221/10000 train_time:233935ms step_avg:72.63ms +[2025-09-02 07:24:27] [Rank 0] step:3221/10000 train_time:233935ms step_avg:72.63ms +[2025-09-02 07:24:28] [Rank 0] step:3241/10000 train_time:235430ms step_avg:72.64ms +[2025-09-02 07:24:28] [Rank 0] step:3241/10000 train_time:235430ms step_avg:72.64ms +[2025-09-02 07:24:30] [Rank 0] step:3261/10000 train_time:236925ms step_avg:72.65ms +[2025-09-02 07:24:30] [Rank 0] step:3261/10000 train_time:236925ms step_avg:72.65ms +[2025-09-02 07:24:31] [Rank 0] step:3281/10000 train_time:238422ms step_avg:72.67ms +[2025-09-02 07:24:31] [Rank 0] step:3281/10000 train_time:238422ms step_avg:72.67ms +[2025-09-02 07:24:33] [Rank 0] step:3301/10000 train_time:239918ms step_avg:72.68ms +[2025-09-02 07:24:33] [Rank 0] step:3301/10000 train_time:239918ms step_avg:72.68ms +[2025-09-02 07:24:34] [Rank 0] step:3321/10000 train_time:241414ms step_avg:72.69ms +[2025-09-02 07:24:34] [Rank 0] step:3321/10000 train_time:241414ms step_avg:72.69ms +[2025-09-02 07:24:36] [Rank 0] 
step:3341/10000 train_time:242911ms step_avg:72.71ms +[2025-09-02 07:24:36] [Rank 0] step:3341/10000 train_time:242911ms step_avg:72.71ms +[2025-09-02 07:24:37] [Rank 0] step:3361/10000 train_time:244410ms step_avg:72.72ms +[2025-09-02 07:24:37] [Rank 0] step:3361/10000 train_time:244410ms step_avg:72.72ms +[2025-09-02 07:24:39] [Rank 0] step:3381/10000 train_time:245907ms step_avg:72.73ms +[2025-09-02 07:24:39] [Rank 0] step:3381/10000 train_time:245907ms step_avg:72.73ms +[2025-09-02 07:24:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:24:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:24:52] [Rank 0] PRINT: step:3400/10000 val_loss:4.2873 svd_entropy: attn_qk:H=0.7121,top10E=0.33,eRank=142.2,q75/q25=69.53 attn_vo:H=0.7397,top10E=0.21,eRank=207.1,q75/q25=inf mlp_w1:H=0.7177,top10E=0.35,eRank=136.7,q75/q25=12.26 mlp_w2:H=0.8213,top10E=0.17,eRank=239.1,q75/q25=29.04 vo_prod:H=0.6056,top10E=0.31,eRank=78.7,q75/q25=inf train_time:247555ms step_avg:72.81ms +[2025-09-02 07:24:52] [Rank 0] PRINT: step:3400/10000 val_loss:4.2873 svd_entropy: attn_qk:H=0.7121,top10E=0.33,eRank=142.2,q75/q25=69.53 attn_vo:H=0.7397,top10E=0.21,eRank=207.1,q75/q25=inf mlp_w1:H=0.7177,top10E=0.35,eRank=136.7,q75/q25=12.26 mlp_w2:H=0.8213,top10E=0.17,eRank=239.1,q75/q25=29.04 vo_prod:H=0.6056,top10E=0.31,eRank=78.7,q75/q25=inf train_time:247555ms step_avg:72.81ms +[2025-09-02 07:24:52] [Rank 0] step:3401/10000 train_time:247566ms step_avg:72.79ms +[2025-09-02 07:24:52] [Rank 0] step:3401/10000 train_time:247566ms step_avg:72.79ms +[2025-09-02 07:24:53] [Rank 0] step:3421/10000 train_time:248922ms step_avg:72.76ms +[2025-09-02 07:24:53] [Rank 0] step:3421/10000 train_time:248922ms step_avg:72.76ms +[2025-09-02 07:24:55] [Rank 0] step:3441/10000 train_time:250420ms step_avg:72.78ms +[2025-09-02 
07:24:55] [Rank 0] step:3441/10000 train_time:250420ms step_avg:72.78ms +[2025-09-02 07:24:56] [Rank 0] step:3461/10000 train_time:251916ms step_avg:72.79ms +[2025-09-02 07:24:56] [Rank 0] step:3461/10000 train_time:251916ms step_avg:72.79ms +[2025-09-02 07:24:58] [Rank 0] step:3481/10000 train_time:253413ms step_avg:72.80ms +[2025-09-02 07:24:58] [Rank 0] step:3481/10000 train_time:253413ms step_avg:72.80ms +[2025-09-02 07:24:59] [Rank 0] step:3501/10000 train_time:254911ms step_avg:72.81ms +[2025-09-02 07:24:59] [Rank 0] step:3501/10000 train_time:254911ms step_avg:72.81ms +[2025-09-02 07:25:01] [Rank 0] step:3521/10000 train_time:256410ms step_avg:72.82ms +[2025-09-02 07:25:01] [Rank 0] step:3521/10000 train_time:256410ms step_avg:72.82ms +[2025-09-02 07:25:02] [Rank 0] step:3541/10000 train_time:257909ms step_avg:72.84ms +[2025-09-02 07:25:02] [Rank 0] step:3541/10000 train_time:257909ms step_avg:72.84ms +[2025-09-02 07:25:04] [Rank 0] step:3561/10000 train_time:259408ms step_avg:72.85ms +[2025-09-02 07:25:04] [Rank 0] step:3561/10000 train_time:259408ms step_avg:72.85ms +[2025-09-02 07:25:05] [Rank 0] step:3581/10000 train_time:260906ms step_avg:72.86ms +[2025-09-02 07:25:05] [Rank 0] step:3581/10000 train_time:260906ms step_avg:72.86ms +[2025-09-02 07:25:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:25:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:25:19] [Rank 0] PRINT: step:3600/10000 val_loss:4.2770 svd_entropy: attn_qk:H=0.7160,top10E=0.32,eRank=144.8,q75/q25=72.31 attn_vo:H=0.7437,top10E=0.20,eRank=211.1,q75/q25=inf mlp_w1:H=0.7241,top10E=0.34,eRank=141.9,q75/q25=12.78 mlp_w2:H=0.8246,top10E=0.16,eRank=244.7,q75/q25=29.35 vo_prod:H=0.6103,top10E=0.30,eRank=81.3,q75/q25=inf train_time:262555ms step_avg:72.93ms +[2025-09-02 07:25:19] [Rank 0] PRINT: step:3600/10000 val_loss:4.2770 svd_entropy: attn_qk:H=0.7160,top10E=0.32,eRank=144.8,q75/q25=72.31 attn_vo:H=0.7437,top10E=0.20,eRank=211.1,q75/q25=inf mlp_w1:H=0.7241,top10E=0.34,eRank=141.9,q75/q25=12.78 mlp_w2:H=0.8246,top10E=0.16,eRank=244.7,q75/q25=29.35 vo_prod:H=0.6103,top10E=0.30,eRank=81.3,q75/q25=inf train_time:262555ms step_avg:72.93ms +[2025-09-02 07:25:19] [Rank 0] step:3601/10000 train_time:262566ms step_avg:72.91ms +[2025-09-02 07:25:19] [Rank 0] step:3601/10000 train_time:262566ms step_avg:72.91ms +[2025-09-02 07:25:20] [Rank 0] step:3621/10000 train_time:263923ms step_avg:72.89ms +[2025-09-02 07:25:20] [Rank 0] step:3621/10000 train_time:263923ms step_avg:72.89ms +[2025-09-02 07:25:22] [Rank 0] step:3641/10000 train_time:265420ms step_avg:72.90ms +[2025-09-02 07:25:22] [Rank 0] step:3641/10000 train_time:265420ms step_avg:72.90ms +[2025-09-02 07:25:23] [Rank 0] step:3661/10000 train_time:266917ms step_avg:72.91ms +[2025-09-02 07:25:23] [Rank 0] step:3661/10000 train_time:266917ms step_avg:72.91ms +[2025-09-02 07:25:25] [Rank 0] step:3681/10000 train_time:268415ms step_avg:72.92ms +[2025-09-02 07:25:25] [Rank 0] step:3681/10000 train_time:268415ms step_avg:72.92ms +[2025-09-02 07:25:26] [Rank 0] step:3701/10000 train_time:269913ms step_avg:72.93ms +[2025-09-02 07:25:26] [Rank 0] step:3701/10000 train_time:269913ms step_avg:72.93ms +[2025-09-02 07:25:28] [Rank 0] step:3721/10000 train_time:271437ms step_avg:72.95ms +[2025-09-02 07:25:28] [Rank 0] step:3721/10000 train_time:271437ms step_avg:72.95ms +[2025-09-02 07:25:29] [Rank 0] 
step:3741/10000 train_time:272973ms step_avg:72.97ms +[2025-09-02 07:25:29] [Rank 0] step:3741/10000 train_time:272973ms step_avg:72.97ms +[2025-09-02 07:25:31] [Rank 0] step:3761/10000 train_time:274508ms step_avg:72.99ms +[2025-09-02 07:25:31] [Rank 0] step:3761/10000 train_time:274508ms step_avg:72.99ms +[2025-09-02 07:25:32] [Rank 0] step:3781/10000 train_time:276045ms step_avg:73.01ms +[2025-09-02 07:25:32] [Rank 0] step:3781/10000 train_time:276045ms step_avg:73.01ms +[2025-09-02 07:25:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:25:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:25:46] [Rank 0] PRINT: step:3800/10000 val_loss:4.2161 svd_entropy: attn_qk:H=0.7194,top10E=0.31,eRank=147.0,q75/q25=74.59 attn_vo:H=0.7475,top10E=0.20,eRank=214.8,q75/q25=inf mlp_w1:H=0.7302,top10E=0.33,eRank=147.2,q75/q25=13.34 mlp_w2:H=0.8278,top10E=0.16,eRank=249.9,q75/q25=29.55 vo_prod:H=0.6148,top10E=0.29,eRank=83.9,q75/q25=inf train_time:277736ms step_avg:73.09ms +[2025-09-02 07:25:46] [Rank 0] PRINT: step:3800/10000 val_loss:4.2161 svd_entropy: attn_qk:H=0.7194,top10E=0.31,eRank=147.0,q75/q25=74.59 attn_vo:H=0.7475,top10E=0.20,eRank=214.8,q75/q25=inf mlp_w1:H=0.7302,top10E=0.33,eRank=147.2,q75/q25=13.34 mlp_w2:H=0.8278,top10E=0.16,eRank=249.9,q75/q25=29.55 vo_prod:H=0.6148,top10E=0.29,eRank=83.9,q75/q25=inf train_time:277736ms step_avg:73.09ms +[2025-09-02 07:25:46] [Rank 0] step:3801/10000 train_time:277747ms step_avg:73.07ms +[2025-09-02 07:25:46] [Rank 0] step:3801/10000 train_time:277747ms step_avg:73.07ms +[2025-09-02 07:25:47] [Rank 0] step:3821/10000 train_time:279142ms step_avg:73.05ms +[2025-09-02 07:25:47] [Rank 0] step:3821/10000 train_time:279142ms step_avg:73.05ms +[2025-09-02 07:25:49] [Rank 0] step:3841/10000 train_time:280677ms step_avg:73.07ms +[2025-09-02 
07:25:49] [Rank 0] step:3841/10000 train_time:280677ms step_avg:73.07ms +[2025-09-02 07:25:50] [Rank 0] step:3861/10000 train_time:282211ms step_avg:73.09ms +[2025-09-02 07:25:50] [Rank 0] step:3861/10000 train_time:282211ms step_avg:73.09ms +[2025-09-02 07:25:52] [Rank 0] step:3881/10000 train_time:283744ms step_avg:73.11ms +[2025-09-02 07:25:52] [Rank 0] step:3881/10000 train_time:283744ms step_avg:73.11ms +[2025-09-02 07:25:54] [Rank 0] step:3901/10000 train_time:285277ms step_avg:73.13ms +[2025-09-02 07:25:54] [Rank 0] step:3901/10000 train_time:285277ms step_avg:73.13ms +[2025-09-02 07:25:55] [Rank 0] step:3921/10000 train_time:286811ms step_avg:73.15ms +[2025-09-02 07:25:55] [Rank 0] step:3921/10000 train_time:286811ms step_avg:73.15ms +[2025-09-02 07:25:57] [Rank 0] step:3941/10000 train_time:288343ms step_avg:73.17ms +[2025-09-02 07:25:57] [Rank 0] step:3941/10000 train_time:288343ms step_avg:73.17ms +[2025-09-02 07:25:58] [Rank 0] step:3961/10000 train_time:289874ms step_avg:73.18ms +[2025-09-02 07:25:58] [Rank 0] step:3961/10000 train_time:289874ms step_avg:73.18ms +[2025-09-02 07:26:00] [Rank 0] step:3981/10000 train_time:291407ms step_avg:73.20ms +[2025-09-02 07:26:00] [Rank 0] step:3981/10000 train_time:291407ms step_avg:73.20ms +[2025-09-02 07:26:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:26:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:26:13] [Rank 0] PRINT: step:4000/10000 val_loss:4.1889 svd_entropy: attn_qk:H=0.7230,top10E=0.31,eRank=149.3,q75/q25=76.81 attn_vo:H=0.7509,top10E=0.19,eRank=218.3,q75/q25=inf mlp_w1:H=0.7359,top10E=0.33,eRank=152.2,q75/q25=13.86 mlp_w2:H=0.8307,top10E=0.16,eRank=254.9,q75/q25=29.83 vo_prod:H=0.6189,top10E=0.29,eRank=86.3,q75/q25=inf train_time:293093ms step_avg:73.27ms +[2025-09-02 07:26:13] [Rank 0] PRINT: step:4000/10000 val_loss:4.1889 svd_entropy: attn_qk:H=0.7230,top10E=0.31,eRank=149.3,q75/q25=76.81 attn_vo:H=0.7509,top10E=0.19,eRank=218.3,q75/q25=inf mlp_w1:H=0.7359,top10E=0.33,eRank=152.2,q75/q25=13.86 mlp_w2:H=0.8307,top10E=0.16,eRank=254.9,q75/q25=29.83 vo_prod:H=0.6189,top10E=0.29,eRank=86.3,q75/q25=inf train_time:293093ms step_avg:73.27ms +[2025-09-02 07:26:13] [Rank 0] step:4001/10000 train_time:293104ms step_avg:73.26ms +[2025-09-02 07:26:13] [Rank 0] step:4001/10000 train_time:293104ms step_avg:73.26ms +[2025-09-02 07:26:15] [Rank 0] step:4021/10000 train_time:294501ms step_avg:73.24ms +[2025-09-02 07:26:15] [Rank 0] step:4021/10000 train_time:294501ms step_avg:73.24ms +[2025-09-02 07:26:16] [Rank 0] step:4041/10000 train_time:296033ms step_avg:73.26ms +[2025-09-02 07:26:16] [Rank 0] step:4041/10000 train_time:296033ms step_avg:73.26ms +[2025-09-02 07:26:18] [Rank 0] step:4061/10000 train_time:297597ms step_avg:73.28ms +[2025-09-02 07:26:18] [Rank 0] step:4061/10000 train_time:297597ms step_avg:73.28ms +[2025-09-02 07:26:19] [Rank 0] step:4081/10000 train_time:299232ms step_avg:73.32ms +[2025-09-02 07:26:19] [Rank 0] step:4081/10000 train_time:299232ms step_avg:73.32ms +[2025-09-02 07:26:21] [Rank 0] step:4101/10000 train_time:300763ms step_avg:73.34ms +[2025-09-02 07:26:21] [Rank 0] step:4101/10000 train_time:300763ms step_avg:73.34ms +[2025-09-02 07:26:22] [Rank 0] step:4121/10000 train_time:302294ms step_avg:73.35ms +[2025-09-02 07:26:22] [Rank 0] step:4121/10000 train_time:302294ms step_avg:73.35ms +[2025-09-02 07:26:24] [Rank 0] 
step:4141/10000 train_time:303827ms step_avg:73.37ms +[2025-09-02 07:26:24] [Rank 0] step:4141/10000 train_time:303827ms step_avg:73.37ms +[2025-09-02 07:26:25] [Rank 0] step:4161/10000 train_time:305359ms step_avg:73.39ms +[2025-09-02 07:26:25] [Rank 0] step:4161/10000 train_time:305359ms step_avg:73.39ms +[2025-09-02 07:26:27] [Rank 0] step:4181/10000 train_time:306893ms step_avg:73.40ms +[2025-09-02 07:26:27] [Rank 0] step:4181/10000 train_time:306893ms step_avg:73.40ms +[2025-09-02 07:26:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:26:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:26:40] [Rank 0] PRINT: step:4200/10000 val_loss:4.1725 svd_entropy: attn_qk:H=0.7262,top10E=0.30,eRank=151.5,q75/q25=78.69 attn_vo:H=0.7542,top10E=0.19,eRank=221.6,q75/q25=inf mlp_w1:H=0.7411,top10E=0.32,eRank=157.1,q75/q25=14.46 mlp_w2:H=0.8333,top10E=0.15,eRank=259.4,q75/q25=30.33 vo_prod:H=0.6228,top10E=0.28,eRank=88.7,q75/q25=inf train_time:308578ms step_avg:73.47ms +[2025-09-02 07:26:40] [Rank 0] PRINT: step:4200/10000 val_loss:4.1725 svd_entropy: attn_qk:H=0.7262,top10E=0.30,eRank=151.5,q75/q25=78.69 attn_vo:H=0.7542,top10E=0.19,eRank=221.6,q75/q25=inf mlp_w1:H=0.7411,top10E=0.32,eRank=157.1,q75/q25=14.46 mlp_w2:H=0.8333,top10E=0.15,eRank=259.4,q75/q25=30.33 vo_prod:H=0.6228,top10E=0.28,eRank=88.7,q75/q25=inf train_time:308578ms step_avg:73.47ms +[2025-09-02 07:26:40] [Rank 0] step:4201/10000 train_time:308589ms step_avg:73.46ms +[2025-09-02 07:26:40] [Rank 0] step:4201/10000 train_time:308589ms step_avg:73.46ms +[2025-09-02 07:26:42] [Rank 0] step:4221/10000 train_time:309971ms step_avg:73.44ms +[2025-09-02 07:26:42] [Rank 0] step:4221/10000 train_time:309971ms step_avg:73.44ms +[2025-09-02 07:26:43] [Rank 0] step:4241/10000 train_time:311503ms step_avg:73.45ms +[2025-09-02 
07:26:43] [Rank 0] step:4241/10000 train_time:311503ms step_avg:73.45ms +[2025-09-02 07:26:45] [Rank 0] step:4261/10000 train_time:313034ms step_avg:73.46ms +[2025-09-02 07:26:45] [Rank 0] step:4261/10000 train_time:313034ms step_avg:73.46ms +[2025-09-02 07:26:47] [Rank 0] step:4281/10000 train_time:314563ms step_avg:73.48ms +[2025-09-02 07:26:47] [Rank 0] step:4281/10000 train_time:314563ms step_avg:73.48ms +[2025-09-02 07:26:48] [Rank 0] step:4301/10000 train_time:316095ms step_avg:73.49ms +[2025-09-02 07:26:48] [Rank 0] step:4301/10000 train_time:316095ms step_avg:73.49ms +[2025-09-02 07:26:50] [Rank 0] step:4321/10000 train_time:317630ms step_avg:73.51ms +[2025-09-02 07:26:50] [Rank 0] step:4321/10000 train_time:317630ms step_avg:73.51ms +[2025-09-02 07:26:51] [Rank 0] step:4341/10000 train_time:319160ms step_avg:73.52ms +[2025-09-02 07:26:51] [Rank 0] step:4341/10000 train_time:319160ms step_avg:73.52ms +[2025-09-02 07:26:53] [Rank 0] step:4361/10000 train_time:320693ms step_avg:73.54ms +[2025-09-02 07:26:53] [Rank 0] step:4361/10000 train_time:320693ms step_avg:73.54ms +[2025-09-02 07:26:54] [Rank 0] step:4381/10000 train_time:322224ms step_avg:73.55ms +[2025-09-02 07:26:54] [Rank 0] step:4381/10000 train_time:322224ms step_avg:73.55ms +[2025-09-02 07:26:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:26:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:27:07] [Rank 0] PRINT: step:4400/10000 val_loss:4.1448 svd_entropy: attn_qk:H=0.7292,top10E=0.30,eRank=153.6,q75/q25=80.11 attn_vo:H=0.7571,top10E=0.19,eRank=224.8,q75/q25=inf mlp_w1:H=0.7463,top10E=0.31,eRank=162.1,q75/q25=15.07 mlp_w2:H=0.8356,top10E=0.15,eRank=263.4,q75/q25=30.93 vo_prod:H=0.6263,top10E=0.27,eRank=91.0,q75/q25=inf train_time:323911ms step_avg:73.62ms +[2025-09-02 07:27:07] [Rank 0] PRINT: step:4400/10000 val_loss:4.1448 svd_entropy: attn_qk:H=0.7292,top10E=0.30,eRank=153.6,q75/q25=80.11 attn_vo:H=0.7571,top10E=0.19,eRank=224.8,q75/q25=inf mlp_w1:H=0.7463,top10E=0.31,eRank=162.1,q75/q25=15.07 mlp_w2:H=0.8356,top10E=0.15,eRank=263.4,q75/q25=30.93 vo_prod:H=0.6263,top10E=0.27,eRank=91.0,q75/q25=inf train_time:323911ms step_avg:73.62ms +[2025-09-02 07:27:07] [Rank 0] step:4401/10000 train_time:323922ms step_avg:73.60ms +[2025-09-02 07:27:07] [Rank 0] step:4401/10000 train_time:323922ms step_avg:73.60ms +[2025-09-02 07:27:09] [Rank 0] step:4421/10000 train_time:325315ms step_avg:73.58ms +[2025-09-02 07:27:09] [Rank 0] step:4421/10000 train_time:325315ms step_avg:73.58ms +[2025-09-02 07:27:10] [Rank 0] step:4441/10000 train_time:326846ms step_avg:73.60ms +[2025-09-02 07:27:10] [Rank 0] step:4441/10000 train_time:326846ms step_avg:73.60ms +[2025-09-02 07:27:12] [Rank 0] step:4461/10000 train_time:328384ms step_avg:73.61ms +[2025-09-02 07:27:12] [Rank 0] step:4461/10000 train_time:328384ms step_avg:73.61ms +[2025-09-02 07:27:14] [Rank 0] step:4481/10000 train_time:329921ms step_avg:73.63ms +[2025-09-02 07:27:14] [Rank 0] step:4481/10000 train_time:329921ms step_avg:73.63ms +[2025-09-02 07:27:15] [Rank 0] step:4501/10000 train_time:331460ms step_avg:73.64ms +[2025-09-02 07:27:15] [Rank 0] step:4501/10000 train_time:331460ms step_avg:73.64ms +[2025-09-02 07:27:17] [Rank 0] step:4521/10000 train_time:332999ms step_avg:73.66ms +[2025-09-02 07:27:17] [Rank 0] step:4521/10000 train_time:332999ms step_avg:73.66ms +[2025-09-02 07:27:18] [Rank 0] 
step:4541/10000 train_time:334537ms step_avg:73.67ms +[2025-09-02 07:27:18] [Rank 0] step:4541/10000 train_time:334537ms step_avg:73.67ms +[2025-09-02 07:27:20] [Rank 0] step:4561/10000 train_time:336078ms step_avg:73.69ms +[2025-09-02 07:27:20] [Rank 0] step:4561/10000 train_time:336078ms step_avg:73.69ms +[2025-09-02 07:27:21] [Rank 0] step:4581/10000 train_time:337619ms step_avg:73.70ms +[2025-09-02 07:27:21] [Rank 0] step:4581/10000 train_time:337619ms step_avg:73.70ms +[2025-09-02 07:27:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:27:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:27:35] [Rank 0] PRINT: step:4600/10000 val_loss:4.1144 svd_entropy: attn_qk:H=0.7322,top10E=0.30,eRank=155.8,q75/q25=81.61 attn_vo:H=0.7601,top10E=0.18,eRank=228.0,q75/q25=inf mlp_w1:H=0.7512,top10E=0.31,eRank=166.8,q75/q25=15.67 mlp_w2:H=0.8377,top10E=0.15,eRank=267.3,q75/q25=31.02 vo_prod:H=0.6300,top10E=0.27,eRank=93.4,q75/q25=inf train_time:339314ms step_avg:73.76ms +[2025-09-02 07:27:35] [Rank 0] PRINT: step:4600/10000 val_loss:4.1144 svd_entropy: attn_qk:H=0.7322,top10E=0.30,eRank=155.8,q75/q25=81.61 attn_vo:H=0.7601,top10E=0.18,eRank=228.0,q75/q25=inf mlp_w1:H=0.7512,top10E=0.31,eRank=166.8,q75/q25=15.67 mlp_w2:H=0.8377,top10E=0.15,eRank=267.3,q75/q25=31.02 vo_prod:H=0.6300,top10E=0.27,eRank=93.4,q75/q25=inf train_time:339314ms step_avg:73.76ms +[2025-09-02 07:27:35] [Rank 0] step:4601/10000 train_time:339325ms step_avg:73.75ms +[2025-09-02 07:27:35] [Rank 0] step:4601/10000 train_time:339325ms step_avg:73.75ms +[2025-09-02 07:27:36] [Rank 0] step:4621/10000 train_time:340720ms step_avg:73.73ms +[2025-09-02 07:27:36] [Rank 0] step:4621/10000 train_time:340720ms step_avg:73.73ms +[2025-09-02 07:27:38] [Rank 0] step:4641/10000 train_time:342259ms step_avg:73.75ms +[2025-09-02 
07:27:38] [Rank 0] step:4641/10000 train_time:342259ms step_avg:73.75ms +[2025-09-02 07:27:39] [Rank 0] step:4661/10000 train_time:343800ms step_avg:73.76ms +[2025-09-02 07:27:39] [Rank 0] step:4661/10000 train_time:343800ms step_avg:73.76ms +[2025-09-02 07:27:41] [Rank 0] step:4681/10000 train_time:345342ms step_avg:73.78ms +[2025-09-02 07:27:41] [Rank 0] step:4681/10000 train_time:345342ms step_avg:73.78ms +[2025-09-02 07:27:42] [Rank 0] step:4701/10000 train_time:346882ms step_avg:73.79ms +[2025-09-02 07:27:42] [Rank 0] step:4701/10000 train_time:346882ms step_avg:73.79ms +[2025-09-02 07:27:44] [Rank 0] step:4721/10000 train_time:348421ms step_avg:73.80ms +[2025-09-02 07:27:44] [Rank 0] step:4721/10000 train_time:348421ms step_avg:73.80ms +[2025-09-02 07:27:45] [Rank 0] step:4741/10000 train_time:349962ms step_avg:73.82ms +[2025-09-02 07:27:45] [Rank 0] step:4741/10000 train_time:349962ms step_avg:73.82ms +[2025-09-02 07:27:47] [Rank 0] step:4761/10000 train_time:351502ms step_avg:73.83ms +[2025-09-02 07:27:47] [Rank 0] step:4761/10000 train_time:351502ms step_avg:73.83ms +[2025-09-02 07:27:49] [Rank 0] step:4781/10000 train_time:353042ms step_avg:73.84ms +[2025-09-02 07:27:49] [Rank 0] step:4781/10000 train_time:353042ms step_avg:73.84ms +[2025-09-02 07:27:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:27:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:28:02] [Rank 0] PRINT: step:4800/10000 val_loss:4.1018 svd_entropy: attn_qk:H=0.7351,top10E=0.29,eRank=158.0,q75/q25=83.07 attn_vo:H=0.7628,top10E=0.18,eRank=231.0,q75/q25=inf mlp_w1:H=0.7554,top10E=0.30,eRank=171.2,q75/q25=16.23 mlp_w2:H=0.8398,top10E=0.15,eRank=271.0,q75/q25=31.77 vo_prod:H=0.6333,top10E=0.26,eRank=95.6,q75/q25=inf train_time:354739ms step_avg:73.90ms +[2025-09-02 07:28:02] [Rank 0] PRINT: step:4800/10000 val_loss:4.1018 svd_entropy: attn_qk:H=0.7351,top10E=0.29,eRank=158.0,q75/q25=83.07 attn_vo:H=0.7628,top10E=0.18,eRank=231.0,q75/q25=inf mlp_w1:H=0.7554,top10E=0.30,eRank=171.2,q75/q25=16.23 mlp_w2:H=0.8398,top10E=0.15,eRank=271.0,q75/q25=31.77 vo_prod:H=0.6333,top10E=0.26,eRank=95.6,q75/q25=inf train_time:354739ms step_avg:73.90ms +[2025-09-02 07:28:02] [Rank 0] step:4801/10000 train_time:354750ms step_avg:73.89ms +[2025-09-02 07:28:02] [Rank 0] step:4801/10000 train_time:354750ms step_avg:73.89ms +[2025-09-02 07:28:03] [Rank 0] step:4821/10000 train_time:356146ms step_avg:73.87ms +[2025-09-02 07:28:03] [Rank 0] step:4821/10000 train_time:356146ms step_avg:73.87ms +[2025-09-02 07:28:05] [Rank 0] step:4841/10000 train_time:357683ms step_avg:73.89ms +[2025-09-02 07:28:05] [Rank 0] step:4841/10000 train_time:357683ms step_avg:73.89ms +[2025-09-02 07:28:07] [Rank 0] step:4861/10000 train_time:359225ms step_avg:73.90ms +[2025-09-02 07:28:07] [Rank 0] step:4861/10000 train_time:359225ms step_avg:73.90ms +[2025-09-02 07:28:08] [Rank 0] step:4881/10000 train_time:360764ms step_avg:73.91ms +[2025-09-02 07:28:08] [Rank 0] step:4881/10000 train_time:360764ms step_avg:73.91ms +[2025-09-02 07:28:10] [Rank 0] step:4901/10000 train_time:362301ms step_avg:73.92ms +[2025-09-02 07:28:10] [Rank 0] step:4901/10000 train_time:362301ms step_avg:73.92ms +[2025-09-02 07:28:11] [Rank 0] step:4921/10000 train_time:363842ms step_avg:73.94ms +[2025-09-02 07:28:11] [Rank 0] step:4921/10000 train_time:363842ms step_avg:73.94ms +[2025-09-02 07:28:13] [Rank 0] 
step:4941/10000 train_time:365382ms step_avg:73.95ms +[2025-09-02 07:28:13] [Rank 0] step:4941/10000 train_time:365382ms step_avg:73.95ms +[2025-09-02 07:28:14] [Rank 0] step:4961/10000 train_time:366923ms step_avg:73.96ms +[2025-09-02 07:28:14] [Rank 0] step:4961/10000 train_time:366923ms step_avg:73.96ms +[2025-09-02 07:28:16] [Rank 0] step:4981/10000 train_time:368466ms step_avg:73.97ms +[2025-09-02 07:28:16] [Rank 0] step:4981/10000 train_time:368466ms step_avg:73.97ms +[2025-09-02 07:28:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:28:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:28:29] [Rank 0] PRINT: step:5000/10000 val_loss:4.0811 svd_entropy: attn_qk:H=0.7378,top10E=0.29,eRank=159.9,q75/q25=84.31 attn_vo:H=0.7653,top10E=0.18,eRank=233.9,q75/q25=inf mlp_w1:H=0.7595,top10E=0.29,eRank=175.5,q75/q25=16.87 mlp_w2:H=0.8416,top10E=0.15,eRank=274.3,q75/q25=32.25 vo_prod:H=0.6366,top10E=0.26,eRank=97.8,q75/q25=inf train_time:370158ms step_avg:74.03ms +[2025-09-02 07:28:29] [Rank 0] PRINT: step:5000/10000 val_loss:4.0811 svd_entropy: attn_qk:H=0.7378,top10E=0.29,eRank=159.9,q75/q25=84.31 attn_vo:H=0.7653,top10E=0.18,eRank=233.9,q75/q25=inf mlp_w1:H=0.7595,top10E=0.29,eRank=175.5,q75/q25=16.87 mlp_w2:H=0.8416,top10E=0.15,eRank=274.3,q75/q25=32.25 vo_prod:H=0.6366,top10E=0.26,eRank=97.8,q75/q25=inf train_time:370158ms step_avg:74.03ms +[2025-09-02 07:28:29] [Rank 0] step:5001/10000 train_time:370168ms step_avg:74.02ms +[2025-09-02 07:28:29] [Rank 0] step:5001/10000 train_time:370168ms step_avg:74.02ms +[2025-09-02 07:28:31] [Rank 0] step:5021/10000 train_time:371585ms step_avg:74.01ms +[2025-09-02 07:28:31] [Rank 0] step:5021/10000 train_time:371585ms step_avg:74.01ms +[2025-09-02 07:28:32] [Rank 0] step:5041/10000 train_time:373122ms step_avg:74.02ms +[2025-09-02 
07:28:32] [Rank 0] step:5041/10000 train_time:373122ms step_avg:74.02ms +[2025-09-02 07:28:34] [Rank 0] step:5061/10000 train_time:374657ms step_avg:74.03ms +[2025-09-02 07:28:34] [Rank 0] step:5061/10000 train_time:374657ms step_avg:74.03ms +[2025-09-02 07:28:35] [Rank 0] step:5081/10000 train_time:376194ms step_avg:74.04ms +[2025-09-02 07:28:35] [Rank 0] step:5081/10000 train_time:376194ms step_avg:74.04ms +[2025-09-02 07:28:37] [Rank 0] step:5101/10000 train_time:377733ms step_avg:74.05ms +[2025-09-02 07:28:37] [Rank 0] step:5101/10000 train_time:377733ms step_avg:74.05ms +[2025-09-02 07:28:39] [Rank 0] step:5121/10000 train_time:379271ms step_avg:74.06ms +[2025-09-02 07:28:39] [Rank 0] step:5121/10000 train_time:379271ms step_avg:74.06ms +[2025-09-02 07:28:40] [Rank 0] step:5141/10000 train_time:380812ms step_avg:74.07ms +[2025-09-02 07:28:40] [Rank 0] step:5141/10000 train_time:380812ms step_avg:74.07ms +[2025-09-02 07:28:42] [Rank 0] step:5161/10000 train_time:382350ms step_avg:74.08ms +[2025-09-02 07:28:42] [Rank 0] step:5161/10000 train_time:382350ms step_avg:74.08ms +[2025-09-02 07:28:43] [Rank 0] step:5181/10000 train_time:383890ms step_avg:74.10ms +[2025-09-02 07:28:43] [Rank 0] step:5181/10000 train_time:383890ms step_avg:74.10ms +[2025-09-02 07:28:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:28:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:28:56] [Rank 0] PRINT: step:5200/10000 val_loss:4.0561 svd_entropy: attn_qk:H=0.7403,top10E=0.28,eRank=161.9,q75/q25=85.49 attn_vo:H=0.7677,top10E=0.17,eRank=236.6,q75/q25=inf mlp_w1:H=0.7635,top10E=0.29,eRank=179.7,q75/q25=17.42 mlp_w2:H=0.8432,top10E=0.14,eRank=277.4,q75/q25=32.51 vo_prod:H=0.6397,top10E=0.25,eRank=100.0,q75/q25=inf train_time:385609ms step_avg:74.16ms +[2025-09-02 07:28:56] [Rank 0] PRINT: step:5200/10000 val_loss:4.0561 svd_entropy: attn_qk:H=0.7403,top10E=0.28,eRank=161.9,q75/q25=85.49 attn_vo:H=0.7677,top10E=0.17,eRank=236.6,q75/q25=inf mlp_w1:H=0.7635,top10E=0.29,eRank=179.7,q75/q25=17.42 mlp_w2:H=0.8432,top10E=0.14,eRank=277.4,q75/q25=32.51 vo_prod:H=0.6397,top10E=0.25,eRank=100.0,q75/q25=inf train_time:385609ms step_avg:74.16ms +[2025-09-02 07:28:57] [Rank 0] step:5201/10000 train_time:385620ms step_avg:74.14ms +[2025-09-02 07:28:57] [Rank 0] step:5201/10000 train_time:385620ms step_avg:74.14ms +[2025-09-02 07:28:58] [Rank 0] step:5221/10000 train_time:387044ms step_avg:74.13ms +[2025-09-02 07:28:58] [Rank 0] step:5221/10000 train_time:387044ms step_avg:74.13ms +[2025-09-02 07:29:00] [Rank 0] step:5241/10000 train_time:388611ms step_avg:74.15ms +[2025-09-02 07:29:00] [Rank 0] step:5241/10000 train_time:388611ms step_avg:74.15ms +[2025-09-02 07:29:01] [Rank 0] step:5261/10000 train_time:390181ms step_avg:74.16ms +[2025-09-02 07:29:01] [Rank 0] step:5261/10000 train_time:390181ms step_avg:74.16ms +[2025-09-02 07:29:03] [Rank 0] step:5281/10000 train_time:391753ms step_avg:74.18ms +[2025-09-02 07:29:03] [Rank 0] step:5281/10000 train_time:391753ms step_avg:74.18ms +[2025-09-02 07:29:04] [Rank 0] step:5301/10000 train_time:393332ms step_avg:74.20ms +[2025-09-02 07:29:04] [Rank 0] step:5301/10000 train_time:393332ms step_avg:74.20ms +[2025-09-02 07:29:06] [Rank 0] step:5321/10000 train_time:394902ms step_avg:74.22ms +[2025-09-02 07:29:06] [Rank 0] step:5321/10000 train_time:394902ms step_avg:74.22ms +[2025-09-02 07:29:08] [Rank 
0] step:5341/10000 train_time:396472ms step_avg:74.23ms +[2025-09-02 07:29:08] [Rank 0] step:5341/10000 train_time:396472ms step_avg:74.23ms +[2025-09-02 07:29:09] [Rank 0] step:5361/10000 train_time:398046ms step_avg:74.25ms +[2025-09-02 07:29:09] [Rank 0] step:5361/10000 train_time:398046ms step_avg:74.25ms +[2025-09-02 07:29:11] [Rank 0] step:5381/10000 train_time:399618ms step_avg:74.26ms +[2025-09-02 07:29:11] [Rank 0] step:5381/10000 train_time:399618ms step_avg:74.26ms +[2025-09-02 07:29:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:29:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:29:24] [Rank 0] PRINT: step:5400/10000 val_loss:4.0396 svd_entropy: attn_qk:H=0.7426,top10E=0.28,eRank=163.6,q75/q25=85.95 attn_vo:H=0.7699,top10E=0.17,eRank=239.2,q75/q25=inf mlp_w1:H=0.7672,top10E=0.28,eRank=183.8,q75/q25=17.98 mlp_w2:H=0.8449,top10E=0.14,eRank=280.4,q75/q25=32.46 vo_prod:H=0.6426,top10E=0.25,eRank=102.1,q75/q25=inf train_time:401345ms step_avg:74.32ms +[2025-09-02 07:29:24] [Rank 0] PRINT: step:5400/10000 val_loss:4.0396 svd_entropy: attn_qk:H=0.7426,top10E=0.28,eRank=163.6,q75/q25=85.95 attn_vo:H=0.7699,top10E=0.17,eRank=239.2,q75/q25=inf mlp_w1:H=0.7672,top10E=0.28,eRank=183.8,q75/q25=17.98 mlp_w2:H=0.8449,top10E=0.14,eRank=280.4,q75/q25=32.46 vo_prod:H=0.6426,top10E=0.25,eRank=102.1,q75/q25=inf train_time:401345ms step_avg:74.32ms +[2025-09-02 07:29:24] [Rank 0] step:5401/10000 train_time:401356ms step_avg:74.31ms +[2025-09-02 07:29:24] [Rank 0] step:5401/10000 train_time:401356ms step_avg:74.31ms +[2025-09-02 07:29:26] [Rank 0] step:5421/10000 train_time:402790ms step_avg:74.30ms +[2025-09-02 07:29:26] [Rank 0] step:5421/10000 train_time:402790ms step_avg:74.30ms +[2025-09-02 07:29:27] [Rank 0] step:5441/10000 train_time:404355ms step_avg:74.32ms +[2025-09-02 
07:29:27] [Rank 0] step:5441/10000 train_time:404355ms step_avg:74.32ms +[2025-09-02 07:29:29] [Rank 0] step:5461/10000 train_time:405929ms step_avg:74.33ms +[2025-09-02 07:29:29] [Rank 0] step:5461/10000 train_time:405929ms step_avg:74.33ms +[2025-09-02 07:29:30] [Rank 0] step:5481/10000 train_time:407504ms step_avg:74.35ms +[2025-09-02 07:29:30] [Rank 0] step:5481/10000 train_time:407504ms step_avg:74.35ms +[2025-09-02 07:29:32] [Rank 0] step:5501/10000 train_time:409079ms step_avg:74.36ms +[2025-09-02 07:29:32] [Rank 0] step:5501/10000 train_time:409079ms step_avg:74.36ms +[2025-09-02 07:29:33] [Rank 0] step:5521/10000 train_time:410657ms step_avg:74.38ms +[2025-09-02 07:29:33] [Rank 0] step:5521/10000 train_time:410657ms step_avg:74.38ms +[2025-09-02 07:29:35] [Rank 0] step:5541/10000 train_time:412230ms step_avg:74.40ms +[2025-09-02 07:29:35] [Rank 0] step:5541/10000 train_time:412230ms step_avg:74.40ms +[2025-09-02 07:29:37] [Rank 0] step:5561/10000 train_time:413803ms step_avg:74.41ms +[2025-09-02 07:29:37] [Rank 0] step:5561/10000 train_time:413803ms step_avg:74.41ms +[2025-09-02 07:29:38] [Rank 0] step:5581/10000 train_time:415375ms step_avg:74.43ms +[2025-09-02 07:29:38] [Rank 0] step:5581/10000 train_time:415375ms step_avg:74.43ms +[2025-09-02 07:29:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:29:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:29:52] [Rank 0] PRINT: step:5600/10000 val_loss:4.0241 svd_entropy: attn_qk:H=0.7450,top10E=0.28,eRank=165.6,q75/q25=86.47 attn_vo:H=0.7720,top10E=0.17,eRank=241.6,q75/q25=inf mlp_w1:H=0.7706,top10E=0.28,eRank=187.6,q75/q25=18.49 mlp_w2:H=0.8464,top10E=0.14,eRank=283.3,q75/q25=32.88 vo_prod:H=0.6453,top10E=0.25,eRank=104.1,q75/q25=inf train_time:417106ms step_avg:74.48ms +[2025-09-02 07:29:52] [Rank 0] PRINT: step:5600/10000 val_loss:4.0241 svd_entropy: attn_qk:H=0.7450,top10E=0.28,eRank=165.6,q75/q25=86.47 attn_vo:H=0.7720,top10E=0.17,eRank=241.6,q75/q25=inf mlp_w1:H=0.7706,top10E=0.28,eRank=187.6,q75/q25=18.49 mlp_w2:H=0.8464,top10E=0.14,eRank=283.3,q75/q25=32.88 vo_prod:H=0.6453,top10E=0.25,eRank=104.1,q75/q25=inf train_time:417106ms step_avg:74.48ms +[2025-09-02 07:29:52] [Rank 0] step:5601/10000 train_time:417117ms step_avg:74.47ms +[2025-09-02 07:29:52] [Rank 0] step:5601/10000 train_time:417117ms step_avg:74.47ms +[2025-09-02 07:29:53] [Rank 0] step:5621/10000 train_time:418543ms step_avg:74.46ms +[2025-09-02 07:29:53] [Rank 0] step:5621/10000 train_time:418543ms step_avg:74.46ms +[2025-09-02 07:29:55] [Rank 0] step:5641/10000 train_time:420115ms step_avg:74.48ms +[2025-09-02 07:29:55] [Rank 0] step:5641/10000 train_time:420115ms step_avg:74.48ms +[2025-09-02 07:29:56] [Rank 0] step:5661/10000 train_time:421684ms step_avg:74.49ms +[2025-09-02 07:29:56] [Rank 0] step:5661/10000 train_time:421684ms step_avg:74.49ms +[2025-09-02 07:29:58] [Rank 0] step:5681/10000 train_time:423260ms step_avg:74.50ms +[2025-09-02 07:29:58] [Rank 0] step:5681/10000 train_time:423260ms step_avg:74.50ms +[2025-09-02 07:30:00] [Rank 0] step:5701/10000 train_time:424829ms step_avg:74.52ms +[2025-09-02 07:30:00] [Rank 0] step:5701/10000 train_time:424829ms step_avg:74.52ms +[2025-09-02 07:30:01] [Rank 0] step:5721/10000 train_time:426406ms step_avg:74.53ms +[2025-09-02 07:30:01] [Rank 0] step:5721/10000 train_time:426406ms step_avg:74.53ms +[2025-09-02 07:30:03] [Rank 
0] step:5741/10000 train_time:427981ms step_avg:74.55ms +[2025-09-02 07:30:03] [Rank 0] step:5741/10000 train_time:427981ms step_avg:74.55ms +[2025-09-02 07:30:04] [Rank 0] step:5761/10000 train_time:429554ms step_avg:74.56ms +[2025-09-02 07:30:04] [Rank 0] step:5761/10000 train_time:429554ms step_avg:74.56ms +[2025-09-02 07:30:06] [Rank 0] step:5781/10000 train_time:431129ms step_avg:74.58ms +[2025-09-02 07:30:06] [Rank 0] step:5781/10000 train_time:431129ms step_avg:74.58ms +[2025-09-02 07:30:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:30:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:30:19] [Rank 0] PRINT: step:5800/10000 val_loss:4.0144 svd_entropy: attn_qk:H=0.7471,top10E=0.27,eRank=167.3,q75/q25=87.52 attn_vo:H=0.7740,top10E=0.16,eRank=244.0,q75/q25=inf mlp_w1:H=0.7738,top10E=0.27,eRank=191.2,q75/q25=19.00 mlp_w2:H=0.8478,top10E=0.14,eRank=285.9,q75/q25=33.18 vo_prod:H=0.6478,top10E=0.24,eRank=106.0,q75/q25=inf train_time:432864ms step_avg:74.63ms +[2025-09-02 07:30:19] [Rank 0] PRINT: step:5800/10000 val_loss:4.0144 svd_entropy: attn_qk:H=0.7471,top10E=0.27,eRank=167.3,q75/q25=87.52 attn_vo:H=0.7740,top10E=0.16,eRank=244.0,q75/q25=inf mlp_w1:H=0.7738,top10E=0.27,eRank=191.2,q75/q25=19.00 mlp_w2:H=0.8478,top10E=0.14,eRank=285.9,q75/q25=33.18 vo_prod:H=0.6478,top10E=0.24,eRank=106.0,q75/q25=inf train_time:432864ms step_avg:74.63ms +[2025-09-02 07:30:19] [Rank 0] step:5801/10000 train_time:432875ms step_avg:74.62ms +[2025-09-02 07:30:19] [Rank 0] step:5801/10000 train_time:432875ms step_avg:74.62ms +[2025-09-02 07:30:21] [Rank 0] step:5821/10000 train_time:434301ms step_avg:74.61ms +[2025-09-02 07:30:21] [Rank 0] step:5821/10000 train_time:434301ms step_avg:74.61ms +[2025-09-02 07:30:22] [Rank 0] step:5841/10000 train_time:435871ms step_avg:74.62ms +[2025-09-02 
07:30:22] [Rank 0] step:5841/10000 train_time:435871ms step_avg:74.62ms +[2025-09-02 07:30:24] [Rank 0] step:5861/10000 train_time:437444ms step_avg:74.64ms +[2025-09-02 07:30:24] [Rank 0] step:5861/10000 train_time:437444ms step_avg:74.64ms +[2025-09-02 07:30:25] [Rank 0] step:5881/10000 train_time:439019ms step_avg:74.65ms +[2025-09-02 07:30:25] [Rank 0] step:5881/10000 train_time:439019ms step_avg:74.65ms +[2025-09-02 07:30:27] [Rank 0] step:5901/10000 train_time:440593ms step_avg:74.66ms +[2025-09-02 07:30:27] [Rank 0] step:5901/10000 train_time:440593ms step_avg:74.66ms +[2025-09-02 07:30:29] [Rank 0] step:5921/10000 train_time:442166ms step_avg:74.68ms +[2025-09-02 07:30:29] [Rank 0] step:5921/10000 train_time:442166ms step_avg:74.68ms +[2025-09-02 07:30:30] [Rank 0] step:5941/10000 train_time:443742ms step_avg:74.69ms +[2025-09-02 07:30:30] [Rank 0] step:5941/10000 train_time:443742ms step_avg:74.69ms +[2025-09-02 07:30:32] [Rank 0] step:5961/10000 train_time:445319ms step_avg:74.71ms +[2025-09-02 07:30:32] [Rank 0] step:5961/10000 train_time:445319ms step_avg:74.71ms +[2025-09-02 07:30:33] [Rank 0] step:5981/10000 train_time:446894ms step_avg:74.72ms +[2025-09-02 07:30:33] [Rank 0] step:5981/10000 train_time:446894ms step_avg:74.72ms +[2025-09-02 07:30:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:30:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:30:47] [Rank 0] PRINT: step:6000/10000 val_loss:3.9910 svd_entropy: attn_qk:H=0.7491,top10E=0.27,eRank=168.9,q75/q25=87.18 attn_vo:H=0.7760,top10E=0.16,eRank=246.4,q75/q25=inf mlp_w1:H=0.7768,top10E=0.27,eRank=194.8,q75/q25=19.57 mlp_w2:H=0.8492,top10E=0.14,eRank=288.6,q75/q25=33.40 vo_prod:H=0.6504,top10E=0.24,eRank=107.9,q75/q25=inf train_time:448624ms step_avg:74.77ms +[2025-09-02 07:30:47] [Rank 0] PRINT: step:6000/10000 val_loss:3.9910 svd_entropy: attn_qk:H=0.7491,top10E=0.27,eRank=168.9,q75/q25=87.18 attn_vo:H=0.7760,top10E=0.16,eRank=246.4,q75/q25=inf mlp_w1:H=0.7768,top10E=0.27,eRank=194.8,q75/q25=19.57 mlp_w2:H=0.8492,top10E=0.14,eRank=288.6,q75/q25=33.40 vo_prod:H=0.6504,top10E=0.24,eRank=107.9,q75/q25=inf train_time:448624ms step_avg:74.77ms +[2025-09-02 07:30:47] [Rank 0] step:6001/10000 train_time:448636ms step_avg:74.76ms +[2025-09-02 07:30:47] [Rank 0] step:6001/10000 train_time:448636ms step_avg:74.76ms +[2025-09-02 07:30:48] [Rank 0] step:6021/10000 train_time:450070ms step_avg:74.75ms +[2025-09-02 07:30:48] [Rank 0] step:6021/10000 train_time:450070ms step_avg:74.75ms +[2025-09-02 07:30:50] [Rank 0] step:6041/10000 train_time:451641ms step_avg:74.76ms +[2025-09-02 07:30:50] [Rank 0] step:6041/10000 train_time:451641ms step_avg:74.76ms +[2025-09-02 07:30:51] [Rank 0] step:6061/10000 train_time:453221ms step_avg:74.78ms +[2025-09-02 07:30:51] [Rank 0] step:6061/10000 train_time:453221ms step_avg:74.78ms +[2025-09-02 07:30:53] [Rank 0] step:6081/10000 train_time:454796ms step_avg:74.79ms +[2025-09-02 07:30:53] [Rank 0] step:6081/10000 train_time:454796ms step_avg:74.79ms +[2025-09-02 07:30:55] [Rank 0] step:6101/10000 train_time:456375ms step_avg:74.80ms +[2025-09-02 07:30:55] [Rank 0] step:6101/10000 train_time:456375ms step_avg:74.80ms +[2025-09-02 07:30:56] [Rank 0] step:6121/10000 train_time:458217ms step_avg:74.86ms +[2025-09-02 07:30:56] [Rank 0] step:6121/10000 train_time:458217ms step_avg:74.86ms +[2025-09-02 07:30:58] [Rank 
0] step:6141/10000 train_time:459802ms step_avg:74.87ms +[2025-09-02 07:30:58] [Rank 0] step:6141/10000 train_time:459802ms step_avg:74.87ms +[2025-09-02 07:31:00] [Rank 0] step:6161/10000 train_time:461380ms step_avg:74.89ms +[2025-09-02 07:31:00] [Rank 0] step:6161/10000 train_time:461380ms step_avg:74.89ms +[2025-09-02 07:31:01] [Rank 0] step:6181/10000 train_time:462956ms step_avg:74.90ms +[2025-09-02 07:31:01] [Rank 0] step:6181/10000 train_time:462956ms step_avg:74.90ms +[2025-09-02 07:31:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:31:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:31:15] [Rank 0] PRINT: step:6200/10000 val_loss:3.9756 svd_entropy: attn_qk:H=0.7511,top10E=0.27,eRank=170.6,q75/q25=88.15 attn_vo:H=0.7778,top10E=0.16,eRank=248.7,q75/q25=inf mlp_w1:H=0.7796,top10E=0.27,eRank=198.0,q75/q25=20.09 mlp_w2:H=0.8504,top10E=0.14,eRank=291.0,q75/q25=33.73 vo_prod:H=0.6527,top10E=0.24,eRank=109.7,q75/q25=inf train_time:464690ms step_avg:74.95ms +[2025-09-02 07:31:15] [Rank 0] PRINT: step:6200/10000 val_loss:3.9756 svd_entropy: attn_qk:H=0.7511,top10E=0.27,eRank=170.6,q75/q25=88.15 attn_vo:H=0.7778,top10E=0.16,eRank=248.7,q75/q25=inf mlp_w1:H=0.7796,top10E=0.27,eRank=198.0,q75/q25=20.09 mlp_w2:H=0.8504,top10E=0.14,eRank=291.0,q75/q25=33.73 vo_prod:H=0.6527,top10E=0.24,eRank=109.7,q75/q25=inf train_time:464690ms step_avg:74.95ms +[2025-09-02 07:31:15] [Rank 0] step:6201/10000 train_time:464701ms step_avg:74.94ms +[2025-09-02 07:31:15] [Rank 0] step:6201/10000 train_time:464701ms step_avg:74.94ms +[2025-09-02 07:31:16] [Rank 0] step:6221/10000 train_time:466131ms step_avg:74.93ms +[2025-09-02 07:31:16] [Rank 0] step:6221/10000 train_time:466131ms step_avg:74.93ms +[2025-09-02 07:31:18] [Rank 0] step:6241/10000 train_time:467700ms step_avg:74.94ms +[2025-09-02 
07:31:18] [Rank 0] step:6241/10000 train_time:467700ms step_avg:74.94ms +[2025-09-02 07:31:19] [Rank 0] step:6261/10000 train_time:469276ms step_avg:74.95ms +[2025-09-02 07:31:19] [Rank 0] step:6261/10000 train_time:469276ms step_avg:74.95ms +[2025-09-02 07:31:21] [Rank 0] step:6281/10000 train_time:470855ms step_avg:74.97ms +[2025-09-02 07:31:21] [Rank 0] step:6281/10000 train_time:470855ms step_avg:74.97ms +[2025-09-02 07:31:23] [Rank 0] step:6301/10000 train_time:472439ms step_avg:74.98ms +[2025-09-02 07:31:23] [Rank 0] step:6301/10000 train_time:472439ms step_avg:74.98ms +[2025-09-02 07:31:24] [Rank 0] step:6321/10000 train_time:474013ms step_avg:74.99ms +[2025-09-02 07:31:24] [Rank 0] step:6321/10000 train_time:474013ms step_avg:74.99ms +[2025-09-02 07:31:26] [Rank 0] step:6341/10000 train_time:475593ms step_avg:75.00ms +[2025-09-02 07:31:26] [Rank 0] step:6341/10000 train_time:475593ms step_avg:75.00ms +[2025-09-02 07:31:27] [Rank 0] step:6361/10000 train_time:477176ms step_avg:75.02ms +[2025-09-02 07:31:27] [Rank 0] step:6361/10000 train_time:477176ms step_avg:75.02ms +[2025-09-02 07:31:29] [Rank 0] step:6381/10000 train_time:478757ms step_avg:75.03ms +[2025-09-02 07:31:29] [Rank 0] step:6381/10000 train_time:478757ms step_avg:75.03ms +[2025-09-02 07:31:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:31:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:31:42] [Rank 0] PRINT: step:6400/10000 val_loss:3.9579 svd_entropy: attn_qk:H=0.7529,top10E=0.27,eRank=172.1,q75/q25=88.73 attn_vo:H=0.7794,top10E=0.16,eRank=250.6,q75/q25=inf mlp_w1:H=0.7820,top10E=0.26,eRank=200.9,q75/q25=20.61 mlp_w2:H=0.8515,top10E=0.14,eRank=293.1,q75/q25=34.13 vo_prod:H=0.6547,top10E=0.24,eRank=111.3,q75/q25=inf train_time:480493ms step_avg:75.08ms +[2025-09-02 07:31:42] [Rank 0] PRINT: step:6400/10000 val_loss:3.9579 svd_entropy: attn_qk:H=0.7529,top10E=0.27,eRank=172.1,q75/q25=88.73 attn_vo:H=0.7794,top10E=0.16,eRank=250.6,q75/q25=inf mlp_w1:H=0.7820,top10E=0.26,eRank=200.9,q75/q25=20.61 mlp_w2:H=0.8515,top10E=0.14,eRank=293.1,q75/q25=34.13 vo_prod:H=0.6547,top10E=0.24,eRank=111.3,q75/q25=inf train_time:480493ms step_avg:75.08ms +[2025-09-02 07:31:42] [Rank 0] step:6401/10000 train_time:480504ms step_avg:75.07ms +[2025-09-02 07:31:42] [Rank 0] step:6401/10000 train_time:480504ms step_avg:75.07ms +[2025-09-02 07:31:44] [Rank 0] step:6421/10000 train_time:481929ms step_avg:75.06ms +[2025-09-02 07:31:44] [Rank 0] step:6421/10000 train_time:481929ms step_avg:75.06ms +[2025-09-02 07:31:46] [Rank 0] step:6441/10000 train_time:483503ms step_avg:75.07ms +[2025-09-02 07:31:46] [Rank 0] step:6441/10000 train_time:483503ms step_avg:75.07ms +[2025-09-02 07:31:47] [Rank 0] step:6461/10000 train_time:485084ms step_avg:75.08ms +[2025-09-02 07:31:47] [Rank 0] step:6461/10000 train_time:485084ms step_avg:75.08ms +[2025-09-02 07:31:49] [Rank 0] step:6481/10000 train_time:486669ms step_avg:75.09ms +[2025-09-02 07:31:49] [Rank 0] step:6481/10000 train_time:486669ms step_avg:75.09ms +[2025-09-02 07:31:50] [Rank 0] step:6501/10000 train_time:488244ms step_avg:75.10ms +[2025-09-02 07:31:50] [Rank 0] step:6501/10000 train_time:488244ms step_avg:75.10ms +[2025-09-02 07:31:52] [Rank 0] step:6521/10000 train_time:489817ms step_avg:75.11ms +[2025-09-02 07:31:52] [Rank 0] step:6521/10000 train_time:489817ms step_avg:75.11ms +[2025-09-02 07:31:53] [Rank 
0] step:6541/10000 train_time:491396ms step_avg:75.13ms +[2025-09-02 07:31:53] [Rank 0] step:6541/10000 train_time:491396ms step_avg:75.13ms +[2025-09-02 07:31:55] [Rank 0] step:6561/10000 train_time:492977ms step_avg:75.14ms +[2025-09-02 07:31:55] [Rank 0] step:6561/10000 train_time:492977ms step_avg:75.14ms +[2025-09-02 07:31:57] [Rank 0] step:6581/10000 train_time:494553ms step_avg:75.15ms +[2025-09-02 07:31:57] [Rank 0] step:6581/10000 train_time:494553ms step_avg:75.15ms +[2025-09-02 07:31:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:31:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:32:10] [Rank 0] PRINT: step:6600/10000 val_loss:3.9478 svd_entropy: attn_qk:H=0.7545,top10E=0.26,eRank=173.5,q75/q25=88.58 attn_vo:H=0.7810,top10E=0.16,eRank=252.6,q75/q25=inf mlp_w1:H=0.7842,top10E=0.26,eRank=203.6,q75/q25=21.05 mlp_w2:H=0.8525,top10E=0.13,eRank=295.1,q75/q25=34.26 vo_prod:H=0.6568,top10E=0.23,eRank=113.0,q75/q25=inf train_time:496290ms step_avg:75.20ms +[2025-09-02 07:32:10] [Rank 0] PRINT: step:6600/10000 val_loss:3.9478 svd_entropy: attn_qk:H=0.7545,top10E=0.26,eRank=173.5,q75/q25=88.58 attn_vo:H=0.7810,top10E=0.16,eRank=252.6,q75/q25=inf mlp_w1:H=0.7842,top10E=0.26,eRank=203.6,q75/q25=21.05 mlp_w2:H=0.8525,top10E=0.13,eRank=295.1,q75/q25=34.26 vo_prod:H=0.6568,top10E=0.23,eRank=113.0,q75/q25=inf train_time:496290ms step_avg:75.20ms +[2025-09-02 07:32:10] [Rank 0] step:6601/10000 train_time:496302ms step_avg:75.19ms +[2025-09-02 07:32:10] [Rank 0] step:6601/10000 train_time:496302ms step_avg:75.19ms +[2025-09-02 07:32:12] [Rank 0] step:6621/10000 train_time:497726ms step_avg:75.17ms +[2025-09-02 07:32:12] [Rank 0] step:6621/10000 train_time:497726ms step_avg:75.17ms +[2025-09-02 07:32:13] [Rank 0] step:6641/10000 train_time:499308ms step_avg:75.19ms +[2025-09-02 
07:32:13] [Rank 0] step:6641/10000 train_time:499308ms step_avg:75.19ms +[2025-09-02 07:32:15] [Rank 0] step:6661/10000 train_time:500885ms step_avg:75.20ms +[2025-09-02 07:32:15] [Rank 0] step:6661/10000 train_time:500885ms step_avg:75.20ms +[2025-09-02 07:32:16] [Rank 0] step:6681/10000 train_time:502480ms step_avg:75.21ms +[2025-09-02 07:32:16] [Rank 0] step:6681/10000 train_time:502480ms step_avg:75.21ms +[2025-09-02 07:32:18] [Rank 0] step:6701/10000 train_time:504093ms step_avg:75.23ms +[2025-09-02 07:32:18] [Rank 0] step:6701/10000 train_time:504093ms step_avg:75.23ms +[2025-09-02 07:32:20] [Rank 0] step:6721/10000 train_time:505700ms step_avg:75.24ms +[2025-09-02 07:32:20] [Rank 0] step:6721/10000 train_time:505700ms step_avg:75.24ms +[2025-09-02 07:32:21] [Rank 0] step:6741/10000 train_time:507305ms step_avg:75.26ms +[2025-09-02 07:32:21] [Rank 0] step:6741/10000 train_time:507305ms step_avg:75.26ms +[2025-09-02 07:32:23] [Rank 0] step:6761/10000 train_time:508913ms step_avg:75.27ms +[2025-09-02 07:32:23] [Rank 0] step:6761/10000 train_time:508913ms step_avg:75.27ms +[2025-09-02 07:32:24] [Rank 0] step:6781/10000 train_time:510531ms step_avg:75.29ms +[2025-09-02 07:32:24] [Rank 0] step:6781/10000 train_time:510531ms step_avg:75.29ms +[2025-09-02 07:32:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:32:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:32:38] [Rank 0] PRINT: step:6800/10000 val_loss:3.9286 svd_entropy: attn_qk:H=0.7560,top10E=0.26,eRank=174.8,q75/q25=88.94 attn_vo:H=0.7823,top10E=0.15,eRank=254.3,q75/q25=inf mlp_w1:H=0.7863,top10E=0.26,eRank=206.1,q75/q25=21.26 mlp_w2:H=0.8536,top10E=0.13,eRank=297.3,q75/q25=34.04 vo_prod:H=0.6587,top10E=0.23,eRank=114.5,q75/q25=inf train_time:512305ms step_avg:75.34ms +[2025-09-02 07:32:38] [Rank 0] PRINT: step:6800/10000 val_loss:3.9286 svd_entropy: attn_qk:H=0.7560,top10E=0.26,eRank=174.8,q75/q25=88.94 attn_vo:H=0.7823,top10E=0.15,eRank=254.3,q75/q25=inf mlp_w1:H=0.7863,top10E=0.26,eRank=206.1,q75/q25=21.26 mlp_w2:H=0.8536,top10E=0.13,eRank=297.3,q75/q25=34.04 vo_prod:H=0.6587,top10E=0.23,eRank=114.5,q75/q25=inf train_time:512305ms step_avg:75.34ms +[2025-09-02 07:32:38] [Rank 0] step:6801/10000 train_time:512317ms step_avg:75.33ms +[2025-09-02 07:32:38] [Rank 0] step:6801/10000 train_time:512317ms step_avg:75.33ms +[2025-09-02 07:32:40] [Rank 0] step:6821/10000 train_time:513784ms step_avg:75.32ms +[2025-09-02 07:32:40] [Rank 0] step:6821/10000 train_time:513784ms step_avg:75.32ms +[2025-09-02 07:32:41] [Rank 0] step:6841/10000 train_time:515383ms step_avg:75.34ms +[2025-09-02 07:32:41] [Rank 0] step:6841/10000 train_time:515383ms step_avg:75.34ms +[2025-09-02 07:32:43] [Rank 0] step:6861/10000 train_time:516994ms step_avg:75.35ms +[2025-09-02 07:32:43] [Rank 0] step:6861/10000 train_time:516994ms step_avg:75.35ms +[2025-09-02 07:32:44] [Rank 0] step:6881/10000 train_time:518595ms step_avg:75.37ms +[2025-09-02 07:32:44] [Rank 0] step:6881/10000 train_time:518595ms step_avg:75.37ms +[2025-09-02 07:32:46] [Rank 0] step:6901/10000 train_time:520202ms step_avg:75.38ms +[2025-09-02 07:32:46] [Rank 0] step:6901/10000 train_time:520202ms step_avg:75.38ms +[2025-09-02 07:32:48] [Rank 0] step:6921/10000 train_time:521803ms step_avg:75.39ms +[2025-09-02 07:32:48] [Rank 0] step:6921/10000 train_time:521803ms step_avg:75.39ms +[2025-09-02 07:32:49] [Rank 
0] step:6941/10000 train_time:523412ms step_avg:75.41ms +[2025-09-02 07:32:49] [Rank 0] step:6941/10000 train_time:523412ms step_avg:75.41ms +[2025-09-02 07:32:51] [Rank 0] step:6961/10000 train_time:525031ms step_avg:75.42ms +[2025-09-02 07:32:51] [Rank 0] step:6961/10000 train_time:525031ms step_avg:75.42ms +[2025-09-02 07:32:52] [Rank 0] step:6981/10000 train_time:526640ms step_avg:75.44ms +[2025-09-02 07:32:52] [Rank 0] step:6981/10000 train_time:526640ms step_avg:75.44ms +[2025-09-02 07:32:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:32:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:33:06] [Rank 0] PRINT: step:7000/10000 val_loss:3.9153 svd_entropy: attn_qk:H=0.7574,top10E=0.26,eRank=176.0,q75/q25=88.90 attn_vo:H=0.7836,top10E=0.15,eRank=255.9,q75/q25=inf mlp_w1:H=0.7881,top10E=0.25,eRank=208.3,q75/q25=21.62 mlp_w2:H=0.8546,top10E=0.13,eRank=299.1,q75/q25=34.13 vo_prod:H=0.6605,top10E=0.23,eRank=116.0,q75/q25=inf train_time:528414ms step_avg:75.49ms +[2025-09-02 07:33:06] [Rank 0] PRINT: step:7000/10000 val_loss:3.9153 svd_entropy: attn_qk:H=0.7574,top10E=0.26,eRank=176.0,q75/q25=88.90 attn_vo:H=0.7836,top10E=0.15,eRank=255.9,q75/q25=inf mlp_w1:H=0.7881,top10E=0.25,eRank=208.3,q75/q25=21.62 mlp_w2:H=0.8546,top10E=0.13,eRank=299.1,q75/q25=34.13 vo_prod:H=0.6605,top10E=0.23,eRank=116.0,q75/q25=inf train_time:528414ms step_avg:75.49ms +[2025-09-02 07:33:06] [Rank 0] step:7001/10000 train_time:528425ms step_avg:75.48ms +[2025-09-02 07:33:06] [Rank 0] step:7001/10000 train_time:528425ms step_avg:75.48ms +[2025-09-02 07:33:08] [Rank 0] step:7021/10000 train_time:529873ms step_avg:75.47ms +[2025-09-02 07:33:08] [Rank 0] step:7021/10000 train_time:529873ms step_avg:75.47ms +[2025-09-02 07:33:09] [Rank 0] step:7041/10000 train_time:531479ms step_avg:75.48ms +[2025-09-02 
07:33:09] [Rank 0] step:7041/10000 train_time:531479ms step_avg:75.48ms +[2025-09-02 07:33:11] [Rank 0] step:7061/10000 train_time:533084ms step_avg:75.50ms +[2025-09-02 07:33:11] [Rank 0] step:7061/10000 train_time:533084ms step_avg:75.50ms +[2025-09-02 07:33:12] [Rank 0] step:7081/10000 train_time:534688ms step_avg:75.51ms +[2025-09-02 07:33:12] [Rank 0] step:7081/10000 train_time:534688ms step_avg:75.51ms +[2025-09-02 07:33:14] [Rank 0] step:7101/10000 train_time:536289ms step_avg:75.52ms +[2025-09-02 07:33:14] [Rank 0] step:7101/10000 train_time:536289ms step_avg:75.52ms +[2025-09-02 07:33:16] [Rank 0] step:7121/10000 train_time:537896ms step_avg:75.54ms +[2025-09-02 07:33:16] [Rank 0] step:7121/10000 train_time:537896ms step_avg:75.54ms +[2025-09-02 07:33:17] [Rank 0] step:7141/10000 train_time:539500ms step_avg:75.55ms +[2025-09-02 07:33:17] [Rank 0] step:7141/10000 train_time:539500ms step_avg:75.55ms +[2025-09-02 07:33:19] [Rank 0] step:7161/10000 train_time:541179ms step_avg:75.57ms +[2025-09-02 07:33:19] [Rank 0] step:7161/10000 train_time:541179ms step_avg:75.57ms +[2025-09-02 07:33:20] [Rank 0] step:7181/10000 train_time:542784ms step_avg:75.59ms +[2025-09-02 07:33:20] [Rank 0] step:7181/10000 train_time:542784ms step_avg:75.59ms +[2025-09-02 07:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:33:34] [Rank 0] PRINT: step:7200/10000 val_loss:3.9060 svd_entropy: attn_qk:H=0.7586,top10E=0.26,eRank=177.1,q75/q25=89.26 attn_vo:H=0.7848,top10E=0.15,eRank=257.4,q75/q25=inf mlp_w1:H=0.7897,top10E=0.25,eRank=210.3,q75/q25=21.96 mlp_w2:H=0.8554,top10E=0.13,eRank=300.8,q75/q25=34.19 vo_prod:H=0.6622,top10E=0.23,eRank=117.4,q75/q25=inf train_time:544555ms step_avg:75.63ms +[2025-09-02 07:33:34] [Rank 0] PRINT: step:7200/10000 val_loss:3.9060 svd_entropy: attn_qk:H=0.7586,top10E=0.26,eRank=177.1,q75/q25=89.26 attn_vo:H=0.7848,top10E=0.15,eRank=257.4,q75/q25=inf mlp_w1:H=0.7897,top10E=0.25,eRank=210.3,q75/q25=21.96 mlp_w2:H=0.8554,top10E=0.13,eRank=300.8,q75/q25=34.19 vo_prod:H=0.6622,top10E=0.23,eRank=117.4,q75/q25=inf train_time:544555ms step_avg:75.63ms +[2025-09-02 07:33:34] [Rank 0] step:7201/10000 train_time:544567ms step_avg:75.62ms +[2025-09-02 07:33:34] [Rank 0] step:7201/10000 train_time:544567ms step_avg:75.62ms +[2025-09-02 07:33:35] [Rank 0] step:7221/10000 train_time:546024ms step_avg:75.62ms +[2025-09-02 07:33:35] [Rank 0] step:7221/10000 train_time:546024ms step_avg:75.62ms +[2025-09-02 07:33:37] [Rank 0] step:7241/10000 train_time:547626ms step_avg:75.63ms +[2025-09-02 07:33:37] [Rank 0] step:7241/10000 train_time:547626ms step_avg:75.63ms +[2025-09-02 07:33:39] [Rank 0] step:7261/10000 train_time:549228ms step_avg:75.64ms +[2025-09-02 07:33:39] [Rank 0] step:7261/10000 train_time:549228ms step_avg:75.64ms +[2025-09-02 07:33:40] [Rank 0] step:7281/10000 train_time:550840ms step_avg:75.65ms +[2025-09-02 07:33:40] [Rank 0] step:7281/10000 train_time:550840ms step_avg:75.65ms +[2025-09-02 07:33:42] [Rank 0] step:7301/10000 train_time:552448ms step_avg:75.67ms +[2025-09-02 07:33:42] [Rank 0] step:7301/10000 train_time:552448ms step_avg:75.67ms +[2025-09-02 07:33:43] [Rank 0] step:7321/10000 train_time:554057ms step_avg:75.68ms +[2025-09-02 07:33:43] [Rank 0] step:7321/10000 train_time:554057ms step_avg:75.68ms +[2025-09-02 07:33:45] [Rank 
0] step:7341/10000 train_time:555665ms step_avg:75.69ms +[2025-09-02 07:33:45] [Rank 0] step:7341/10000 train_time:555665ms step_avg:75.69ms +[2025-09-02 07:33:47] [Rank 0] step:7361/10000 train_time:557275ms step_avg:75.71ms +[2025-09-02 07:33:47] [Rank 0] step:7361/10000 train_time:557275ms step_avg:75.71ms +[2025-09-02 07:33:48] [Rank 0] step:7381/10000 train_time:558890ms step_avg:75.72ms +[2025-09-02 07:33:48] [Rank 0] step:7381/10000 train_time:558890ms step_avg:75.72ms +[2025-09-02 07:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:34:01] [Rank 0] PRINT: step:7400/10000 val_loss:3.8854 svd_entropy: attn_qk:H=0.7597,top10E=0.26,eRank=178.1,q75/q25=89.10 attn_vo:H=0.7858,top10E=0.15,eRank=258.7,q75/q25=inf mlp_w1:H=0.7911,top10E=0.25,eRank=212.0,q75/q25=22.24 mlp_w2:H=0.8562,top10E=0.13,eRank=302.4,q75/q25=34.33 vo_prod:H=0.6636,top10E=0.22,eRank=118.6,q75/q25=inf train_time:560643ms step_avg:75.76ms +[2025-09-02 07:34:01] [Rank 0] PRINT: step:7400/10000 val_loss:3.8854 svd_entropy: attn_qk:H=0.7597,top10E=0.26,eRank=178.1,q75/q25=89.10 attn_vo:H=0.7858,top10E=0.15,eRank=258.7,q75/q25=inf mlp_w1:H=0.7911,top10E=0.25,eRank=212.0,q75/q25=22.24 mlp_w2:H=0.8562,top10E=0.13,eRank=302.4,q75/q25=34.33 vo_prod:H=0.6636,top10E=0.22,eRank=118.6,q75/q25=inf train_time:560643ms step_avg:75.76ms +[2025-09-02 07:34:02] [Rank 0] step:7401/10000 train_time:560655ms step_avg:75.75ms +[2025-09-02 07:34:02] [Rank 0] step:7401/10000 train_time:560655ms step_avg:75.75ms +[2025-09-02 07:34:03] [Rank 0] step:7421/10000 train_time:562118ms step_avg:75.75ms +[2025-09-02 07:34:03] [Rank 0] step:7421/10000 train_time:562118ms step_avg:75.75ms +[2025-09-02 07:34:05] [Rank 0] step:7441/10000 train_time:563721ms step_avg:75.76ms +[2025-09-02 
07:34:05] [Rank 0] step:7441/10000 train_time:563721ms step_avg:75.76ms +[2025-09-02 07:34:06] [Rank 0] step:7461/10000 train_time:565332ms step_avg:75.77ms +[2025-09-02 07:34:06] [Rank 0] step:7461/10000 train_time:565332ms step_avg:75.77ms +[2025-09-02 07:34:08] [Rank 0] step:7481/10000 train_time:566945ms step_avg:75.78ms +[2025-09-02 07:34:08] [Rank 0] step:7481/10000 train_time:566945ms step_avg:75.78ms +[2025-09-02 07:34:10] [Rank 0] step:7501/10000 train_time:568556ms step_avg:75.80ms +[2025-09-02 07:34:10] [Rank 0] step:7501/10000 train_time:568556ms step_avg:75.80ms +[2025-09-02 07:34:11] [Rank 0] step:7521/10000 train_time:570168ms step_avg:75.81ms +[2025-09-02 07:34:11] [Rank 0] step:7521/10000 train_time:570168ms step_avg:75.81ms +[2025-09-02 07:34:13] [Rank 0] step:7541/10000 train_time:571787ms step_avg:75.82ms +[2025-09-02 07:34:13] [Rank 0] step:7541/10000 train_time:571787ms step_avg:75.82ms +[2025-09-02 07:34:14] [Rank 0] step:7561/10000 train_time:573388ms step_avg:75.83ms +[2025-09-02 07:34:14] [Rank 0] step:7561/10000 train_time:573388ms step_avg:75.83ms +[2025-09-02 07:34:16] [Rank 0] step:7581/10000 train_time:575011ms step_avg:75.85ms +[2025-09-02 07:34:16] [Rank 0] step:7581/10000 train_time:575011ms step_avg:75.85ms +[2025-09-02 07:34:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:34:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:34:29] [Rank 0] PRINT: step:7600/10000 val_loss:3.8820 svd_entropy: attn_qk:H=0.7608,top10E=0.26,eRank=179.1,q75/q25=88.61 attn_vo:H=0.7867,top10E=0.15,eRank=259.9,q75/q25=inf mlp_w1:H=0.7924,top10E=0.25,eRank=213.7,q75/q25=22.46 mlp_w2:H=0.8569,top10E=0.13,eRank=303.8,q75/q25=34.53 vo_prod:H=0.6650,top10E=0.22,eRank=119.8,q75/q25=inf train_time:576795ms step_avg:75.89ms +[2025-09-02 07:34:29] [Rank 0] PRINT: step:7600/10000 val_loss:3.8820 svd_entropy: attn_qk:H=0.7608,top10E=0.26,eRank=179.1,q75/q25=88.61 attn_vo:H=0.7867,top10E=0.15,eRank=259.9,q75/q25=inf mlp_w1:H=0.7924,top10E=0.25,eRank=213.7,q75/q25=22.46 mlp_w2:H=0.8569,top10E=0.13,eRank=303.8,q75/q25=34.53 vo_prod:H=0.6650,top10E=0.22,eRank=119.8,q75/q25=inf train_time:576795ms step_avg:75.89ms +[2025-09-02 07:34:29] [Rank 0] step:7601/10000 train_time:576806ms step_avg:75.89ms +[2025-09-02 07:34:29] [Rank 0] step:7601/10000 train_time:576806ms step_avg:75.89ms +[2025-09-02 07:34:31] [Rank 0] step:7621/10000 train_time:578271ms step_avg:75.88ms +[2025-09-02 07:34:31] [Rank 0] step:7621/10000 train_time:578271ms step_avg:75.88ms +[2025-09-02 07:34:33] [Rank 0] step:7641/10000 train_time:579879ms step_avg:75.89ms +[2025-09-02 07:34:33] [Rank 0] step:7641/10000 train_time:579879ms step_avg:75.89ms +[2025-09-02 07:34:34] [Rank 0] step:7661/10000 train_time:581494ms step_avg:75.90ms +[2025-09-02 07:34:34] [Rank 0] step:7661/10000 train_time:581494ms step_avg:75.90ms +[2025-09-02 07:34:36] [Rank 0] step:7681/10000 train_time:583102ms step_avg:75.91ms +[2025-09-02 07:34:36] [Rank 0] step:7681/10000 train_time:583102ms step_avg:75.91ms +[2025-09-02 07:34:37] [Rank 0] step:7701/10000 train_time:584710ms step_avg:75.93ms +[2025-09-02 07:34:37] [Rank 0] step:7701/10000 train_time:584710ms step_avg:75.93ms +[2025-09-02 07:34:39] [Rank 0] step:7721/10000 train_time:586334ms step_avg:75.94ms +[2025-09-02 07:34:39] [Rank 0] step:7721/10000 train_time:586334ms step_avg:75.94ms +[2025-09-02 07:34:41] [Rank 
0] step:7741/10000 train_time:587942ms step_avg:75.95ms +[2025-09-02 07:34:41] [Rank 0] step:7741/10000 train_time:587942ms step_avg:75.95ms +[2025-09-02 07:34:42] [Rank 0] step:7761/10000 train_time:589559ms step_avg:75.96ms +[2025-09-02 07:34:42] [Rank 0] step:7761/10000 train_time:589559ms step_avg:75.96ms +[2025-09-02 07:34:44] [Rank 0] step:7781/10000 train_time:591177ms step_avg:75.98ms +[2025-09-02 07:34:44] [Rank 0] step:7781/10000 train_time:591177ms step_avg:75.98ms +[2025-09-02 07:34:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:34:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:34:57] [Rank 0] PRINT: step:7800/10000 val_loss:3.8662 svd_entropy: attn_qk:H=0.7618,top10E=0.25,eRank=179.9,q75/q25=88.37 attn_vo:H=0.7876,top10E=0.15,eRank=261.0,q75/q25=inf mlp_w1:H=0.7936,top10E=0.25,eRank=215.2,q75/q25=22.66 mlp_w2:H=0.8576,top10E=0.13,eRank=305.2,q75/q25=34.57 vo_prod:H=0.6662,top10E=0.22,eRank=120.8,q75/q25=inf train_time:592965ms step_avg:76.02ms +[2025-09-02 07:34:57] [Rank 0] PRINT: step:7800/10000 val_loss:3.8662 svd_entropy: attn_qk:H=0.7618,top10E=0.25,eRank=179.9,q75/q25=88.37 attn_vo:H=0.7876,top10E=0.15,eRank=261.0,q75/q25=inf mlp_w1:H=0.7936,top10E=0.25,eRank=215.2,q75/q25=22.66 mlp_w2:H=0.8576,top10E=0.13,eRank=305.2,q75/q25=34.57 vo_prod:H=0.6662,top10E=0.22,eRank=120.8,q75/q25=inf train_time:592965ms step_avg:76.02ms +[2025-09-02 07:34:57] [Rank 0] step:7801/10000 train_time:592977ms step_avg:76.01ms +[2025-09-02 07:34:57] [Rank 0] step:7801/10000 train_time:592977ms step_avg:76.01ms +[2025-09-02 07:34:59] [Rank 0] step:7821/10000 train_time:594436ms step_avg:76.01ms +[2025-09-02 07:34:59] [Rank 0] step:7821/10000 train_time:594436ms step_avg:76.01ms +[2025-09-02 07:35:00] [Rank 0] step:7841/10000 train_time:596046ms step_avg:76.02ms +[2025-09-02 
07:35:00] [Rank 0] step:7841/10000 train_time:596046ms step_avg:76.02ms +[2025-09-02 07:35:02] [Rank 0] step:7861/10000 train_time:597661ms step_avg:76.03ms +[2025-09-02 07:35:02] [Rank 0] step:7861/10000 train_time:597661ms step_avg:76.03ms +[2025-09-02 07:35:04] [Rank 0] step:7881/10000 train_time:599280ms step_avg:76.04ms +[2025-09-02 07:35:04] [Rank 0] step:7881/10000 train_time:599280ms step_avg:76.04ms +[2025-09-02 07:35:05] [Rank 0] step:7901/10000 train_time:600889ms step_avg:76.05ms +[2025-09-02 07:35:05] [Rank 0] step:7901/10000 train_time:600889ms step_avg:76.05ms +[2025-09-02 07:35:07] [Rank 0] step:7921/10000 train_time:602502ms step_avg:76.06ms +[2025-09-02 07:35:07] [Rank 0] step:7921/10000 train_time:602502ms step_avg:76.06ms +[2025-09-02 07:35:09] [Rank 0] step:7941/10000 train_time:604126ms step_avg:76.08ms +[2025-09-02 07:35:09] [Rank 0] step:7941/10000 train_time:604126ms step_avg:76.08ms +[2025-09-02 07:35:10] [Rank 0] step:7961/10000 train_time:605747ms step_avg:76.09ms +[2025-09-02 07:35:10] [Rank 0] step:7961/10000 train_time:605747ms step_avg:76.09ms +[2025-09-02 07:35:12] [Rank 0] step:7981/10000 train_time:607358ms step_avg:76.10ms +[2025-09-02 07:35:12] [Rank 0] step:7981/10000 train_time:607358ms step_avg:76.10ms +[2025-09-02 07:35:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:35:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:35:25] [Rank 0] PRINT: step:8000/10000 val_loss:3.8515 svd_entropy: attn_qk:H=0.7627,top10E=0.25,eRank=180.8,q75/q25=88.54 attn_vo:H=0.7884,top10E=0.15,eRank=262.1,q75/q25=inf mlp_w1:H=0.7946,top10E=0.25,eRank=216.5,q75/q25=22.87 mlp_w2:H=0.8583,top10E=0.13,eRank=306.6,q75/q25=34.57 vo_prod:H=0.6674,top10E=0.22,eRank=121.9,q75/q25=inf train_time:609134ms step_avg:76.14ms +[2025-09-02 07:35:25] [Rank 0] PRINT: step:8000/10000 val_loss:3.8515 svd_entropy: attn_qk:H=0.7627,top10E=0.25,eRank=180.8,q75/q25=88.54 attn_vo:H=0.7884,top10E=0.15,eRank=262.1,q75/q25=inf mlp_w1:H=0.7946,top10E=0.25,eRank=216.5,q75/q25=22.87 mlp_w2:H=0.8583,top10E=0.13,eRank=306.6,q75/q25=34.57 vo_prod:H=0.6674,top10E=0.22,eRank=121.9,q75/q25=inf train_time:609134ms step_avg:76.14ms +[2025-09-02 07:35:25] [Rank 0] step:8001/10000 train_time:609146ms step_avg:76.13ms +[2025-09-02 07:35:25] [Rank 0] step:8001/10000 train_time:609146ms step_avg:76.13ms +[2025-09-02 07:35:27] [Rank 0] step:8021/10000 train_time:610605ms step_avg:76.13ms +[2025-09-02 07:35:27] [Rank 0] step:8021/10000 train_time:610605ms step_avg:76.13ms +[2025-09-02 07:35:28] [Rank 0] step:8041/10000 train_time:612231ms step_avg:76.14ms +[2025-09-02 07:35:28] [Rank 0] step:8041/10000 train_time:612231ms step_avg:76.14ms +[2025-09-02 07:35:30] [Rank 0] step:8061/10000 train_time:613837ms step_avg:76.15ms +[2025-09-02 07:35:30] [Rank 0] step:8061/10000 train_time:613837ms step_avg:76.15ms +[2025-09-02 07:35:32] [Rank 0] step:8081/10000 train_time:615440ms step_avg:76.16ms +[2025-09-02 07:35:32] [Rank 0] step:8081/10000 train_time:615440ms step_avg:76.16ms +[2025-09-02 07:35:33] [Rank 0] step:8101/10000 train_time:617055ms step_avg:76.17ms +[2025-09-02 07:35:33] [Rank 0] step:8101/10000 train_time:617055ms step_avg:76.17ms +[2025-09-02 07:35:35] [Rank 0] step:8121/10000 train_time:618661ms step_avg:76.18ms +[2025-09-02 07:35:35] [Rank 0] step:8121/10000 train_time:618661ms step_avg:76.18ms +[2025-09-02 07:35:36] [Rank 
0] step:8141/10000 train_time:620376ms step_avg:76.20ms +[2025-09-02 07:35:36] [Rank 0] step:8141/10000 train_time:620376ms step_avg:76.20ms +[2025-09-02 07:35:38] [Rank 0] step:8161/10000 train_time:622000ms step_avg:76.22ms +[2025-09-02 07:35:38] [Rank 0] step:8161/10000 train_time:622000ms step_avg:76.22ms +[2025-09-02 07:35:40] [Rank 0] step:8181/10000 train_time:623641ms step_avg:76.23ms +[2025-09-02 07:35:40] [Rank 0] step:8181/10000 train_time:623641ms step_avg:76.23ms +[2025-09-02 07:35:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:35:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:35:53] [Rank 0] PRINT: step:8200/10000 val_loss:3.8417 svd_entropy: attn_qk:H=0.7635,top10E=0.25,eRank=181.5,q75/q25=88.73 attn_vo:H=0.7891,top10E=0.15,eRank=263.0,q75/q25=inf mlp_w1:H=0.7955,top10E=0.24,eRank=217.7,q75/q25=22.99 mlp_w2:H=0.8589,top10E=0.13,eRank=307.8,q75/q25=34.55 vo_prod:H=0.6685,top10E=0.22,eRank=122.9,q75/q25=inf train_time:625469ms step_avg:76.28ms +[2025-09-02 07:35:53] [Rank 0] PRINT: step:8200/10000 val_loss:3.8417 svd_entropy: attn_qk:H=0.7635,top10E=0.25,eRank=181.5,q75/q25=88.73 attn_vo:H=0.7891,top10E=0.15,eRank=263.0,q75/q25=inf mlp_w1:H=0.7955,top10E=0.24,eRank=217.7,q75/q25=22.99 mlp_w2:H=0.8589,top10E=0.13,eRank=307.8,q75/q25=34.55 vo_prod:H=0.6685,top10E=0.22,eRank=122.9,q75/q25=inf train_time:625469ms step_avg:76.28ms +[2025-09-02 07:35:53] [Rank 0] step:8201/10000 train_time:625480ms step_avg:76.27ms +[2025-09-02 07:35:53] [Rank 0] step:8201/10000 train_time:625480ms step_avg:76.27ms +[2025-09-02 07:35:55] [Rank 0] step:8221/10000 train_time:626985ms step_avg:76.27ms +[2025-09-02 07:35:55] [Rank 0] step:8221/10000 train_time:626985ms step_avg:76.27ms +[2025-09-02 07:35:56] [Rank 0] step:8241/10000 train_time:628634ms step_avg:76.28ms +[2025-09-02 
07:35:56] [Rank 0] step:8241/10000 train_time:628634ms step_avg:76.28ms +[2025-09-02 07:35:58] [Rank 0] step:8261/10000 train_time:630270ms step_avg:76.29ms +[2025-09-02 07:35:58] [Rank 0] step:8261/10000 train_time:630270ms step_avg:76.29ms +[2025-09-02 07:36:00] [Rank 0] step:8281/10000 train_time:631913ms step_avg:76.31ms +[2025-09-02 07:36:00] [Rank 0] step:8281/10000 train_time:631913ms step_avg:76.31ms +[2025-09-02 07:36:01] [Rank 0] step:8301/10000 train_time:633550ms step_avg:76.32ms +[2025-09-02 07:36:01] [Rank 0] step:8301/10000 train_time:633550ms step_avg:76.32ms +[2025-09-02 07:36:03] [Rank 0] step:8321/10000 train_time:635180ms step_avg:76.33ms +[2025-09-02 07:36:03] [Rank 0] step:8321/10000 train_time:635180ms step_avg:76.33ms +[2025-09-02 07:36:05] [Rank 0] step:8341/10000 train_time:636818ms step_avg:76.35ms +[2025-09-02 07:36:05] [Rank 0] step:8341/10000 train_time:636818ms step_avg:76.35ms +[2025-09-02 07:36:06] [Rank 0] step:8361/10000 train_time:638458ms step_avg:76.36ms +[2025-09-02 07:36:06] [Rank 0] step:8361/10000 train_time:638458ms step_avg:76.36ms +[2025-09-02 07:36:08] [Rank 0] step:8381/10000 train_time:640094ms step_avg:76.37ms +[2025-09-02 07:36:08] [Rank 0] step:8381/10000 train_time:640094ms step_avg:76.37ms +[2025-09-02 07:36:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:36:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:36:21] [Rank 0] PRINT: step:8400/10000 val_loss:3.8320 svd_entropy: attn_qk:H=0.7642,top10E=0.25,eRank=182.1,q75/q25=88.89 attn_vo:H=0.7897,top10E=0.15,eRank=263.9,q75/q25=inf mlp_w1:H=0.7963,top10E=0.24,eRank=218.7,q75/q25=23.16 mlp_w2:H=0.8594,top10E=0.13,eRank=308.9,q75/q25=34.40 vo_prod:H=0.6695,top10E=0.22,eRank=123.7,q75/q25=inf train_time:641897ms step_avg:76.42ms +[2025-09-02 07:36:21] [Rank 0] PRINT: step:8400/10000 val_loss:3.8320 svd_entropy: attn_qk:H=0.7642,top10E=0.25,eRank=182.1,q75/q25=88.89 attn_vo:H=0.7897,top10E=0.15,eRank=263.9,q75/q25=inf mlp_w1:H=0.7963,top10E=0.24,eRank=218.7,q75/q25=23.16 mlp_w2:H=0.8594,top10E=0.13,eRank=308.9,q75/q25=34.40 vo_prod:H=0.6695,top10E=0.22,eRank=123.7,q75/q25=inf train_time:641897ms step_avg:76.42ms +[2025-09-02 07:36:21] [Rank 0] step:8401/10000 train_time:641909ms step_avg:76.41ms +[2025-09-02 07:36:21] [Rank 0] step:8401/10000 train_time:641909ms step_avg:76.41ms +[2025-09-02 07:36:23] [Rank 0] step:8421/10000 train_time:643384ms step_avg:76.40ms +[2025-09-02 07:36:23] [Rank 0] step:8421/10000 train_time:643384ms step_avg:76.40ms +[2025-09-02 07:36:25] [Rank 0] step:8441/10000 train_time:645018ms step_avg:76.41ms +[2025-09-02 07:36:25] [Rank 0] step:8441/10000 train_time:645018ms step_avg:76.41ms +[2025-09-02 07:36:26] [Rank 0] step:8461/10000 train_time:646652ms step_avg:76.43ms +[2025-09-02 07:36:26] [Rank 0] step:8461/10000 train_time:646652ms step_avg:76.43ms +[2025-09-02 07:36:28] [Rank 0] step:8481/10000 train_time:648296ms step_avg:76.44ms +[2025-09-02 07:36:28] [Rank 0] step:8481/10000 train_time:648296ms step_avg:76.44ms +[2025-09-02 07:36:29] [Rank 0] step:8501/10000 train_time:649959ms step_avg:76.46ms +[2025-09-02 07:36:29] [Rank 0] step:8501/10000 train_time:649959ms step_avg:76.46ms +[2025-09-02 07:36:31] [Rank 0] step:8521/10000 train_time:651605ms step_avg:76.47ms +[2025-09-02 07:36:31] [Rank 0] step:8521/10000 train_time:651605ms step_avg:76.47ms +[2025-09-02 07:36:33] [Rank 
0] step:8541/10000 train_time:653265ms step_avg:76.49ms +[2025-09-02 07:36:33] [Rank 0] step:8541/10000 train_time:653265ms step_avg:76.49ms +[2025-09-02 07:36:34] [Rank 0] step:8561/10000 train_time:654906ms step_avg:76.50ms +[2025-09-02 07:36:34] [Rank 0] step:8561/10000 train_time:654906ms step_avg:76.50ms +[2025-09-02 07:36:36] [Rank 0] step:8581/10000 train_time:656546ms step_avg:76.51ms +[2025-09-02 07:36:36] [Rank 0] step:8581/10000 train_time:656546ms step_avg:76.51ms +[2025-09-02 07:36:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:36:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:36:49] [Rank 0] PRINT: step:8600/10000 val_loss:3.8234 svd_entropy: attn_qk:H=0.7648,top10E=0.25,eRank=182.7,q75/q25=88.58 attn_vo:H=0.7903,top10E=0.15,eRank=264.6,q75/q25=inf mlp_w1:H=0.7971,top10E=0.24,eRank=219.7,q75/q25=23.18 mlp_w2:H=0.8599,top10E=0.13,eRank=309.9,q75/q25=34.51 vo_prod:H=0.6703,top10E=0.22,eRank=124.5,q75/q25=inf train_time:658345ms step_avg:76.55ms +[2025-09-02 07:36:49] [Rank 0] PRINT: step:8600/10000 val_loss:3.8234 svd_entropy: attn_qk:H=0.7648,top10E=0.25,eRank=182.7,q75/q25=88.58 attn_vo:H=0.7903,top10E=0.15,eRank=264.6,q75/q25=inf mlp_w1:H=0.7971,top10E=0.24,eRank=219.7,q75/q25=23.18 mlp_w2:H=0.8599,top10E=0.13,eRank=309.9,q75/q25=34.51 vo_prod:H=0.6703,top10E=0.22,eRank=124.5,q75/q25=inf train_time:658345ms step_avg:76.55ms +[2025-09-02 07:36:49] [Rank 0] step:8601/10000 train_time:658357ms step_avg:76.54ms +[2025-09-02 07:36:49] [Rank 0] step:8601/10000 train_time:658357ms step_avg:76.54ms +[2025-09-02 07:36:51] [Rank 0] step:8621/10000 train_time:659854ms step_avg:76.54ms +[2025-09-02 07:36:51] [Rank 0] step:8621/10000 train_time:659854ms step_avg:76.54ms +[2025-09-02 07:36:53] [Rank 0] step:8641/10000 train_time:661495ms step_avg:76.55ms +[2025-09-02 
07:36:53] [Rank 0] step:8641/10000 train_time:661495ms step_avg:76.55ms +[2025-09-02 07:36:54] [Rank 0] step:8661/10000 train_time:663131ms step_avg:76.57ms +[2025-09-02 07:36:54] [Rank 0] step:8661/10000 train_time:663131ms step_avg:76.57ms +[2025-09-02 07:36:56] [Rank 0] step:8681/10000 train_time:664770ms step_avg:76.58ms +[2025-09-02 07:36:56] [Rank 0] step:8681/10000 train_time:664770ms step_avg:76.58ms +[2025-09-02 07:36:58] [Rank 0] step:8701/10000 train_time:666405ms step_avg:76.59ms +[2025-09-02 07:36:58] [Rank 0] step:8701/10000 train_time:666405ms step_avg:76.59ms +[2025-09-02 07:36:59] [Rank 0] step:8721/10000 train_time:668048ms step_avg:76.60ms +[2025-09-02 07:36:59] [Rank 0] step:8721/10000 train_time:668048ms step_avg:76.60ms +[2025-09-02 07:37:01] [Rank 0] step:8741/10000 train_time:669683ms step_avg:76.61ms +[2025-09-02 07:37:01] [Rank 0] step:8741/10000 train_time:669683ms step_avg:76.61ms +[2025-09-02 07:37:03] [Rank 0] step:8761/10000 train_time:671318ms step_avg:76.63ms +[2025-09-02 07:37:03] [Rank 0] step:8761/10000 train_time:671318ms step_avg:76.63ms +[2025-09-02 07:37:04] [Rank 0] step:8781/10000 train_time:672968ms step_avg:76.64ms +[2025-09-02 07:37:04] [Rank 0] step:8781/10000 train_time:672968ms step_avg:76.64ms +[2025-09-02 07:37:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:37:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:37:17] [Rank 0] PRINT: step:8800/10000 val_loss:3.8148 svd_entropy: attn_qk:H=0.7653,top10E=0.25,eRank=183.2,q75/q25=88.76 attn_vo:H=0.7907,top10E=0.14,eRank=265.3,q75/q25=inf mlp_w1:H=0.7977,top10E=0.24,eRank=220.5,q75/q25=23.25 mlp_w2:H=0.8604,top10E=0.13,eRank=310.9,q75/q25=34.57 vo_prod:H=0.6711,top10E=0.22,eRank=125.2,q75/q25=inf train_time:674772ms step_avg:76.68ms +[2025-09-02 07:37:17] [Rank 0] PRINT: step:8800/10000 val_loss:3.8148 svd_entropy: attn_qk:H=0.7653,top10E=0.25,eRank=183.2,q75/q25=88.76 attn_vo:H=0.7907,top10E=0.14,eRank=265.3,q75/q25=inf mlp_w1:H=0.7977,top10E=0.24,eRank=220.5,q75/q25=23.25 mlp_w2:H=0.8604,top10E=0.13,eRank=310.9,q75/q25=34.57 vo_prod:H=0.6711,top10E=0.22,eRank=125.2,q75/q25=inf train_time:674772ms step_avg:76.68ms +[2025-09-02 07:37:17] [Rank 0] step:8801/10000 train_time:674784ms step_avg:76.67ms +[2025-09-02 07:37:17] [Rank 0] step:8801/10000 train_time:674784ms step_avg:76.67ms +[2025-09-02 07:37:19] [Rank 0] step:8821/10000 train_time:676270ms step_avg:76.67ms +[2025-09-02 07:37:19] [Rank 0] step:8821/10000 train_time:676270ms step_avg:76.67ms +[2025-09-02 07:37:21] [Rank 0] step:8841/10000 train_time:677928ms step_avg:76.68ms +[2025-09-02 07:37:21] [Rank 0] step:8841/10000 train_time:677928ms step_avg:76.68ms +[2025-09-02 07:37:22] [Rank 0] step:8861/10000 train_time:679565ms step_avg:76.69ms +[2025-09-02 07:37:22] [Rank 0] step:8861/10000 train_time:679565ms step_avg:76.69ms +[2025-09-02 07:37:24] [Rank 0] step:8881/10000 train_time:681207ms step_avg:76.70ms +[2025-09-02 07:37:24] [Rank 0] step:8881/10000 train_time:681207ms step_avg:76.70ms +[2025-09-02 07:37:26] [Rank 0] step:8901/10000 train_time:682853ms step_avg:76.72ms +[2025-09-02 07:37:26] [Rank 0] step:8901/10000 train_time:682853ms step_avg:76.72ms +[2025-09-02 07:37:27] [Rank 0] step:8921/10000 train_time:684500ms step_avg:76.73ms +[2025-09-02 07:37:27] [Rank 0] step:8921/10000 train_time:684500ms step_avg:76.73ms +[2025-09-02 07:37:29] [Rank 
0] step:8941/10000 train_time:686153ms step_avg:76.74ms +[2025-09-02 07:37:29] [Rank 0] step:8941/10000 train_time:686153ms step_avg:76.74ms +[2025-09-02 07:37:31] [Rank 0] step:8961/10000 train_time:687792ms step_avg:76.75ms +[2025-09-02 07:37:31] [Rank 0] step:8961/10000 train_time:687792ms step_avg:76.75ms +[2025-09-02 07:37:32] [Rank 0] step:8981/10000 train_time:689430ms step_avg:76.77ms +[2025-09-02 07:37:32] [Rank 0] step:8981/10000 train_time:689430ms step_avg:76.77ms +[2025-09-02 07:37:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:37:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:37:46] [Rank 0] PRINT: step:9000/10000 val_loss:3.8057 svd_entropy: attn_qk:H=0.7658,top10E=0.25,eRank=183.7,q75/q25=88.51 attn_vo:H=0.7912,top10E=0.14,eRank=265.9,q75/q25=inf mlp_w1:H=0.7982,top10E=0.24,eRank=221.2,q75/q25=23.31 mlp_w2:H=0.8608,top10E=0.13,eRank=311.8,q75/q25=34.40 vo_prod:H=0.6718,top10E=0.22,eRank=125.8,q75/q25=inf train_time:691234ms step_avg:76.80ms +[2025-09-02 07:37:46] [Rank 0] PRINT: step:9000/10000 val_loss:3.8057 svd_entropy: attn_qk:H=0.7658,top10E=0.25,eRank=183.7,q75/q25=88.51 attn_vo:H=0.7912,top10E=0.14,eRank=265.9,q75/q25=inf mlp_w1:H=0.7982,top10E=0.24,eRank=221.2,q75/q25=23.31 mlp_w2:H=0.8608,top10E=0.13,eRank=311.8,q75/q25=34.40 vo_prod:H=0.6718,top10E=0.22,eRank=125.8,q75/q25=inf train_time:691234ms step_avg:76.80ms +[2025-09-02 07:37:46] [Rank 0] step:9001/10000 train_time:691246ms step_avg:76.80ms +[2025-09-02 07:37:46] [Rank 0] step:9001/10000 train_time:691246ms step_avg:76.80ms +[2025-09-02 07:37:47] [Rank 0] step:9021/10000 train_time:692719ms step_avg:76.79ms +[2025-09-02 07:37:47] [Rank 0] step:9021/10000 train_time:692719ms step_avg:76.79ms +[2025-09-02 07:37:49] [Rank 0] step:9041/10000 train_time:694352ms step_avg:76.80ms +[2025-09-02 
07:37:49] [Rank 0] step:9041/10000 train_time:694352ms step_avg:76.80ms +[2025-09-02 07:37:51] [Rank 0] step:9061/10000 train_time:696005ms step_avg:76.81ms +[2025-09-02 07:37:51] [Rank 0] step:9061/10000 train_time:696005ms step_avg:76.81ms +[2025-09-02 07:37:52] [Rank 0] step:9081/10000 train_time:697661ms step_avg:76.83ms +[2025-09-02 07:37:52] [Rank 0] step:9081/10000 train_time:697661ms step_avg:76.83ms +[2025-09-02 07:37:54] [Rank 0] step:9101/10000 train_time:699319ms step_avg:76.84ms +[2025-09-02 07:37:54] [Rank 0] step:9101/10000 train_time:699319ms step_avg:76.84ms +[2025-09-02 07:37:56] [Rank 0] step:9121/10000 train_time:700965ms step_avg:76.85ms +[2025-09-02 07:37:56] [Rank 0] step:9121/10000 train_time:700965ms step_avg:76.85ms +[2025-09-02 07:37:57] [Rank 0] step:9141/10000 train_time:702597ms step_avg:76.86ms +[2025-09-02 07:37:57] [Rank 0] step:9141/10000 train_time:702597ms step_avg:76.86ms +[2025-09-02 07:37:59] [Rank 0] step:9161/10000 train_time:704229ms step_avg:76.87ms +[2025-09-02 07:37:59] [Rank 0] step:9161/10000 train_time:704229ms step_avg:76.87ms +[2025-09-02 07:38:00] [Rank 0] step:9181/10000 train_time:705902ms step_avg:76.89ms +[2025-09-02 07:38:00] [Rank 0] step:9181/10000 train_time:705902ms step_avg:76.89ms +[2025-09-02 07:38:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:38:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:38:14] [Rank 0] PRINT: step:9200/10000 val_loss:3.7984 svd_entropy: attn_qk:H=0.7663,top10E=0.25,eRank=184.1,q75/q25=88.24 attn_vo:H=0.7916,top10E=0.14,eRank=266.4,q75/q25=inf mlp_w1:H=0.7987,top10E=0.24,eRank=221.8,q75/q25=23.30 mlp_w2:H=0.8612,top10E=0.13,eRank=312.5,q75/q25=34.36 vo_prod:H=0.6724,top10E=0.21,eRank=126.4,q75/q25=inf train_time:707710ms step_avg:76.92ms +[2025-09-02 07:38:14] [Rank 0] PRINT: step:9200/10000 val_loss:3.7984 svd_entropy: attn_qk:H=0.7663,top10E=0.25,eRank=184.1,q75/q25=88.24 attn_vo:H=0.7916,top10E=0.14,eRank=266.4,q75/q25=inf mlp_w1:H=0.7987,top10E=0.24,eRank=221.8,q75/q25=23.30 mlp_w2:H=0.8612,top10E=0.13,eRank=312.5,q75/q25=34.36 vo_prod:H=0.6724,top10E=0.21,eRank=126.4,q75/q25=inf train_time:707710ms step_avg:76.92ms +[2025-09-02 07:38:14] [Rank 0] step:9201/10000 train_time:707721ms step_avg:76.92ms +[2025-09-02 07:38:14] [Rank 0] step:9201/10000 train_time:707721ms step_avg:76.92ms +[2025-09-02 07:38:15] [Rank 0] step:9221/10000 train_time:709223ms step_avg:76.91ms +[2025-09-02 07:38:15] [Rank 0] step:9221/10000 train_time:709223ms step_avg:76.91ms +[2025-09-02 07:38:17] [Rank 0] step:9241/10000 train_time:710874ms step_avg:76.93ms +[2025-09-02 07:38:17] [Rank 0] step:9241/10000 train_time:710874ms step_avg:76.93ms +[2025-09-02 07:38:19] [Rank 0] step:9261/10000 train_time:712525ms step_avg:76.94ms +[2025-09-02 07:38:19] [Rank 0] step:9261/10000 train_time:712525ms step_avg:76.94ms +[2025-09-02 07:38:20] [Rank 0] step:9281/10000 train_time:714153ms step_avg:76.95ms +[2025-09-02 07:38:20] [Rank 0] step:9281/10000 train_time:714153ms step_avg:76.95ms +[2025-09-02 07:38:22] [Rank 0] step:9301/10000 train_time:715794ms step_avg:76.96ms +[2025-09-02 07:38:22] [Rank 0] step:9301/10000 train_time:715794ms step_avg:76.96ms +[2025-09-02 07:38:24] [Rank 0] step:9321/10000 train_time:717441ms step_avg:76.97ms +[2025-09-02 07:38:24] [Rank 0] step:9321/10000 train_time:717441ms step_avg:76.97ms +[2025-09-02 07:38:25] [Rank 
0] step:9341/10000 train_time:719082ms step_avg:76.98ms +[2025-09-02 07:38:25] [Rank 0] step:9341/10000 train_time:719082ms step_avg:76.98ms +[2025-09-02 07:38:27] [Rank 0] step:9361/10000 train_time:720726ms step_avg:76.99ms +[2025-09-02 07:38:27] [Rank 0] step:9361/10000 train_time:720726ms step_avg:76.99ms +[2025-09-02 07:38:29] [Rank 0] step:9381/10000 train_time:722381ms step_avg:77.00ms +[2025-09-02 07:38:29] [Rank 0] step:9381/10000 train_time:722381ms step_avg:77.00ms +[2025-09-02 07:38:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:38:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:38:42] [Rank 0] PRINT: step:9400/10000 val_loss:3.7917 svd_entropy: attn_qk:H=0.7666,top10E=0.25,eRank=184.4,q75/q25=88.62 attn_vo:H=0.7919,top10E=0.14,eRank=266.8,q75/q25=inf mlp_w1:H=0.7991,top10E=0.24,eRank=222.3,q75/q25=23.33 mlp_w2:H=0.8615,top10E=0.13,eRank=313.1,q75/q25=34.30 vo_prod:H=0.6729,top10E=0.21,eRank=126.9,q75/q25=inf train_time:724193ms step_avg:77.04ms +[2025-09-02 07:38:42] [Rank 0] PRINT: step:9400/10000 val_loss:3.7917 svd_entropy: attn_qk:H=0.7666,top10E=0.25,eRank=184.4,q75/q25=88.62 attn_vo:H=0.7919,top10E=0.14,eRank=266.8,q75/q25=inf mlp_w1:H=0.7991,top10E=0.24,eRank=222.3,q75/q25=23.33 mlp_w2:H=0.8615,top10E=0.13,eRank=313.1,q75/q25=34.30 vo_prod:H=0.6729,top10E=0.21,eRank=126.9,q75/q25=inf train_time:724193ms step_avg:77.04ms +[2025-09-02 07:38:42] [Rank 0] step:9401/10000 train_time:724206ms step_avg:77.03ms +[2025-09-02 07:38:42] [Rank 0] step:9401/10000 train_time:724206ms step_avg:77.03ms +[2025-09-02 07:38:44] [Rank 0] step:9421/10000 train_time:725699ms step_avg:77.03ms +[2025-09-02 07:38:44] [Rank 0] step:9421/10000 train_time:725699ms step_avg:77.03ms +[2025-09-02 07:38:45] [Rank 0] step:9441/10000 train_time:727342ms step_avg:77.04ms +[2025-09-02 
07:38:45] [Rank 0] step:9441/10000 train_time:727342ms step_avg:77.04ms +[2025-09-02 07:38:47] [Rank 0] step:9461/10000 train_time:728991ms step_avg:77.05ms +[2025-09-02 07:38:47] [Rank 0] step:9461/10000 train_time:728991ms step_avg:77.05ms +[2025-09-02 07:38:49] [Rank 0] step:9481/10000 train_time:730636ms step_avg:77.06ms +[2025-09-02 07:38:49] [Rank 0] step:9481/10000 train_time:730636ms step_avg:77.06ms +[2025-09-02 07:38:50] [Rank 0] step:9501/10000 train_time:732293ms step_avg:77.08ms +[2025-09-02 07:38:50] [Rank 0] step:9501/10000 train_time:732293ms step_avg:77.08ms +[2025-09-02 07:38:52] [Rank 0] step:9521/10000 train_time:733931ms step_avg:77.09ms +[2025-09-02 07:38:52] [Rank 0] step:9521/10000 train_time:733931ms step_avg:77.09ms +[2025-09-02 07:38:54] [Rank 0] step:9541/10000 train_time:735572ms step_avg:77.10ms +[2025-09-02 07:38:54] [Rank 0] step:9541/10000 train_time:735572ms step_avg:77.10ms +[2025-09-02 07:38:55] [Rank 0] step:9561/10000 train_time:737209ms step_avg:77.11ms +[2025-09-02 07:38:55] [Rank 0] step:9561/10000 train_time:737209ms step_avg:77.11ms +[2025-09-02 07:38:57] [Rank 0] step:9581/10000 train_time:738852ms step_avg:77.12ms +[2025-09-02 07:38:57] [Rank 0] step:9581/10000 train_time:738852ms step_avg:77.12ms +[2025-09-02 07:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:39:10] [Rank 0] PRINT: step:9600/10000 val_loss:3.7857 svd_entropy: attn_qk:H=0.7669,top10E=0.25,eRank=184.7,q75/q25=88.45 attn_vo:H=0.7922,top10E=0.14,eRank=267.2,q75/q25=inf mlp_w1:H=0.7994,top10E=0.24,eRank=222.7,q75/q25=23.25 mlp_w2:H=0.8617,top10E=0.13,eRank=313.7,q75/q25=34.32 vo_prod:H=0.6734,top10E=0.21,eRank=127.3,q75/q25=inf train_time:740671ms step_avg:77.15ms +[2025-09-02 07:39:10] [Rank 0] PRINT: step:9600/10000 val_loss:3.7857 svd_entropy: attn_qk:H=0.7669,top10E=0.25,eRank=184.7,q75/q25=88.45 attn_vo:H=0.7922,top10E=0.14,eRank=267.2,q75/q25=inf mlp_w1:H=0.7994,top10E=0.24,eRank=222.7,q75/q25=23.25 mlp_w2:H=0.8617,top10E=0.13,eRank=313.7,q75/q25=34.32 vo_prod:H=0.6734,top10E=0.21,eRank=127.3,q75/q25=inf train_time:740671ms step_avg:77.15ms +[2025-09-02 07:39:10] [Rank 0] step:9601/10000 train_time:740682ms step_avg:77.15ms +[2025-09-02 07:39:10] [Rank 0] step:9601/10000 train_time:740682ms step_avg:77.15ms +[2025-09-02 07:39:12] [Rank 0] step:9621/10000 train_time:742188ms step_avg:77.14ms +[2025-09-02 07:39:12] [Rank 0] step:9621/10000 train_time:742188ms step_avg:77.14ms +[2025-09-02 07:39:14] [Rank 0] step:9641/10000 train_time:743834ms step_avg:77.15ms +[2025-09-02 07:39:14] [Rank 0] step:9641/10000 train_time:743834ms step_avg:77.15ms +[2025-09-02 07:39:15] [Rank 0] step:9661/10000 train_time:745506ms step_avg:77.17ms +[2025-09-02 07:39:15] [Rank 0] step:9661/10000 train_time:745506ms step_avg:77.17ms +[2025-09-02 07:39:17] [Rank 0] step:9681/10000 train_time:747172ms step_avg:77.18ms +[2025-09-02 07:39:17] [Rank 0] step:9681/10000 train_time:747172ms step_avg:77.18ms +[2025-09-02 07:39:19] [Rank 0] step:9701/10000 train_time:748855ms step_avg:77.19ms +[2025-09-02 07:39:19] [Rank 0] step:9701/10000 train_time:748855ms step_avg:77.19ms +[2025-09-02 07:39:20] [Rank 0] step:9721/10000 train_time:750519ms step_avg:77.21ms +[2025-09-02 07:39:20] [Rank 0] step:9721/10000 train_time:750519ms step_avg:77.21ms +[2025-09-02 07:39:22] [Rank 
0] step:9741/10000 train_time:752210ms step_avg:77.22ms +[2025-09-02 07:39:22] [Rank 0] step:9741/10000 train_time:752210ms step_avg:77.22ms +[2025-09-02 07:39:24] [Rank 0] step:9761/10000 train_time:753881ms step_avg:77.23ms +[2025-09-02 07:39:24] [Rank 0] step:9761/10000 train_time:753881ms step_avg:77.23ms +[2025-09-02 07:39:25] [Rank 0] step:9781/10000 train_time:755567ms step_avg:77.25ms +[2025-09-02 07:39:25] [Rank 0] step:9781/10000 train_time:755567ms step_avg:77.25ms +[2025-09-02 07:39:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:39:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:39:39] [Rank 0] PRINT: step:9800/10000 val_loss:3.7802 svd_entropy: attn_qk:H=0.7671,top10E=0.25,eRank=184.9,q75/q25=88.20 attn_vo:H=0.7924,top10E=0.14,eRank=267.5,q75/q25=inf mlp_w1:H=0.7996,top10E=0.24,eRank=223.0,q75/q25=23.29 mlp_w2:H=0.8620,top10E=0.12,eRank=314.2,q75/q25=34.29 vo_prod:H=0.6738,top10E=0.21,eRank=127.6,q75/q25=inf train_time:757417ms step_avg:77.29ms +[2025-09-02 07:39:39] [Rank 0] PRINT: step:9800/10000 val_loss:3.7802 svd_entropy: attn_qk:H=0.7671,top10E=0.25,eRank=184.9,q75/q25=88.20 attn_vo:H=0.7924,top10E=0.14,eRank=267.5,q75/q25=inf mlp_w1:H=0.7996,top10E=0.24,eRank=223.0,q75/q25=23.29 mlp_w2:H=0.8620,top10E=0.12,eRank=314.2,q75/q25=34.29 vo_prod:H=0.6738,top10E=0.21,eRank=127.6,q75/q25=inf train_time:757417ms step_avg:77.29ms +[2025-09-02 07:39:39] [Rank 0] step:9801/10000 train_time:757429ms step_avg:77.28ms +[2025-09-02 07:39:39] [Rank 0] step:9801/10000 train_time:757429ms step_avg:77.28ms +[2025-09-02 07:39:41] [Rank 0] step:9821/10000 train_time:758956ms step_avg:77.28ms +[2025-09-02 07:39:41] [Rank 0] step:9821/10000 train_time:758956ms step_avg:77.28ms +[2025-09-02 07:39:42] [Rank 0] step:9841/10000 train_time:760641ms step_avg:77.29ms +[2025-09-02 
07:39:42] [Rank 0] step:9841/10000 train_time:760641ms step_avg:77.29ms +[2025-09-02 07:39:44] [Rank 0] step:9861/10000 train_time:762303ms step_avg:77.30ms +[2025-09-02 07:39:44] [Rank 0] step:9861/10000 train_time:762303ms step_avg:77.30ms +[2025-09-02 07:39:46] [Rank 0] step:9881/10000 train_time:763960ms step_avg:77.32ms +[2025-09-02 07:39:46] [Rank 0] step:9881/10000 train_time:763960ms step_avg:77.32ms +[2025-09-02 07:39:47] [Rank 0] step:9901/10000 train_time:765635ms step_avg:77.33ms +[2025-09-02 07:39:47] [Rank 0] step:9901/10000 train_time:765635ms step_avg:77.33ms +[2025-09-02 07:39:49] [Rank 0] step:9921/10000 train_time:767304ms step_avg:77.34ms +[2025-09-02 07:39:49] [Rank 0] step:9921/10000 train_time:767304ms step_avg:77.34ms +[2025-09-02 07:39:51] [Rank 0] step:9941/10000 train_time:768981ms step_avg:77.35ms +[2025-09-02 07:39:51] [Rank 0] step:9941/10000 train_time:768981ms step_avg:77.35ms +[2025-09-02 07:39:52] [Rank 0] step:9961/10000 train_time:770651ms step_avg:77.37ms +[2025-09-02 07:39:52] [Rank 0] step:9961/10000 train_time:770651ms step_avg:77.37ms +[2025-09-02 07:39:54] [Rank 0] step:9981/10000 train_time:772321ms step_avg:77.38ms +[2025-09-02 07:39:54] [Rank 0] step:9981/10000 train_time:772321ms step_avg:77.38ms +[2025-09-02 07:39:56] [Rank 0] step:10000/10000 train_time:773917ms step_avg:77.39ms +[2025-09-02 07:39:56] [Rank 0] step:10000/10000 train_time:773917ms step_avg:77.39ms +[2025-09-02 07:39:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:39:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:40:08] [Rank 0] PRINT: step:10000/10000 val_loss:3.7741 svd_entropy: attn_qk:H=0.7672,top10E=0.25,eRank=185.0,q75/q25=88.21 attn_vo:H=0.7925,top10E=0.14,eRank=267.7,q75/q25=inf mlp_w1:H=0.7998,top10E=0.24,eRank=223.3,q75/q25=23.28 mlp_w2:H=0.8621,top10E=0.12,eRank=314.5,q75/q25=34.27 vo_prod:H=0.6741,top10E=0.21,eRank=127.9,q75/q25=inf train_time:774177ms step_avg:77.42ms +[2025-09-02 07:40:08] [Rank 0] PRINT: step:10000/10000 val_loss:3.7741 svd_entropy: attn_qk:H=0.7672,top10E=0.25,eRank=185.0,q75/q25=88.21 attn_vo:H=0.7925,top10E=0.14,eRank=267.7,q75/q25=inf mlp_w1:H=0.7998,top10E=0.24,eRank=223.3,q75/q25=23.28 mlp_w2:H=0.8621,top10E=0.12,eRank=314.5,q75/q25=34.27 vo_prod:H=0.6741,top10E=0.21,eRank=127.9,q75/q25=inf train_time:774177ms step_avg:77.42ms +[2025-09-02 07:40:08] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 07:40:08 2025 --- +[2025-09-02 07:40:08] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 07:40:08 2025 --- +[2025-09-02 07:40:08] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 07:40:08] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_44/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..cb0fa510ad7a922c9b8327caecc0cea6273d577e --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "0916fa89-b41d-4d81-a31c-2334c36194c1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_44/training_log_0916fa89-b41d-4d81-a31c-2334c36194c1.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_44/training_log_0916fa89-b41d-4d81-a31c-2334c36194c1.txt new file mode 100644 index 0000000000000000000000000000000000000000..24db36317c54d2fc99d4f91a1dcbe6ac6d9bca6e --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_44/training_log_0916fa89-b41d-4d81-a31c-2334c36194c1.txt @@ -0,0 +1,2984 @@ +[2025-09-02 08:29:02] [Rank 0] PRINT: --- Script Start: Tue Sep 2 08:29:02 2025 --- +[2025-09-02 08:29:02] [Rank 0] PRINT: --- Script Start: Tue Sep 2 08:29:02 2025 --- +[2025-09-02 08:29:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 08:29:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 08:29:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 08:29:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 08:29:02] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-02 08:29:02] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-02 08:29:02] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_44 +[2025-09-02 08:29:02] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_44 +[2025-09-02 08:29:02] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 08:29:02] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 08:29:02] [Rank 0] PRINT: Constructing model... +[2025-09-02 08:29:02] [Rank 0] PRINT: Constructing model... +[2025-09-02 08:29:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 08:29:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 08:29:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 08:29:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 08:29:04] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 08:29:04] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 08:29:04] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 08:29:04] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 08:29:04] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 08:29:04] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 08:29:04] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 08:29:04] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 08:29:04] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 08:29:04] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 08:29:04] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 08:29:04] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 08:29:04] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 08:29:04] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 08:29:04] [Rank 0] PRINT: Starting warmup... +[2025-09-02 08:29:04] [Rank 0] PRINT: Starting warmup... +[2025-09-02 08:29:47] [Rank 0] PRINT: Warmup complete. +[2025-09-02 08:29:47] [Rank 0] PRINT: Warmup complete. +[2025-09-02 08:29:47] [Rank 0] PRINT: Starting training... +[2025-09-02 08:29:47] [Rank 0] PRINT: Starting training... 
+[2025-09-02 08:29:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:29:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:30:03] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.6,q75/q25=10.29 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 08:30:03] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.6,q75/q25=10.29 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 08:30:04] [Rank 0] step:21/10000 train_time:1312ms step_avg:62.49ms +[2025-09-02 08:30:04] [Rank 0] step:21/10000 train_time:1312ms step_avg:62.49ms +[2025-09-02 08:30:05] [Rank 0] step:41/10000 train_time:2714ms step_avg:66.20ms +[2025-09-02 08:30:05] [Rank 0] step:41/10000 train_time:2714ms step_avg:66.20ms +[2025-09-02 08:30:07] [Rank 0] step:61/10000 train_time:4119ms step_avg:67.52ms +[2025-09-02 08:30:07] [Rank 0] step:61/10000 train_time:4119ms step_avg:67.52ms +[2025-09-02 08:30:08] [Rank 0] step:81/10000 train_time:5526ms step_avg:68.22ms +[2025-09-02 08:30:08] [Rank 0] step:81/10000 train_time:5526ms step_avg:68.22ms +[2025-09-02 08:30:10] [Rank 0] step:101/10000 train_time:6932ms step_avg:68.64ms +[2025-09-02 08:30:10] [Rank 0] step:101/10000 train_time:6932ms step_avg:68.64ms +[2025-09-02 08:30:11] [Rank 0] step:121/10000 train_time:8341ms step_avg:68.93ms +[2025-09-02 08:30:11] [Rank 0] step:121/10000 
train_time:8341ms step_avg:68.93ms +[2025-09-02 08:30:13] [Rank 0] step:141/10000 train_time:9749ms step_avg:69.14ms +[2025-09-02 08:30:13] [Rank 0] step:141/10000 train_time:9749ms step_avg:69.14ms +[2025-09-02 08:30:14] [Rank 0] step:161/10000 train_time:11157ms step_avg:69.30ms +[2025-09-02 08:30:14] [Rank 0] step:161/10000 train_time:11157ms step_avg:69.30ms +[2025-09-02 08:30:15] [Rank 0] step:181/10000 train_time:12567ms step_avg:69.43ms +[2025-09-02 08:30:15] [Rank 0] step:181/10000 train_time:12567ms step_avg:69.43ms +[2025-09-02 08:30:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:30:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:30:28] [Rank 0] PRINT: step:200/10000 val_loss:6.4198 svd_entropy: attn_qk:H=0.4813,top10E=0.74,eRank=70.7,q75/q25=12.31 attn_vo:H=0.5258,top10E=0.56,eRank=97.6,q75/q25=inf mlp_w1:H=0.4118,top10E=0.75,eRank=19.7,q75/q25=2.76 mlp_w2:H=0.1547,top10E=0.95,eRank=4.0,q75/q25=678.26 vo_prod:H=0.2596,top10E=0.85,eRank=9.6,q75/q25=inf train_time:14118ms step_avg:70.59ms +[2025-09-02 08:30:28] [Rank 0] PRINT: step:200/10000 val_loss:6.4198 svd_entropy: attn_qk:H=0.4813,top10E=0.74,eRank=70.7,q75/q25=12.31 attn_vo:H=0.5258,top10E=0.56,eRank=97.6,q75/q25=inf mlp_w1:H=0.4118,top10E=0.75,eRank=19.7,q75/q25=2.76 mlp_w2:H=0.1547,top10E=0.95,eRank=4.0,q75/q25=678.26 vo_prod:H=0.2596,top10E=0.85,eRank=9.6,q75/q25=inf train_time:14118ms step_avg:70.59ms +[2025-09-02 08:30:29] [Rank 0] step:201/10000 train_time:14130ms step_avg:70.30ms +[2025-09-02 08:30:29] [Rank 0] step:201/10000 train_time:14130ms step_avg:70.30ms +[2025-09-02 08:30:30] [Rank 0] step:221/10000 train_time:15420ms step_avg:69.78ms +[2025-09-02 08:30:30] [Rank 0] step:221/10000 train_time:15420ms step_avg:69.78ms +[2025-09-02 08:30:31] [Rank 0] step:241/10000 train_time:16829ms 
step_avg:69.83ms +[2025-09-02 08:30:31] [Rank 0] step:241/10000 train_time:16829ms step_avg:69.83ms +[2025-09-02 08:30:33] [Rank 0] step:261/10000 train_time:18239ms step_avg:69.88ms +[2025-09-02 08:30:33] [Rank 0] step:261/10000 train_time:18239ms step_avg:69.88ms +[2025-09-02 08:30:34] [Rank 0] step:281/10000 train_time:19649ms step_avg:69.93ms +[2025-09-02 08:30:34] [Rank 0] step:281/10000 train_time:19649ms step_avg:69.93ms +[2025-09-02 08:30:36] [Rank 0] step:301/10000 train_time:21059ms step_avg:69.96ms +[2025-09-02 08:30:36] [Rank 0] step:301/10000 train_time:21059ms step_avg:69.96ms +[2025-09-02 08:30:37] [Rank 0] step:321/10000 train_time:22470ms step_avg:70.00ms +[2025-09-02 08:30:37] [Rank 0] step:321/10000 train_time:22470ms step_avg:70.00ms +[2025-09-02 08:30:38] [Rank 0] step:341/10000 train_time:23881ms step_avg:70.03ms +[2025-09-02 08:30:38] [Rank 0] step:341/10000 train_time:23881ms step_avg:70.03ms +[2025-09-02 08:30:40] [Rank 0] step:361/10000 train_time:25292ms step_avg:70.06ms +[2025-09-02 08:30:40] [Rank 0] step:361/10000 train_time:25292ms step_avg:70.06ms +[2025-09-02 08:30:41] [Rank 0] step:381/10000 train_time:26702ms step_avg:70.08ms +[2025-09-02 08:30:41] [Rank 0] step:381/10000 train_time:26702ms step_avg:70.08ms +[2025-09-02 08:30:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:30:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:30:54] [Rank 0] PRINT: step:400/10000 val_loss:5.9352 svd_entropy: attn_qk:H=0.5346,top10E=0.64,eRank=79.7,q75/q25=13.48 attn_vo:H=0.5495,top10E=0.50,eRank=92.3,q75/q25=inf mlp_w1:H=0.4509,top10E=0.69,eRank=32.1,q75/q25=3.26 mlp_w2:H=0.5268,top10E=0.62,eRank=35.0,q75/q25=13.47 vo_prod:H=0.3699,top10E=0.75,eRank=17.3,q75/q25=inf train_time:28255ms step_avg:70.64ms +[2025-09-02 08:30:54] [Rank 0] PRINT: step:400/10000 val_loss:5.9352 svd_entropy: attn_qk:H=0.5346,top10E=0.64,eRank=79.7,q75/q25=13.48 attn_vo:H=0.5495,top10E=0.50,eRank=92.3,q75/q25=inf mlp_w1:H=0.4509,top10E=0.69,eRank=32.1,q75/q25=3.26 mlp_w2:H=0.5268,top10E=0.62,eRank=35.0,q75/q25=13.47 vo_prod:H=0.3699,top10E=0.75,eRank=17.3,q75/q25=inf train_time:28255ms step_avg:70.64ms +[2025-09-02 08:30:54] [Rank 0] step:401/10000 train_time:28268ms step_avg:70.49ms +[2025-09-02 08:30:54] [Rank 0] step:401/10000 train_time:28268ms step_avg:70.49ms +[2025-09-02 08:30:56] [Rank 0] step:421/10000 train_time:29561ms step_avg:70.22ms +[2025-09-02 08:30:56] [Rank 0] step:421/10000 train_time:29561ms step_avg:70.22ms +[2025-09-02 08:30:57] [Rank 0] step:441/10000 train_time:30971ms step_avg:70.23ms +[2025-09-02 08:30:57] [Rank 0] step:441/10000 train_time:30971ms step_avg:70.23ms +[2025-09-02 08:30:59] [Rank 0] step:461/10000 train_time:32382ms step_avg:70.24ms +[2025-09-02 08:30:59] [Rank 0] step:461/10000 train_time:32382ms step_avg:70.24ms +[2025-09-02 08:31:00] [Rank 0] step:481/10000 train_time:33792ms step_avg:70.25ms +[2025-09-02 08:31:00] [Rank 0] step:481/10000 train_time:33792ms step_avg:70.25ms +[2025-09-02 08:31:01] [Rank 0] step:501/10000 train_time:35201ms step_avg:70.26ms +[2025-09-02 08:31:01] [Rank 0] step:501/10000 train_time:35201ms step_avg:70.26ms +[2025-09-02 08:31:03] [Rank 0] step:521/10000 train_time:36613ms step_avg:70.27ms +[2025-09-02 08:31:03] [Rank 0] step:521/10000 train_time:36613ms step_avg:70.27ms +[2025-09-02 08:31:04] [Rank 0] step:541/10000 train_time:38024ms 
step_avg:70.28ms +[2025-09-02 08:31:04] [Rank 0] step:541/10000 train_time:38024ms step_avg:70.28ms +[2025-09-02 08:31:06] [Rank 0] step:561/10000 train_time:39436ms step_avg:70.30ms +[2025-09-02 08:31:06] [Rank 0] step:561/10000 train_time:39436ms step_avg:70.30ms +[2025-09-02 08:31:07] [Rank 0] step:581/10000 train_time:40848ms step_avg:70.31ms +[2025-09-02 08:31:07] [Rank 0] step:581/10000 train_time:40848ms step_avg:70.31ms +[2025-09-02 08:31:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:31:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:31:20] [Rank 0] PRINT: step:600/10000 val_loss:5.6600 svd_entropy: attn_qk:H=0.5696,top10E=0.57,eRank=87.1,q75/q25=14.91 attn_vo:H=0.5804,top10E=0.44,eRank=102.8,q75/q25=inf mlp_w1:H=0.4921,top10E=0.63,eRank=42.9,q75/q25=3.60 mlp_w2:H=0.6295,top10E=0.46,eRank=67.9,q75/q25=9.33 vo_prod:H=0.4233,top10E=0.66,eRank=23.6,q75/q25=inf train_time:42401ms step_avg:70.67ms +[2025-09-02 08:31:20] [Rank 0] PRINT: step:600/10000 val_loss:5.6600 svd_entropy: attn_qk:H=0.5696,top10E=0.57,eRank=87.1,q75/q25=14.91 attn_vo:H=0.5804,top10E=0.44,eRank=102.8,q75/q25=inf mlp_w1:H=0.4921,top10E=0.63,eRank=42.9,q75/q25=3.60 mlp_w2:H=0.6295,top10E=0.46,eRank=67.9,q75/q25=9.33 vo_prod:H=0.4233,top10E=0.66,eRank=23.6,q75/q25=inf train_time:42401ms step_avg:70.67ms +[2025-09-02 08:31:20] [Rank 0] step:601/10000 train_time:42413ms step_avg:70.57ms +[2025-09-02 08:31:20] [Rank 0] step:601/10000 train_time:42413ms step_avg:70.57ms +[2025-09-02 08:31:22] [Rank 0] step:621/10000 train_time:43703ms step_avg:70.38ms +[2025-09-02 08:31:22] [Rank 0] step:621/10000 train_time:43703ms step_avg:70.38ms +[2025-09-02 08:31:23] [Rank 0] step:641/10000 train_time:45116ms step_avg:70.38ms +[2025-09-02 08:31:23] [Rank 0] step:641/10000 train_time:45116ms step_avg:70.38ms 
+[2025-09-02 08:31:24] [Rank 0] step:661/10000 train_time:46528ms step_avg:70.39ms +[2025-09-02 08:31:24] [Rank 0] step:661/10000 train_time:46528ms step_avg:70.39ms +[2025-09-02 08:31:26] [Rank 0] step:681/10000 train_time:47940ms step_avg:70.40ms +[2025-09-02 08:31:26] [Rank 0] step:681/10000 train_time:47940ms step_avg:70.40ms +[2025-09-02 08:31:27] [Rank 0] step:701/10000 train_time:49352ms step_avg:70.40ms +[2025-09-02 08:31:27] [Rank 0] step:701/10000 train_time:49352ms step_avg:70.40ms +[2025-09-02 08:31:29] [Rank 0] step:721/10000 train_time:50765ms step_avg:70.41ms +[2025-09-02 08:31:29] [Rank 0] step:721/10000 train_time:50765ms step_avg:70.41ms +[2025-09-02 08:31:30] [Rank 0] step:741/10000 train_time:52180ms step_avg:70.42ms +[2025-09-02 08:31:30] [Rank 0] step:741/10000 train_time:52180ms step_avg:70.42ms +[2025-09-02 08:31:31] [Rank 0] step:761/10000 train_time:53604ms step_avg:70.44ms +[2025-09-02 08:31:31] [Rank 0] step:761/10000 train_time:53604ms step_avg:70.44ms +[2025-09-02 08:31:33] [Rank 0] step:781/10000 train_time:55032ms step_avg:70.46ms +[2025-09-02 08:31:33] [Rank 0] step:781/10000 train_time:55032ms step_avg:70.46ms +[2025-09-02 08:31:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:31:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:31:46] [Rank 0] PRINT: step:800/10000 val_loss:5.4289 svd_entropy: attn_qk:H=0.5959,top10E=0.52,eRank=93.3,q75/q25=16.76 attn_vo:H=0.6076,top10E=0.40,eRank=115.7,q75/q25=inf mlp_w1:H=0.5258,top10E=0.59,eRank=51.5,q75/q25=3.90 mlp_w2:H=0.6856,top10E=0.37,eRank=96.4,q75/q25=8.36 vo_prod:H=0.4612,top10E=0.58,eRank=29.8,q75/q25=inf train_time:56601ms step_avg:70.75ms +[2025-09-02 08:31:46] [Rank 0] PRINT: step:800/10000 val_loss:5.4289 svd_entropy: attn_qk:H=0.5959,top10E=0.52,eRank=93.3,q75/q25=16.76 attn_vo:H=0.6076,top10E=0.40,eRank=115.7,q75/q25=inf mlp_w1:H=0.5258,top10E=0.59,eRank=51.5,q75/q25=3.90 mlp_w2:H=0.6856,top10E=0.37,eRank=96.4,q75/q25=8.36 vo_prod:H=0.4612,top10E=0.58,eRank=29.8,q75/q25=inf train_time:56601ms step_avg:70.75ms +[2025-09-02 08:31:46] [Rank 0] step:801/10000 train_time:56613ms step_avg:70.68ms +[2025-09-02 08:31:46] [Rank 0] step:801/10000 train_time:56613ms step_avg:70.68ms +[2025-09-02 08:31:47] [Rank 0] step:821/10000 train_time:57904ms step_avg:70.53ms +[2025-09-02 08:31:47] [Rank 0] step:821/10000 train_time:57904ms step_avg:70.53ms +[2025-09-02 08:31:49] [Rank 0] step:841/10000 train_time:59327ms step_avg:70.54ms +[2025-09-02 08:31:49] [Rank 0] step:841/10000 train_time:59327ms step_avg:70.54ms +[2025-09-02 08:31:50] [Rank 0] step:861/10000 train_time:60752ms step_avg:70.56ms +[2025-09-02 08:31:50] [Rank 0] step:861/10000 train_time:60752ms step_avg:70.56ms +[2025-09-02 08:31:52] [Rank 0] step:881/10000 train_time:62177ms step_avg:70.57ms +[2025-09-02 08:31:52] [Rank 0] step:881/10000 train_time:62177ms step_avg:70.57ms +[2025-09-02 08:31:53] [Rank 0] step:901/10000 train_time:63602ms step_avg:70.59ms +[2025-09-02 08:31:53] [Rank 0] step:901/10000 train_time:63602ms step_avg:70.59ms +[2025-09-02 08:31:55] [Rank 0] step:921/10000 train_time:65028ms step_avg:70.61ms +[2025-09-02 08:31:55] [Rank 0] step:921/10000 train_time:65028ms step_avg:70.61ms +[2025-09-02 08:31:56] [Rank 0] step:941/10000 train_time:66454ms 
step_avg:70.62ms +[2025-09-02 08:31:56] [Rank 0] step:941/10000 train_time:66454ms step_avg:70.62ms +[2025-09-02 08:31:57] [Rank 0] step:961/10000 train_time:67880ms step_avg:70.63ms +[2025-09-02 08:31:57] [Rank 0] step:961/10000 train_time:67880ms step_avg:70.63ms +[2025-09-02 08:31:59] [Rank 0] step:981/10000 train_time:69306ms step_avg:70.65ms +[2025-09-02 08:31:59] [Rank 0] step:981/10000 train_time:69306ms step_avg:70.65ms +[2025-09-02 08:32:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:32:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:32:12] [Rank 0] PRINT: step:1000/10000 val_loss:5.2553 svd_entropy: attn_qk:H=0.6165,top10E=0.48,eRank=99.2,q75/q25=19.14 attn_vo:H=0.6304,top10E=0.37,eRank=129.6,q75/q25=inf mlp_w1:H=0.5518,top10E=0.56,eRank=58.7,q75/q25=4.22 mlp_w2:H=0.7149,top10E=0.32,eRank=116.8,q75/q25=9.00 vo_prod:H=0.4864,top10E=0.53,eRank=35.0,q75/q25=inf train_time:70876ms step_avg:70.88ms +[2025-09-02 08:32:12] [Rank 0] PRINT: step:1000/10000 val_loss:5.2553 svd_entropy: attn_qk:H=0.6165,top10E=0.48,eRank=99.2,q75/q25=19.14 attn_vo:H=0.6304,top10E=0.37,eRank=129.6,q75/q25=inf mlp_w1:H=0.5518,top10E=0.56,eRank=58.7,q75/q25=4.22 mlp_w2:H=0.7149,top10E=0.32,eRank=116.8,q75/q25=9.00 vo_prod:H=0.4864,top10E=0.53,eRank=35.0,q75/q25=inf train_time:70876ms step_avg:70.88ms +[2025-09-02 08:32:12] [Rank 0] step:1001/10000 train_time:70888ms step_avg:70.82ms +[2025-09-02 08:32:12] [Rank 0] step:1001/10000 train_time:70888ms step_avg:70.82ms +[2025-09-02 08:32:13] [Rank 0] step:1021/10000 train_time:72198ms step_avg:70.71ms +[2025-09-02 08:32:13] [Rank 0] step:1021/10000 train_time:72198ms step_avg:70.71ms +[2025-09-02 08:32:15] [Rank 0] step:1041/10000 train_time:73622ms step_avg:70.72ms +[2025-09-02 08:32:15] [Rank 0] step:1041/10000 train_time:73622ms 
step_avg:70.72ms +[2025-09-02 08:32:16] [Rank 0] step:1061/10000 train_time:75048ms step_avg:70.73ms +[2025-09-02 08:32:16] [Rank 0] step:1061/10000 train_time:75048ms step_avg:70.73ms +[2025-09-02 08:32:18] [Rank 0] step:1081/10000 train_time:76472ms step_avg:70.74ms +[2025-09-02 08:32:18] [Rank 0] step:1081/10000 train_time:76472ms step_avg:70.74ms +[2025-09-02 08:32:19] [Rank 0] step:1101/10000 train_time:77896ms step_avg:70.75ms +[2025-09-02 08:32:19] [Rank 0] step:1101/10000 train_time:77896ms step_avg:70.75ms +[2025-09-02 08:32:21] [Rank 0] step:1121/10000 train_time:79322ms step_avg:70.76ms +[2025-09-02 08:32:21] [Rank 0] step:1121/10000 train_time:79322ms step_avg:70.76ms +[2025-09-02 08:32:22] [Rank 0] step:1141/10000 train_time:80749ms step_avg:70.77ms +[2025-09-02 08:32:22] [Rank 0] step:1141/10000 train_time:80749ms step_avg:70.77ms +[2025-09-02 08:32:23] [Rank 0] step:1161/10000 train_time:82176ms step_avg:70.78ms +[2025-09-02 08:32:23] [Rank 0] step:1161/10000 train_time:82176ms step_avg:70.78ms +[2025-09-02 08:32:25] [Rank 0] step:1181/10000 train_time:83602ms step_avg:70.79ms +[2025-09-02 08:32:25] [Rank 0] step:1181/10000 train_time:83602ms step_avg:70.79ms +[2025-09-02 08:32:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:32:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:32:38] [Rank 0] PRINT: step:1200/10000 val_loss:5.0980 svd_entropy: attn_qk:H=0.6328,top10E=0.45,eRank=104.6,q75/q25=22.36 attn_vo:H=0.6500,top10E=0.35,eRank=142.9,q75/q25=inf mlp_w1:H=0.5737,top10E=0.54,eRank=65.3,q75/q25=4.60 mlp_w2:H=0.7346,top10E=0.29,eRank=133.2,q75/q25=10.12 vo_prod:H=0.5074,top10E=0.49,eRank=40.2,q75/q25=inf train_time:85170ms step_avg:70.98ms +[2025-09-02 08:32:38] [Rank 0] PRINT: step:1200/10000 val_loss:5.0980 svd_entropy: attn_qk:H=0.6328,top10E=0.45,eRank=104.6,q75/q25=22.36 attn_vo:H=0.6500,top10E=0.35,eRank=142.9,q75/q25=inf mlp_w1:H=0.5737,top10E=0.54,eRank=65.3,q75/q25=4.60 mlp_w2:H=0.7346,top10E=0.29,eRank=133.2,q75/q25=10.12 vo_prod:H=0.5074,top10E=0.49,eRank=40.2,q75/q25=inf train_time:85170ms step_avg:70.98ms +[2025-09-02 08:32:38] [Rank 0] step:1201/10000 train_time:85183ms step_avg:70.93ms +[2025-09-02 08:32:38] [Rank 0] step:1201/10000 train_time:85183ms step_avg:70.93ms +[2025-09-02 08:32:39] [Rank 0] step:1221/10000 train_time:86468ms step_avg:70.82ms +[2025-09-02 08:32:39] [Rank 0] step:1221/10000 train_time:86468ms step_avg:70.82ms +[2025-09-02 08:32:41] [Rank 0] step:1241/10000 train_time:87891ms step_avg:70.82ms +[2025-09-02 08:32:41] [Rank 0] step:1241/10000 train_time:87891ms step_avg:70.82ms +[2025-09-02 08:32:42] [Rank 0] step:1261/10000 train_time:89314ms step_avg:70.83ms +[2025-09-02 08:32:42] [Rank 0] step:1261/10000 train_time:89314ms step_avg:70.83ms +[2025-09-02 08:32:44] [Rank 0] step:1281/10000 train_time:90739ms step_avg:70.83ms +[2025-09-02 08:32:44] [Rank 0] step:1281/10000 train_time:90739ms step_avg:70.83ms +[2025-09-02 08:32:45] [Rank 0] step:1301/10000 train_time:92164ms step_avg:70.84ms +[2025-09-02 08:32:45] [Rank 0] step:1301/10000 train_time:92164ms step_avg:70.84ms +[2025-09-02 08:32:46] [Rank 0] step:1321/10000 train_time:93589ms step_avg:70.85ms +[2025-09-02 08:32:46] [Rank 0] step:1321/10000 train_time:93589ms step_avg:70.85ms +[2025-09-02 08:32:48] [Rank 0] step:1341/10000 
train_time:95015ms step_avg:70.85ms +[2025-09-02 08:32:48] [Rank 0] step:1341/10000 train_time:95015ms step_avg:70.85ms +[2025-09-02 08:32:49] [Rank 0] step:1361/10000 train_time:96442ms step_avg:70.86ms +[2025-09-02 08:32:49] [Rank 0] step:1361/10000 train_time:96442ms step_avg:70.86ms +[2025-09-02 08:32:51] [Rank 0] step:1381/10000 train_time:97868ms step_avg:70.87ms +[2025-09-02 08:32:51] [Rank 0] step:1381/10000 train_time:97868ms step_avg:70.87ms +[2025-09-02 08:32:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:32:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:33:04] [Rank 0] PRINT: step:1400/10000 val_loss:4.9582 svd_entropy: attn_qk:H=0.6462,top10E=0.43,eRank=109.6,q75/q25=26.39 attn_vo:H=0.6665,top10E=0.32,eRank=154.3,q75/q25=inf mlp_w1:H=0.5936,top10E=0.51,eRank=72.0,q75/q25=5.02 mlp_w2:H=0.7508,top10E=0.27,eRank=148.4,q75/q25=11.38 vo_prod:H=0.5251,top10E=0.46,eRank=45.1,q75/q25=inf train_time:99435ms step_avg:71.03ms +[2025-09-02 08:33:04] [Rank 0] PRINT: step:1400/10000 val_loss:4.9582 svd_entropy: attn_qk:H=0.6462,top10E=0.43,eRank=109.6,q75/q25=26.39 attn_vo:H=0.6665,top10E=0.32,eRank=154.3,q75/q25=inf mlp_w1:H=0.5936,top10E=0.51,eRank=72.0,q75/q25=5.02 mlp_w2:H=0.7508,top10E=0.27,eRank=148.4,q75/q25=11.38 vo_prod:H=0.5251,top10E=0.46,eRank=45.1,q75/q25=inf train_time:99435ms step_avg:71.03ms +[2025-09-02 08:33:04] [Rank 0] step:1401/10000 train_time:99447ms step_avg:70.98ms +[2025-09-02 08:33:04] [Rank 0] step:1401/10000 train_time:99447ms step_avg:70.98ms +[2025-09-02 08:33:06] [Rank 0] step:1421/10000 train_time:100743ms step_avg:70.90ms +[2025-09-02 08:33:06] [Rank 0] step:1421/10000 train_time:100743ms step_avg:70.90ms +[2025-09-02 08:33:07] [Rank 0] step:1441/10000 train_time:102169ms step_avg:70.90ms +[2025-09-02 08:33:07] [Rank 0] step:1441/10000 
train_time:102169ms step_avg:70.90ms +[2025-09-02 08:33:08] [Rank 0] step:1461/10000 train_time:103594ms step_avg:70.91ms +[2025-09-02 08:33:08] [Rank 0] step:1461/10000 train_time:103594ms step_avg:70.91ms +[2025-09-02 08:33:10] [Rank 0] step:1481/10000 train_time:105020ms step_avg:70.91ms +[2025-09-02 08:33:10] [Rank 0] step:1481/10000 train_time:105020ms step_avg:70.91ms +[2025-09-02 08:33:11] [Rank 0] step:1501/10000 train_time:106454ms step_avg:70.92ms +[2025-09-02 08:33:11] [Rank 0] step:1501/10000 train_time:106454ms step_avg:70.92ms +[2025-09-02 08:33:13] [Rank 0] step:1521/10000 train_time:107890ms step_avg:70.93ms +[2025-09-02 08:33:13] [Rank 0] step:1521/10000 train_time:107890ms step_avg:70.93ms +[2025-09-02 08:33:14] [Rank 0] step:1541/10000 train_time:109327ms step_avg:70.95ms +[2025-09-02 08:33:14] [Rank 0] step:1541/10000 train_time:109327ms step_avg:70.95ms +[2025-09-02 08:33:16] [Rank 0] step:1561/10000 train_time:110764ms step_avg:70.96ms +[2025-09-02 08:33:16] [Rank 0] step:1561/10000 train_time:110764ms step_avg:70.96ms +[2025-09-02 08:33:17] [Rank 0] step:1581/10000 train_time:112201ms step_avg:70.97ms +[2025-09-02 08:33:17] [Rank 0] step:1581/10000 train_time:112201ms step_avg:70.97ms +[2025-09-02 08:33:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:33:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:33:30] [Rank 0] PRINT: step:1600/10000 val_loss:4.8147 svd_entropy: attn_qk:H=0.6572,top10E=0.41,eRank=113.8,q75/q25=31.18 attn_vo:H=0.6804,top10E=0.30,eRank=164.0,q75/q25=inf mlp_w1:H=0.6112,top10E=0.49,eRank=78.6,q75/q25=5.47 mlp_w2:H=0.7634,top10E=0.25,eRank=161.5,q75/q25=12.69 vo_prod:H=0.5402,top10E=0.43,eRank=50.0,q75/q25=inf train_time:113782ms step_avg:71.11ms +[2025-09-02 08:33:30] [Rank 0] PRINT: step:1600/10000 val_loss:4.8147 svd_entropy: attn_qk:H=0.6572,top10E=0.41,eRank=113.8,q75/q25=31.18 attn_vo:H=0.6804,top10E=0.30,eRank=164.0,q75/q25=inf mlp_w1:H=0.6112,top10E=0.49,eRank=78.6,q75/q25=5.47 mlp_w2:H=0.7634,top10E=0.25,eRank=161.5,q75/q25=12.69 vo_prod:H=0.5402,top10E=0.43,eRank=50.0,q75/q25=inf train_time:113782ms step_avg:71.11ms +[2025-09-02 08:33:30] [Rank 0] step:1601/10000 train_time:113795ms step_avg:71.08ms +[2025-09-02 08:33:30] [Rank 0] step:1601/10000 train_time:113795ms step_avg:71.08ms +[2025-09-02 08:33:32] [Rank 0] step:1621/10000 train_time:115106ms step_avg:71.01ms +[2025-09-02 08:33:32] [Rank 0] step:1621/10000 train_time:115106ms step_avg:71.01ms +[2025-09-02 08:33:33] [Rank 0] step:1641/10000 train_time:116539ms step_avg:71.02ms +[2025-09-02 08:33:33] [Rank 0] step:1641/10000 train_time:116539ms step_avg:71.02ms +[2025-09-02 08:33:35] [Rank 0] step:1661/10000 train_time:117974ms step_avg:71.03ms +[2025-09-02 08:33:35] [Rank 0] step:1661/10000 train_time:117974ms step_avg:71.03ms +[2025-09-02 08:33:36] [Rank 0] step:1681/10000 train_time:119409ms step_avg:71.03ms +[2025-09-02 08:33:36] [Rank 0] step:1681/10000 train_time:119409ms step_avg:71.03ms +[2025-09-02 08:33:37] [Rank 0] step:1701/10000 train_time:120844ms step_avg:71.04ms +[2025-09-02 08:33:37] [Rank 0] step:1701/10000 train_time:120844ms step_avg:71.04ms +[2025-09-02 08:33:39] [Rank 0] step:1721/10000 train_time:122280ms step_avg:71.05ms +[2025-09-02 08:33:39] [Rank 0] step:1721/10000 train_time:122280ms step_avg:71.05ms +[2025-09-02 08:33:40] [Rank 0] 
step:1741/10000 train_time:123717ms step_avg:71.06ms +[2025-09-02 08:33:40] [Rank 0] step:1741/10000 train_time:123717ms step_avg:71.06ms +[2025-09-02 08:33:42] [Rank 0] step:1761/10000 train_time:125153ms step_avg:71.07ms +[2025-09-02 08:33:42] [Rank 0] step:1761/10000 train_time:125153ms step_avg:71.07ms +[2025-09-02 08:33:43] [Rank 0] step:1781/10000 train_time:126591ms step_avg:71.08ms +[2025-09-02 08:33:43] [Rank 0] step:1781/10000 train_time:126591ms step_avg:71.08ms +[2025-09-02 08:33:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:33:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:33:56] [Rank 0] PRINT: step:1800/10000 val_loss:4.7100 svd_entropy: attn_qk:H=0.6673,top10E=0.39,eRank=118.1,q75/q25=35.80 attn_vo:H=0.6921,top10E=0.29,eRank=172.5,q75/q25=inf mlp_w1:H=0.6272,top10E=0.47,eRank=85.1,q75/q25=5.91 mlp_w2:H=0.7752,top10E=0.23,eRank=174.8,q75/q25=13.61 vo_prod:H=0.5531,top10E=0.40,eRank=54.7,q75/q25=inf train_time:128171ms step_avg:71.21ms +[2025-09-02 08:33:56] [Rank 0] PRINT: step:1800/10000 val_loss:4.7100 svd_entropy: attn_qk:H=0.6673,top10E=0.39,eRank=118.1,q75/q25=35.80 attn_vo:H=0.6921,top10E=0.29,eRank=172.5,q75/q25=inf mlp_w1:H=0.6272,top10E=0.47,eRank=85.1,q75/q25=5.91 mlp_w2:H=0.7752,top10E=0.23,eRank=174.8,q75/q25=13.61 vo_prod:H=0.5531,top10E=0.40,eRank=54.7,q75/q25=inf train_time:128171ms step_avg:71.21ms +[2025-09-02 08:33:56] [Rank 0] step:1801/10000 train_time:128183ms step_avg:71.17ms +[2025-09-02 08:33:56] [Rank 0] step:1801/10000 train_time:128183ms step_avg:71.17ms +[2025-09-02 08:33:58] [Rank 0] step:1821/10000 train_time:129492ms step_avg:71.11ms +[2025-09-02 08:33:58] [Rank 0] step:1821/10000 train_time:129492ms step_avg:71.11ms +[2025-09-02 08:33:59] [Rank 0] step:1841/10000 train_time:130926ms step_avg:71.12ms +[2025-09-02 08:33:59] 
[Rank 0] step:1841/10000 train_time:130926ms step_avg:71.12ms +[2025-09-02 08:34:01] [Rank 0] step:1861/10000 train_time:132361ms step_avg:71.12ms +[2025-09-02 08:34:01] [Rank 0] step:1861/10000 train_time:132361ms step_avg:71.12ms +[2025-09-02 08:34:02] [Rank 0] step:1881/10000 train_time:133796ms step_avg:71.13ms +[2025-09-02 08:34:02] [Rank 0] step:1881/10000 train_time:133796ms step_avg:71.13ms +[2025-09-02 08:34:04] [Rank 0] step:1901/10000 train_time:135232ms step_avg:71.14ms +[2025-09-02 08:34:04] [Rank 0] step:1901/10000 train_time:135232ms step_avg:71.14ms +[2025-09-02 08:34:05] [Rank 0] step:1921/10000 train_time:136668ms step_avg:71.14ms +[2025-09-02 08:34:05] [Rank 0] step:1921/10000 train_time:136668ms step_avg:71.14ms +[2025-09-02 08:34:06] [Rank 0] step:1941/10000 train_time:138104ms step_avg:71.15ms +[2025-09-02 08:34:06] [Rank 0] step:1941/10000 train_time:138104ms step_avg:71.15ms +[2025-09-02 08:34:08] [Rank 0] step:1961/10000 train_time:139539ms step_avg:71.16ms +[2025-09-02 08:34:08] [Rank 0] step:1961/10000 train_time:139539ms step_avg:71.16ms +[2025-09-02 08:34:09] [Rank 0] step:1981/10000 train_time:140977ms step_avg:71.16ms +[2025-09-02 08:34:09] [Rank 0] step:1981/10000 train_time:140977ms step_avg:71.16ms +[2025-09-02 08:34:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:34:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:34:22] [Rank 0] PRINT: step:2000/10000 val_loss:4.6392 svd_entropy: attn_qk:H=0.6760,top10E=0.37,eRank=122.0,q75/q25=40.95 attn_vo:H=0.7022,top10E=0.27,eRank=180.0,q75/q25=inf mlp_w1:H=0.6418,top10E=0.45,eRank=91.6,q75/q25=6.33 mlp_w2:H=0.7850,top10E=0.22,eRank=186.6,q75/q25=14.48 vo_prod:H=0.5640,top10E=0.38,eRank=59.0,q75/q25=inf train_time:142557ms step_avg:71.28ms +[2025-09-02 08:34:22] [Rank 0] PRINT: step:2000/10000 val_loss:4.6392 svd_entropy: attn_qk:H=0.6760,top10E=0.37,eRank=122.0,q75/q25=40.95 attn_vo:H=0.7022,top10E=0.27,eRank=180.0,q75/q25=inf mlp_w1:H=0.6418,top10E=0.45,eRank=91.6,q75/q25=6.33 mlp_w2:H=0.7850,top10E=0.22,eRank=186.6,q75/q25=14.48 vo_prod:H=0.5640,top10E=0.38,eRank=59.0,q75/q25=inf train_time:142557ms step_avg:71.28ms +[2025-09-02 08:34:22] [Rank 0] step:2001/10000 train_time:142569ms step_avg:71.25ms +[2025-09-02 08:34:22] [Rank 0] step:2001/10000 train_time:142569ms step_avg:71.25ms +[2025-09-02 08:34:24] [Rank 0] step:2021/10000 train_time:143879ms step_avg:71.19ms +[2025-09-02 08:34:24] [Rank 0] step:2021/10000 train_time:143879ms step_avg:71.19ms +[2025-09-02 08:34:25] [Rank 0] step:2041/10000 train_time:145426ms step_avg:71.25ms +[2025-09-02 08:34:25] [Rank 0] step:2041/10000 train_time:145426ms step_avg:71.25ms +[2025-09-02 08:34:27] [Rank 0] step:2061/10000 train_time:146862ms step_avg:71.26ms +[2025-09-02 08:34:27] [Rank 0] step:2061/10000 train_time:146862ms step_avg:71.26ms +[2025-09-02 08:34:28] [Rank 0] step:2081/10000 train_time:148297ms step_avg:71.26ms +[2025-09-02 08:34:28] [Rank 0] step:2081/10000 train_time:148297ms step_avg:71.26ms +[2025-09-02 08:34:30] [Rank 0] step:2101/10000 train_time:149733ms step_avg:71.27ms +[2025-09-02 08:34:30] [Rank 0] step:2101/10000 train_time:149733ms step_avg:71.27ms +[2025-09-02 08:34:31] [Rank 0] step:2121/10000 train_time:151169ms step_avg:71.27ms +[2025-09-02 08:34:31] [Rank 0] step:2121/10000 train_time:151169ms step_avg:71.27ms +[2025-09-02 08:34:33] [Rank 0] 
step:2141/10000 train_time:152606ms step_avg:71.28ms +[2025-09-02 08:34:33] [Rank 0] step:2141/10000 train_time:152606ms step_avg:71.28ms +[2025-09-02 08:34:34] [Rank 0] step:2161/10000 train_time:154043ms step_avg:71.28ms +[2025-09-02 08:34:34] [Rank 0] step:2161/10000 train_time:154043ms step_avg:71.28ms +[2025-09-02 08:34:35] [Rank 0] step:2181/10000 train_time:155479ms step_avg:71.29ms +[2025-09-02 08:34:35] [Rank 0] step:2181/10000 train_time:155479ms step_avg:71.29ms +[2025-09-02 08:34:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:34:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:34:48] [Rank 0] PRINT: step:2200/10000 val_loss:4.5620 svd_entropy: attn_qk:H=0.6835,top10E=0.36,eRank=125.6,q75/q25=45.68 attn_vo:H=0.7104,top10E=0.26,eRank=186.5,q75/q25=inf mlp_w1:H=0.6555,top10E=0.44,eRank=98.2,q75/q25=6.74 mlp_w2:H=0.7947,top10E=0.21,eRank=199.0,q75/q25=14.93 vo_prod:H=0.5732,top10E=0.36,eRank=62.9,q75/q25=inf train_time:157061ms step_avg:71.39ms +[2025-09-02 08:34:48] [Rank 0] PRINT: step:2200/10000 val_loss:4.5620 svd_entropy: attn_qk:H=0.6835,top10E=0.36,eRank=125.6,q75/q25=45.68 attn_vo:H=0.7104,top10E=0.26,eRank=186.5,q75/q25=inf mlp_w1:H=0.6555,top10E=0.44,eRank=98.2,q75/q25=6.74 mlp_w2:H=0.7947,top10E=0.21,eRank=199.0,q75/q25=14.93 vo_prod:H=0.5732,top10E=0.36,eRank=62.9,q75/q25=inf train_time:157061ms step_avg:71.39ms +[2025-09-02 08:34:49] [Rank 0] step:2201/10000 train_time:157073ms step_avg:71.36ms +[2025-09-02 08:34:49] [Rank 0] step:2201/10000 train_time:157073ms step_avg:71.36ms +[2025-09-02 08:34:50] [Rank 0] step:2221/10000 train_time:158378ms step_avg:71.31ms +[2025-09-02 08:34:50] [Rank 0] step:2221/10000 train_time:158378ms step_avg:71.31ms +[2025-09-02 08:34:51] [Rank 0] step:2241/10000 train_time:159848ms step_avg:71.33ms +[2025-09-02 08:34:51] 
[Rank 0] step:2241/10000 train_time:159848ms step_avg:71.33ms +[2025-09-02 08:34:53] [Rank 0] step:2261/10000 train_time:161328ms step_avg:71.35ms +[2025-09-02 08:34:53] [Rank 0] step:2261/10000 train_time:161328ms step_avg:71.35ms +[2025-09-02 08:34:54] [Rank 0] step:2281/10000 train_time:162808ms step_avg:71.38ms +[2025-09-02 08:34:54] [Rank 0] step:2281/10000 train_time:162808ms step_avg:71.38ms +[2025-09-02 08:34:56] [Rank 0] step:2301/10000 train_time:164289ms step_avg:71.40ms +[2025-09-02 08:34:56] [Rank 0] step:2301/10000 train_time:164289ms step_avg:71.40ms +[2025-09-02 08:34:57] [Rank 0] step:2321/10000 train_time:165771ms step_avg:71.42ms +[2025-09-02 08:34:57] [Rank 0] step:2321/10000 train_time:165771ms step_avg:71.42ms +[2025-09-02 08:34:59] [Rank 0] step:2341/10000 train_time:167252ms step_avg:71.44ms +[2025-09-02 08:34:59] [Rank 0] step:2341/10000 train_time:167252ms step_avg:71.44ms +[2025-09-02 08:35:00] [Rank 0] step:2361/10000 train_time:168734ms step_avg:71.47ms +[2025-09-02 08:35:00] [Rank 0] step:2361/10000 train_time:168734ms step_avg:71.47ms +[2025-09-02 08:35:02] [Rank 0] step:2381/10000 train_time:170218ms step_avg:71.49ms +[2025-09-02 08:35:02] [Rank 0] step:2381/10000 train_time:170218ms step_avg:71.49ms +[2025-09-02 08:35:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:35:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:35:15] [Rank 0] PRINT: step:2400/10000 val_loss:4.4867 svd_entropy: attn_qk:H=0.6900,top10E=0.35,eRank=128.9,q75/q25=50.72 attn_vo:H=0.7180,top10E=0.25,eRank=192.7,q75/q25=inf mlp_w1:H=0.6683,top10E=0.42,eRank=105.1,q75/q25=7.16 mlp_w2:H=0.8034,top10E=0.20,eRank=210.8,q75/q25=15.25 vo_prod:H=0.5816,top10E=0.35,eRank=66.8,q75/q25=inf train_time:171847ms step_avg:71.60ms +[2025-09-02 08:35:15] [Rank 0] PRINT: step:2400/10000 val_loss:4.4867 svd_entropy: attn_qk:H=0.6900,top10E=0.35,eRank=128.9,q75/q25=50.72 attn_vo:H=0.7180,top10E=0.25,eRank=192.7,q75/q25=inf mlp_w1:H=0.6683,top10E=0.42,eRank=105.1,q75/q25=7.16 mlp_w2:H=0.8034,top10E=0.20,eRank=210.8,q75/q25=15.25 vo_prod:H=0.5816,top10E=0.35,eRank=66.8,q75/q25=inf train_time:171847ms step_avg:71.60ms +[2025-09-02 08:35:15] [Rank 0] step:2401/10000 train_time:171859ms step_avg:71.58ms +[2025-09-02 08:35:15] [Rank 0] step:2401/10000 train_time:171859ms step_avg:71.58ms +[2025-09-02 08:35:16] [Rank 0] step:2421/10000 train_time:173215ms step_avg:71.55ms +[2025-09-02 08:35:16] [Rank 0] step:2421/10000 train_time:173215ms step_avg:71.55ms +[2025-09-02 08:35:18] [Rank 0] step:2441/10000 train_time:174694ms step_avg:71.57ms +[2025-09-02 08:35:18] [Rank 0] step:2441/10000 train_time:174694ms step_avg:71.57ms +[2025-09-02 08:35:19] [Rank 0] step:2461/10000 train_time:176174ms step_avg:71.59ms +[2025-09-02 08:35:19] [Rank 0] step:2461/10000 train_time:176174ms step_avg:71.59ms +[2025-09-02 08:35:21] [Rank 0] step:2481/10000 train_time:177655ms step_avg:71.61ms +[2025-09-02 08:35:21] [Rank 0] step:2481/10000 train_time:177655ms step_avg:71.61ms +[2025-09-02 08:35:22] [Rank 0] step:2501/10000 train_time:179136ms step_avg:71.63ms +[2025-09-02 08:35:22] [Rank 0] step:2501/10000 train_time:179136ms step_avg:71.63ms +[2025-09-02 08:35:24] [Rank 0] step:2521/10000 train_time:180617ms step_avg:71.65ms +[2025-09-02 08:35:24] [Rank 0] step:2521/10000 train_time:180617ms step_avg:71.65ms +[2025-09-02 08:35:25] [Rank 0] 
step:2541/10000 train_time:182098ms step_avg:71.66ms +[2025-09-02 08:35:25] [Rank 0] step:2541/10000 train_time:182098ms step_avg:71.66ms +[2025-09-02 08:35:27] [Rank 0] step:2561/10000 train_time:183580ms step_avg:71.68ms +[2025-09-02 08:35:27] [Rank 0] step:2561/10000 train_time:183580ms step_avg:71.68ms +[2025-09-02 08:35:28] [Rank 0] step:2581/10000 train_time:185062ms step_avg:71.70ms +[2025-09-02 08:35:28] [Rank 0] step:2581/10000 train_time:185062ms step_avg:71.70ms +[2025-09-02 08:35:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:35:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:35:41] [Rank 0] PRINT: step:2600/10000 val_loss:4.4302 svd_entropy: attn_qk:H=0.6962,top10E=0.34,eRank=132.2,q75/q25=55.25 attn_vo:H=0.7248,top10E=0.24,eRank=198.4,q75/q25=inf mlp_w1:H=0.6792,top10E=0.41,eRank=111.3,q75/q25=7.58 mlp_w2:H=0.8107,top10E=0.19,eRank=221.1,q75/q25=15.61 vo_prod:H=0.5892,top10E=0.33,eRank=70.5,q75/q25=inf train_time:186691ms step_avg:71.80ms +[2025-09-02 08:35:41] [Rank 0] PRINT: step:2600/10000 val_loss:4.4302 svd_entropy: attn_qk:H=0.6962,top10E=0.34,eRank=132.2,q75/q25=55.25 attn_vo:H=0.7248,top10E=0.24,eRank=198.4,q75/q25=inf mlp_w1:H=0.6792,top10E=0.41,eRank=111.3,q75/q25=7.58 mlp_w2:H=0.8107,top10E=0.19,eRank=221.1,q75/q25=15.61 vo_prod:H=0.5892,top10E=0.33,eRank=70.5,q75/q25=inf train_time:186691ms step_avg:71.80ms +[2025-09-02 08:35:41] [Rank 0] step:2601/10000 train_time:186704ms step_avg:71.78ms +[2025-09-02 08:35:41] [Rank 0] step:2601/10000 train_time:186704ms step_avg:71.78ms +[2025-09-02 08:35:43] [Rank 0] step:2621/10000 train_time:188040ms step_avg:71.74ms +[2025-09-02 08:35:43] [Rank 0] step:2621/10000 train_time:188040ms step_avg:71.74ms +[2025-09-02 08:35:44] [Rank 0] step:2641/10000 train_time:189517ms step_avg:71.76ms +[2025-09-02 
08:35:44] [Rank 0] step:2641/10000 train_time:189517ms step_avg:71.76ms +[2025-09-02 08:35:46] [Rank 0] step:2661/10000 train_time:190998ms step_avg:71.78ms +[2025-09-02 08:35:46] [Rank 0] step:2661/10000 train_time:190998ms step_avg:71.78ms +[2025-09-02 08:35:47] [Rank 0] step:2681/10000 train_time:192477ms step_avg:71.79ms +[2025-09-02 08:35:47] [Rank 0] step:2681/10000 train_time:192477ms step_avg:71.79ms +[2025-09-02 08:35:49] [Rank 0] step:2701/10000 train_time:193957ms step_avg:71.81ms +[2025-09-02 08:35:49] [Rank 0] step:2701/10000 train_time:193957ms step_avg:71.81ms +[2025-09-02 08:35:50] [Rank 0] step:2721/10000 train_time:195437ms step_avg:71.83ms +[2025-09-02 08:35:50] [Rank 0] step:2721/10000 train_time:195437ms step_avg:71.83ms +[2025-09-02 08:35:52] [Rank 0] step:2741/10000 train_time:196917ms step_avg:71.84ms +[2025-09-02 08:35:52] [Rank 0] step:2741/10000 train_time:196917ms step_avg:71.84ms +[2025-09-02 08:35:53] [Rank 0] step:2761/10000 train_time:198398ms step_avg:71.86ms +[2025-09-02 08:35:53] [Rank 0] step:2761/10000 train_time:198398ms step_avg:71.86ms +[2025-09-02 08:35:55] [Rank 0] step:2781/10000 train_time:199880ms step_avg:71.87ms +[2025-09-02 08:35:55] [Rank 0] step:2781/10000 train_time:199880ms step_avg:71.87ms +[2025-09-02 08:35:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:35:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:36:08] [Rank 0] PRINT: step:2800/10000 val_loss:4.3879 svd_entropy: attn_qk:H=0.7023,top10E=0.33,eRank=135.5,q75/q25=59.50 attn_vo:H=0.7307,top10E=0.23,eRank=203.6,q75/q25=inf mlp_w1:H=0.6891,top10E=0.39,eRank=117.5,q75/q25=7.92 mlp_w2:H=0.8163,top10E=0.18,eRank=229.7,q75/q25=15.83 vo_prod:H=0.5957,top10E=0.32,eRank=73.9,q75/q25=inf train_time:201510ms step_avg:71.97ms +[2025-09-02 08:36:08] [Rank 0] PRINT: step:2800/10000 val_loss:4.3879 svd_entropy: attn_qk:H=0.7023,top10E=0.33,eRank=135.5,q75/q25=59.50 attn_vo:H=0.7307,top10E=0.23,eRank=203.6,q75/q25=inf mlp_w1:H=0.6891,top10E=0.39,eRank=117.5,q75/q25=7.92 mlp_w2:H=0.8163,top10E=0.18,eRank=229.7,q75/q25=15.83 vo_prod:H=0.5957,top10E=0.32,eRank=73.9,q75/q25=inf train_time:201510ms step_avg:71.97ms +[2025-09-02 08:36:08] [Rank 0] step:2801/10000 train_time:201522ms step_avg:71.95ms +[2025-09-02 08:36:08] [Rank 0] step:2801/10000 train_time:201522ms step_avg:71.95ms +[2025-09-02 08:36:09] [Rank 0] step:2821/10000 train_time:202859ms step_avg:71.91ms +[2025-09-02 08:36:09] [Rank 0] step:2821/10000 train_time:202859ms step_avg:71.91ms +[2025-09-02 08:36:11] [Rank 0] step:2841/10000 train_time:204336ms step_avg:71.92ms +[2025-09-02 08:36:11] [Rank 0] step:2841/10000 train_time:204336ms step_avg:71.92ms +[2025-09-02 08:36:12] [Rank 0] step:2861/10000 train_time:205816ms step_avg:71.94ms +[2025-09-02 08:36:12] [Rank 0] step:2861/10000 train_time:205816ms step_avg:71.94ms +[2025-09-02 08:36:14] [Rank 0] step:2881/10000 train_time:207295ms step_avg:71.95ms +[2025-09-02 08:36:14] [Rank 0] step:2881/10000 train_time:207295ms step_avg:71.95ms +[2025-09-02 08:36:15] [Rank 0] step:2901/10000 train_time:208774ms step_avg:71.97ms +[2025-09-02 08:36:15] [Rank 0] step:2901/10000 train_time:208774ms step_avg:71.97ms +[2025-09-02 08:36:17] [Rank 0] step:2921/10000 train_time:210253ms step_avg:71.98ms +[2025-09-02 08:36:17] [Rank 0] step:2921/10000 train_time:210253ms step_avg:71.98ms +[2025-09-02 08:36:18] [Rank 0] 
step:2941/10000 train_time:211733ms step_avg:71.99ms +[2025-09-02 08:36:18] [Rank 0] step:2941/10000 train_time:211733ms step_avg:71.99ms +[2025-09-02 08:36:20] [Rank 0] step:2961/10000 train_time:213213ms step_avg:72.01ms +[2025-09-02 08:36:20] [Rank 0] step:2961/10000 train_time:213213ms step_avg:72.01ms +[2025-09-02 08:36:21] [Rank 0] step:2981/10000 train_time:214701ms step_avg:72.02ms +[2025-09-02 08:36:21] [Rank 0] step:2981/10000 train_time:214701ms step_avg:72.02ms +[2025-09-02 08:36:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:36:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:36:34] [Rank 0] PRINT: step:3000/10000 val_loss:4.3399 svd_entropy: attn_qk:H=0.7076,top10E=0.32,eRank=138.5,q75/q25=63.50 attn_vo:H=0.7361,top10E=0.22,eRank=208.5,q75/q25=inf mlp_w1:H=0.6981,top10E=0.38,eRank=123.5,q75/q25=8.32 mlp_w2:H=0.8212,top10E=0.17,eRank=237.4,q75/q25=16.15 vo_prod:H=0.6012,top10E=0.31,eRank=77.0,q75/q25=inf train_time:216340ms step_avg:72.11ms +[2025-09-02 08:36:34] [Rank 0] PRINT: step:3000/10000 val_loss:4.3399 svd_entropy: attn_qk:H=0.7076,top10E=0.32,eRank=138.5,q75/q25=63.50 attn_vo:H=0.7361,top10E=0.22,eRank=208.5,q75/q25=inf mlp_w1:H=0.6981,top10E=0.38,eRank=123.5,q75/q25=8.32 mlp_w2:H=0.8212,top10E=0.17,eRank=237.4,q75/q25=16.15 vo_prod:H=0.6012,top10E=0.31,eRank=77.0,q75/q25=inf train_time:216340ms step_avg:72.11ms +[2025-09-02 08:36:34] [Rank 0] step:3001/10000 train_time:216352ms step_avg:72.09ms +[2025-09-02 08:36:34] [Rank 0] step:3001/10000 train_time:216352ms step_avg:72.09ms +[2025-09-02 08:36:36] [Rank 0] step:3021/10000 train_time:217694ms step_avg:72.06ms +[2025-09-02 08:36:36] [Rank 0] step:3021/10000 train_time:217694ms step_avg:72.06ms +[2025-09-02 08:36:37] [Rank 0] step:3041/10000 train_time:219181ms step_avg:72.08ms +[2025-09-02 
08:36:37] [Rank 0] step:3041/10000 train_time:219181ms step_avg:72.08ms +[2025-09-02 08:36:39] [Rank 0] step:3061/10000 train_time:220669ms step_avg:72.09ms +[2025-09-02 08:36:39] [Rank 0] step:3061/10000 train_time:220669ms step_avg:72.09ms +[2025-09-02 08:36:40] [Rank 0] step:3081/10000 train_time:222156ms step_avg:72.11ms +[2025-09-02 08:36:40] [Rank 0] step:3081/10000 train_time:222156ms step_avg:72.11ms +[2025-09-02 08:36:42] [Rank 0] step:3101/10000 train_time:223645ms step_avg:72.12ms +[2025-09-02 08:36:42] [Rank 0] step:3101/10000 train_time:223645ms step_avg:72.12ms +[2025-09-02 08:36:43] [Rank 0] step:3121/10000 train_time:225133ms step_avg:72.13ms +[2025-09-02 08:36:43] [Rank 0] step:3121/10000 train_time:225133ms step_avg:72.13ms +[2025-09-02 08:36:45] [Rank 0] step:3141/10000 train_time:226622ms step_avg:72.15ms +[2025-09-02 08:36:45] [Rank 0] step:3141/10000 train_time:226622ms step_avg:72.15ms +[2025-09-02 08:36:46] [Rank 0] step:3161/10000 train_time:228111ms step_avg:72.16ms +[2025-09-02 08:36:46] [Rank 0] step:3161/10000 train_time:228111ms step_avg:72.16ms +[2025-09-02 08:36:48] [Rank 0] step:3181/10000 train_time:229600ms step_avg:72.18ms +[2025-09-02 08:36:48] [Rank 0] step:3181/10000 train_time:229600ms step_avg:72.18ms +[2025-09-02 08:36:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:36:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:37:01] [Rank 0] PRINT: step:3200/10000 val_loss:4.3057 svd_entropy: attn_qk:H=0.7125,top10E=0.32,eRank=141.4,q75/q25=67.34 attn_vo:H=0.7410,top10E=0.21,eRank=213.1,q75/q25=inf mlp_w1:H=0.7065,top10E=0.37,eRank=129.3,q75/q25=8.70 mlp_w2:H=0.8256,top10E=0.17,eRank=244.5,q75/q25=16.32 vo_prod:H=0.6062,top10E=0.31,eRank=80.0,q75/q25=inf train_time:231240ms step_avg:72.26ms +[2025-09-02 08:37:01] [Rank 0] PRINT: step:3200/10000 val_loss:4.3057 svd_entropy: attn_qk:H=0.7125,top10E=0.32,eRank=141.4,q75/q25=67.34 attn_vo:H=0.7410,top10E=0.21,eRank=213.1,q75/q25=inf mlp_w1:H=0.7065,top10E=0.37,eRank=129.3,q75/q25=8.70 mlp_w2:H=0.8256,top10E=0.17,eRank=244.5,q75/q25=16.32 vo_prod:H=0.6062,top10E=0.31,eRank=80.0,q75/q25=inf train_time:231240ms step_avg:72.26ms +[2025-09-02 08:37:01] [Rank 0] step:3201/10000 train_time:231252ms step_avg:72.24ms +[2025-09-02 08:37:01] [Rank 0] step:3201/10000 train_time:231252ms step_avg:72.24ms +[2025-09-02 08:37:02] [Rank 0] step:3221/10000 train_time:232608ms step_avg:72.22ms +[2025-09-02 08:37:02] [Rank 0] step:3221/10000 train_time:232608ms step_avg:72.22ms +[2025-09-02 08:37:04] [Rank 0] step:3241/10000 train_time:234095ms step_avg:72.23ms +[2025-09-02 08:37:04] [Rank 0] step:3241/10000 train_time:234095ms step_avg:72.23ms +[2025-09-02 08:37:05] [Rank 0] step:3261/10000 train_time:235584ms step_avg:72.24ms +[2025-09-02 08:37:05] [Rank 0] step:3261/10000 train_time:235584ms step_avg:72.24ms +[2025-09-02 08:37:07] [Rank 0] step:3281/10000 train_time:237074ms step_avg:72.26ms +[2025-09-02 08:37:07] [Rank 0] step:3281/10000 train_time:237074ms step_avg:72.26ms +[2025-09-02 08:37:08] [Rank 0] step:3301/10000 train_time:238563ms step_avg:72.27ms +[2025-09-02 08:37:08] [Rank 0] step:3301/10000 train_time:238563ms step_avg:72.27ms +[2025-09-02 08:37:10] [Rank 0] step:3321/10000 train_time:240051ms step_avg:72.28ms +[2025-09-02 08:37:10] [Rank 0] step:3321/10000 train_time:240051ms step_avg:72.28ms +[2025-09-02 08:37:11] [Rank 0] 
step:3341/10000 train_time:241541ms step_avg:72.30ms +[2025-09-02 08:37:11] [Rank 0] step:3341/10000 train_time:241541ms step_avg:72.30ms +[2025-09-02 08:37:13] [Rank 0] step:3361/10000 train_time:243031ms step_avg:72.31ms +[2025-09-02 08:37:13] [Rank 0] step:3361/10000 train_time:243031ms step_avg:72.31ms +[2025-09-02 08:37:14] [Rank 0] step:3381/10000 train_time:244522ms step_avg:72.32ms +[2025-09-02 08:37:14] [Rank 0] step:3381/10000 train_time:244522ms step_avg:72.32ms +[2025-09-02 08:37:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:37:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:37:27] [Rank 0] PRINT: step:3400/10000 val_loss:4.2634 svd_entropy: attn_qk:H=0.7171,top10E=0.31,eRank=144.2,q75/q25=70.45 attn_vo:H=0.7456,top10E=0.21,eRank=217.6,q75/q25=inf mlp_w1:H=0.7148,top10E=0.36,eRank=135.4,q75/q25=9.09 mlp_w2:H=0.8298,top10E=0.16,eRank=251.5,q75/q25=16.64 vo_prod:H=0.6115,top10E=0.30,eRank=83.1,q75/q25=inf train_time:246162ms step_avg:72.40ms +[2025-09-02 08:37:27] [Rank 0] PRINT: step:3400/10000 val_loss:4.2634 svd_entropy: attn_qk:H=0.7171,top10E=0.31,eRank=144.2,q75/q25=70.45 attn_vo:H=0.7456,top10E=0.21,eRank=217.6,q75/q25=inf mlp_w1:H=0.7148,top10E=0.36,eRank=135.4,q75/q25=9.09 mlp_w2:H=0.8298,top10E=0.16,eRank=251.5,q75/q25=16.64 vo_prod:H=0.6115,top10E=0.30,eRank=83.1,q75/q25=inf train_time:246162ms step_avg:72.40ms +[2025-09-02 08:37:27] [Rank 0] step:3401/10000 train_time:246174ms step_avg:72.38ms +[2025-09-02 08:37:27] [Rank 0] step:3401/10000 train_time:246174ms step_avg:72.38ms +[2025-09-02 08:37:29] [Rank 0] step:3421/10000 train_time:247520ms step_avg:72.35ms +[2025-09-02 08:37:29] [Rank 0] step:3421/10000 train_time:247520ms step_avg:72.35ms +[2025-09-02 08:37:30] [Rank 0] step:3441/10000 train_time:249007ms step_avg:72.36ms +[2025-09-02 
08:37:30] [Rank 0] step:3441/10000 train_time:249007ms step_avg:72.36ms +[2025-09-02 08:37:32] [Rank 0] step:3461/10000 train_time:250494ms step_avg:72.38ms +[2025-09-02 08:37:32] [Rank 0] step:3461/10000 train_time:250494ms step_avg:72.38ms +[2025-09-02 08:37:33] [Rank 0] step:3481/10000 train_time:251984ms step_avg:72.39ms +[2025-09-02 08:37:33] [Rank 0] step:3481/10000 train_time:251984ms step_avg:72.39ms +[2025-09-02 08:37:35] [Rank 0] step:3501/10000 train_time:253473ms step_avg:72.40ms +[2025-09-02 08:37:35] [Rank 0] step:3501/10000 train_time:253473ms step_avg:72.40ms +[2025-09-02 08:37:36] [Rank 0] step:3521/10000 train_time:254963ms step_avg:72.41ms +[2025-09-02 08:37:36] [Rank 0] step:3521/10000 train_time:254963ms step_avg:72.41ms +[2025-09-02 08:37:38] [Rank 0] step:3541/10000 train_time:256451ms step_avg:72.42ms +[2025-09-02 08:37:38] [Rank 0] step:3541/10000 train_time:256451ms step_avg:72.42ms +[2025-09-02 08:37:39] [Rank 0] step:3561/10000 train_time:257940ms step_avg:72.43ms +[2025-09-02 08:37:39] [Rank 0] step:3561/10000 train_time:257940ms step_avg:72.43ms +[2025-09-02 08:37:41] [Rank 0] step:3581/10000 train_time:259429ms step_avg:72.45ms +[2025-09-02 08:37:41] [Rank 0] step:3581/10000 train_time:259429ms step_avg:72.45ms +[2025-09-02 08:37:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:37:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:37:54] [Rank 0] PRINT: step:3600/10000 val_loss:4.2476 svd_entropy: attn_qk:H=0.7212,top10E=0.30,eRank=146.9,q75/q25=73.64 attn_vo:H=0.7497,top10E=0.20,eRank=221.6,q75/q25=inf mlp_w1:H=0.7218,top10E=0.35,eRank=140.9,q75/q25=9.48 mlp_w2:H=0.8331,top10E=0.16,eRank=257.1,q75/q25=17.01 vo_prod:H=0.6162,top10E=0.29,eRank=86.0,q75/q25=inf train_time:261068ms step_avg:72.52ms +[2025-09-02 08:37:54] [Rank 0] PRINT: step:3600/10000 val_loss:4.2476 svd_entropy: attn_qk:H=0.7212,top10E=0.30,eRank=146.9,q75/q25=73.64 attn_vo:H=0.7497,top10E=0.20,eRank=221.6,q75/q25=inf mlp_w1:H=0.7218,top10E=0.35,eRank=140.9,q75/q25=9.48 mlp_w2:H=0.8331,top10E=0.16,eRank=257.1,q75/q25=17.01 vo_prod:H=0.6162,top10E=0.29,eRank=86.0,q75/q25=inf train_time:261068ms step_avg:72.52ms +[2025-09-02 08:37:54] [Rank 0] step:3601/10000 train_time:261080ms step_avg:72.50ms +[2025-09-02 08:37:54] [Rank 0] step:3601/10000 train_time:261080ms step_avg:72.50ms +[2025-09-02 08:37:55] [Rank 0] step:3621/10000 train_time:262432ms step_avg:72.48ms +[2025-09-02 08:37:55] [Rank 0] step:3621/10000 train_time:262432ms step_avg:72.48ms +[2025-09-02 08:37:57] [Rank 0] step:3641/10000 train_time:263919ms step_avg:72.49ms +[2025-09-02 08:37:57] [Rank 0] step:3641/10000 train_time:263919ms step_avg:72.49ms +[2025-09-02 08:37:58] [Rank 0] step:3661/10000 train_time:265407ms step_avg:72.50ms +[2025-09-02 08:37:58] [Rank 0] step:3661/10000 train_time:265407ms step_avg:72.50ms +[2025-09-02 08:38:00] [Rank 0] step:3681/10000 train_time:266895ms step_avg:72.51ms +[2025-09-02 08:38:00] [Rank 0] step:3681/10000 train_time:266895ms step_avg:72.51ms +[2025-09-02 08:38:01] [Rank 0] step:3701/10000 train_time:268382ms step_avg:72.52ms +[2025-09-02 08:38:01] [Rank 0] step:3701/10000 train_time:268382ms step_avg:72.52ms +[2025-09-02 08:38:03] [Rank 0] step:3721/10000 train_time:269897ms step_avg:72.53ms +[2025-09-02 08:38:03] [Rank 0] step:3721/10000 train_time:269897ms step_avg:72.53ms +[2025-09-02 08:38:04] [Rank 0] 
step:3741/10000 train_time:271422ms step_avg:72.55ms +[2025-09-02 08:38:04] [Rank 0] step:3741/10000 train_time:271422ms step_avg:72.55ms +[2025-09-02 08:38:06] [Rank 0] step:3761/10000 train_time:272946ms step_avg:72.57ms +[2025-09-02 08:38:06] [Rank 0] step:3761/10000 train_time:272946ms step_avg:72.57ms +[2025-09-02 08:38:07] [Rank 0] step:3781/10000 train_time:274469ms step_avg:72.59ms +[2025-09-02 08:38:07] [Rank 0] step:3781/10000 train_time:274469ms step_avg:72.59ms +[2025-09-02 08:38:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:38:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:38:21] [Rank 0] PRINT: step:3800/10000 val_loss:4.1937 svd_entropy: attn_qk:H=0.7249,top10E=0.30,eRank=149.4,q75/q25=76.32 attn_vo:H=0.7534,top10E=0.20,eRank=225.4,q75/q25=inf mlp_w1:H=0.7288,top10E=0.34,eRank=146.6,q75/q25=9.87 mlp_w2:H=0.8360,top10E=0.16,eRank=262.3,q75/q25=17.11 vo_prod:H=0.6206,top10E=0.28,eRank=88.8,q75/q25=inf train_time:276146ms step_avg:72.67ms +[2025-09-02 08:38:21] [Rank 0] PRINT: step:3800/10000 val_loss:4.1937 svd_entropy: attn_qk:H=0.7249,top10E=0.30,eRank=149.4,q75/q25=76.32 attn_vo:H=0.7534,top10E=0.20,eRank=225.4,q75/q25=inf mlp_w1:H=0.7288,top10E=0.34,eRank=146.6,q75/q25=9.87 mlp_w2:H=0.8360,top10E=0.16,eRank=262.3,q75/q25=17.11 vo_prod:H=0.6206,top10E=0.28,eRank=88.8,q75/q25=inf train_time:276146ms step_avg:72.67ms +[2025-09-02 08:38:21] [Rank 0] step:3801/10000 train_time:276158ms step_avg:72.65ms +[2025-09-02 08:38:21] [Rank 0] step:3801/10000 train_time:276158ms step_avg:72.65ms +[2025-09-02 08:38:22] [Rank 0] step:3821/10000 train_time:277555ms step_avg:72.64ms +[2025-09-02 08:38:22] [Rank 0] step:3821/10000 train_time:277555ms step_avg:72.64ms +[2025-09-02 08:38:24] [Rank 0] step:3841/10000 train_time:279080ms step_avg:72.66ms +[2025-09-02 
08:38:24] [Rank 0] step:3841/10000 train_time:279080ms step_avg:72.66ms +[2025-09-02 08:38:25] [Rank 0] step:3861/10000 train_time:280603ms step_avg:72.68ms +[2025-09-02 08:38:25] [Rank 0] step:3861/10000 train_time:280603ms step_avg:72.68ms +[2025-09-02 08:38:27] [Rank 0] step:3881/10000 train_time:282125ms step_avg:72.69ms +[2025-09-02 08:38:27] [Rank 0] step:3881/10000 train_time:282125ms step_avg:72.69ms +[2025-09-02 08:38:28] [Rank 0] step:3901/10000 train_time:283649ms step_avg:72.71ms +[2025-09-02 08:38:28] [Rank 0] step:3901/10000 train_time:283649ms step_avg:72.71ms +[2025-09-02 08:38:30] [Rank 0] step:3921/10000 train_time:285174ms step_avg:72.73ms +[2025-09-02 08:38:30] [Rank 0] step:3921/10000 train_time:285174ms step_avg:72.73ms +[2025-09-02 08:38:31] [Rank 0] step:3941/10000 train_time:286699ms step_avg:72.75ms +[2025-09-02 08:38:31] [Rank 0] step:3941/10000 train_time:286699ms step_avg:72.75ms +[2025-09-02 08:38:33] [Rank 0] step:3961/10000 train_time:288221ms step_avg:72.76ms +[2025-09-02 08:38:33] [Rank 0] step:3961/10000 train_time:288221ms step_avg:72.76ms +[2025-09-02 08:38:34] [Rank 0] step:3981/10000 train_time:289746ms step_avg:72.78ms +[2025-09-02 08:38:34] [Rank 0] step:3981/10000 train_time:289746ms step_avg:72.78ms +[2025-09-02 08:38:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:38:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:38:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.1688 svd_entropy: attn_qk:H=0.7285,top10E=0.29,eRank=151.9,q75/q25=78.01 attn_vo:H=0.7568,top10E=0.19,eRank=228.8,q75/q25=inf mlp_w1:H=0.7354,top10E=0.33,eRank=152.4,q75/q25=10.18 mlp_w2:H=0.8387,top10E=0.15,eRank=267.1,q75/q25=17.24 vo_prod:H=0.6249,top10E=0.28,eRank=91.5,q75/q25=inf train_time:291423ms step_avg:72.86ms +[2025-09-02 08:38:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.1688 svd_entropy: attn_qk:H=0.7285,top10E=0.29,eRank=151.9,q75/q25=78.01 attn_vo:H=0.7568,top10E=0.19,eRank=228.8,q75/q25=inf mlp_w1:H=0.7354,top10E=0.33,eRank=152.4,q75/q25=10.18 mlp_w2:H=0.8387,top10E=0.15,eRank=267.1,q75/q25=17.24 vo_prod:H=0.6249,top10E=0.28,eRank=91.5,q75/q25=inf train_time:291423ms step_avg:72.86ms +[2025-09-02 08:38:48] [Rank 0] step:4001/10000 train_time:291435ms step_avg:72.84ms +[2025-09-02 08:38:48] [Rank 0] step:4001/10000 train_time:291435ms step_avg:72.84ms +[2025-09-02 08:38:49] [Rank 0] step:4021/10000 train_time:292818ms step_avg:72.82ms +[2025-09-02 08:38:49] [Rank 0] step:4021/10000 train_time:292818ms step_avg:72.82ms +[2025-09-02 08:38:51] [Rank 0] step:4041/10000 train_time:294343ms step_avg:72.84ms +[2025-09-02 08:38:51] [Rank 0] step:4041/10000 train_time:294343ms step_avg:72.84ms +[2025-09-02 08:38:52] [Rank 0] step:4061/10000 train_time:295868ms step_avg:72.86ms +[2025-09-02 08:38:52] [Rank 0] step:4061/10000 train_time:295868ms step_avg:72.86ms +[2025-09-02 08:38:54] [Rank 0] step:4081/10000 train_time:297497ms step_avg:72.90ms +[2025-09-02 08:38:54] [Rank 0] step:4081/10000 train_time:297497ms step_avg:72.90ms +[2025-09-02 08:38:55] [Rank 0] step:4101/10000 train_time:299023ms step_avg:72.91ms +[2025-09-02 08:38:55] [Rank 0] step:4101/10000 train_time:299023ms step_avg:72.91ms +[2025-09-02 08:38:57] [Rank 0] step:4121/10000 train_time:300549ms step_avg:72.93ms +[2025-09-02 08:38:57] [Rank 0] step:4121/10000 train_time:300549ms step_avg:72.93ms +[2025-09-02 08:38:58] [Rank 0] 
step:4141/10000 train_time:302076ms step_avg:72.95ms +[2025-09-02 08:38:58] [Rank 0] step:4141/10000 train_time:302076ms step_avg:72.95ms +[2025-09-02 08:39:00] [Rank 0] step:4161/10000 train_time:303600ms step_avg:72.96ms +[2025-09-02 08:39:00] [Rank 0] step:4161/10000 train_time:303600ms step_avg:72.96ms +[2025-09-02 08:39:01] [Rank 0] step:4181/10000 train_time:305129ms step_avg:72.98ms +[2025-09-02 08:39:01] [Rank 0] step:4181/10000 train_time:305129ms step_avg:72.98ms +[2025-09-02 08:39:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:39:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:39:15] [Rank 0] PRINT: step:4200/10000 val_loss:4.1515 svd_entropy: attn_qk:H=0.7319,top10E=0.29,eRank=154.3,q75/q25=79.67 attn_vo:H=0.7599,top10E=0.19,eRank=232.1,q75/q25=inf mlp_w1:H=0.7416,top10E=0.33,eRank=157.9,q75/q25=10.50 mlp_w2:H=0.8413,top10E=0.15,eRank=271.8,q75/q25=17.53 vo_prod:H=0.6287,top10E=0.27,eRank=93.9,q75/q25=inf train_time:306809ms step_avg:73.05ms +[2025-09-02 08:39:15] [Rank 0] PRINT: step:4200/10000 val_loss:4.1515 svd_entropy: attn_qk:H=0.7319,top10E=0.29,eRank=154.3,q75/q25=79.67 attn_vo:H=0.7599,top10E=0.19,eRank=232.1,q75/q25=inf mlp_w1:H=0.7416,top10E=0.33,eRank=157.9,q75/q25=10.50 mlp_w2:H=0.8413,top10E=0.15,eRank=271.8,q75/q25=17.53 vo_prod:H=0.6287,top10E=0.27,eRank=93.9,q75/q25=inf train_time:306809ms step_avg:73.05ms +[2025-09-02 08:39:15] [Rank 0] step:4201/10000 train_time:306820ms step_avg:73.03ms +[2025-09-02 08:39:15] [Rank 0] step:4201/10000 train_time:306820ms step_avg:73.03ms +[2025-09-02 08:39:16] [Rank 0] step:4221/10000 train_time:308202ms step_avg:73.02ms +[2025-09-02 08:39:16] [Rank 0] step:4221/10000 train_time:308202ms step_avg:73.02ms +[2025-09-02 08:39:18] [Rank 0] step:4241/10000 train_time:309728ms step_avg:73.03ms +[2025-09-02 
08:39:18] [Rank 0] step:4241/10000 train_time:309728ms step_avg:73.03ms +[2025-09-02 08:39:19] [Rank 0] step:4261/10000 train_time:311251ms step_avg:73.05ms +[2025-09-02 08:39:19] [Rank 0] step:4261/10000 train_time:311251ms step_avg:73.05ms +[2025-09-02 08:39:21] [Rank 0] step:4281/10000 train_time:312774ms step_avg:73.06ms +[2025-09-02 08:39:21] [Rank 0] step:4281/10000 train_time:312774ms step_avg:73.06ms +[2025-09-02 08:39:22] [Rank 0] step:4301/10000 train_time:314298ms step_avg:73.08ms +[2025-09-02 08:39:22] [Rank 0] step:4301/10000 train_time:314298ms step_avg:73.08ms +[2025-09-02 08:39:24] [Rank 0] step:4321/10000 train_time:315825ms step_avg:73.09ms +[2025-09-02 08:39:24] [Rank 0] step:4321/10000 train_time:315825ms step_avg:73.09ms +[2025-09-02 08:39:25] [Rank 0] step:4341/10000 train_time:317349ms step_avg:73.10ms +[2025-09-02 08:39:25] [Rank 0] step:4341/10000 train_time:317349ms step_avg:73.10ms +[2025-09-02 08:39:27] [Rank 0] step:4361/10000 train_time:318874ms step_avg:73.12ms +[2025-09-02 08:39:27] [Rank 0] step:4361/10000 train_time:318874ms step_avg:73.12ms +[2025-09-02 08:39:28] [Rank 0] step:4381/10000 train_time:320396ms step_avg:73.13ms +[2025-09-02 08:39:28] [Rank 0] step:4381/10000 train_time:320396ms step_avg:73.13ms +[2025-09-02 08:39:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:39:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:39:41] [Rank 0] PRINT: step:4400/10000 val_loss:4.1499 svd_entropy: attn_qk:H=0.7350,top10E=0.28,eRank=156.5,q75/q25=81.08 attn_vo:H=0.7630,top10E=0.18,eRank=235.4,q75/q25=inf mlp_w1:H=0.7472,top10E=0.32,eRank=163.2,q75/q25=10.97 mlp_w2:H=0.8434,top10E=0.15,eRank=275.6,q75/q25=17.81 vo_prod:H=0.6324,top10E=0.27,eRank=96.4,q75/q25=inf train_time:322074ms step_avg:73.20ms +[2025-09-02 08:39:41] [Rank 0] PRINT: step:4400/10000 val_loss:4.1499 svd_entropy: attn_qk:H=0.7350,top10E=0.28,eRank=156.5,q75/q25=81.08 attn_vo:H=0.7630,top10E=0.18,eRank=235.4,q75/q25=inf mlp_w1:H=0.7472,top10E=0.32,eRank=163.2,q75/q25=10.97 mlp_w2:H=0.8434,top10E=0.15,eRank=275.6,q75/q25=17.81 vo_prod:H=0.6324,top10E=0.27,eRank=96.4,q75/q25=inf train_time:322074ms step_avg:73.20ms +[2025-09-02 08:39:42] [Rank 0] step:4401/10000 train_time:322085ms step_avg:73.18ms +[2025-09-02 08:39:42] [Rank 0] step:4401/10000 train_time:322085ms step_avg:73.18ms +[2025-09-02 08:39:43] [Rank 0] step:4421/10000 train_time:323459ms step_avg:73.16ms +[2025-09-02 08:39:43] [Rank 0] step:4421/10000 train_time:323459ms step_avg:73.16ms +[2025-09-02 08:39:45] [Rank 0] step:4441/10000 train_time:324980ms step_avg:73.18ms +[2025-09-02 08:39:45] [Rank 0] step:4441/10000 train_time:324980ms step_avg:73.18ms +[2025-09-02 08:39:46] [Rank 0] step:4461/10000 train_time:326508ms step_avg:73.19ms +[2025-09-02 08:39:46] [Rank 0] step:4461/10000 train_time:326508ms step_avg:73.19ms +[2025-09-02 08:39:48] [Rank 0] step:4481/10000 train_time:328038ms step_avg:73.21ms +[2025-09-02 08:39:48] [Rank 0] step:4481/10000 train_time:328038ms step_avg:73.21ms +[2025-09-02 08:39:49] [Rank 0] step:4501/10000 train_time:329568ms step_avg:73.22ms +[2025-09-02 08:39:49] [Rank 0] step:4501/10000 train_time:329568ms step_avg:73.22ms +[2025-09-02 08:39:51] [Rank 0] step:4521/10000 train_time:331097ms step_avg:73.24ms +[2025-09-02 08:39:51] [Rank 0] step:4521/10000 train_time:331097ms step_avg:73.24ms +[2025-09-02 08:39:52] [Rank 0] 
step:4541/10000 train_time:332627ms step_avg:73.25ms +[2025-09-02 08:39:52] [Rank 0] step:4541/10000 train_time:332627ms step_avg:73.25ms +[2025-09-02 08:39:54] [Rank 0] step:4561/10000 train_time:334157ms step_avg:73.26ms +[2025-09-02 08:39:54] [Rank 0] step:4561/10000 train_time:334157ms step_avg:73.26ms +[2025-09-02 08:39:55] [Rank 0] step:4581/10000 train_time:335688ms step_avg:73.28ms +[2025-09-02 08:39:55] [Rank 0] step:4581/10000 train_time:335688ms step_avg:73.28ms +[2025-09-02 08:39:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:39:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:40:08] [Rank 0] PRINT: step:4600/10000 val_loss:4.0945 svd_entropy: attn_qk:H=0.7381,top10E=0.28,eRank=158.8,q75/q25=83.29 attn_vo:H=0.7659,top10E=0.18,eRank=238.6,q75/q25=inf mlp_w1:H=0.7521,top10E=0.31,eRank=168.0,q75/q25=11.37 mlp_w2:H=0.8454,top10E=0.15,eRank=279.3,q75/q25=18.08 vo_prod:H=0.6360,top10E=0.26,eRank=98.9,q75/q25=inf train_time:337374ms step_avg:73.34ms +[2025-09-02 08:40:08] [Rank 0] PRINT: step:4600/10000 val_loss:4.0945 svd_entropy: attn_qk:H=0.7381,top10E=0.28,eRank=158.8,q75/q25=83.29 attn_vo:H=0.7659,top10E=0.18,eRank=238.6,q75/q25=inf mlp_w1:H=0.7521,top10E=0.31,eRank=168.0,q75/q25=11.37 mlp_w2:H=0.8454,top10E=0.15,eRank=279.3,q75/q25=18.08 vo_prod:H=0.6360,top10E=0.26,eRank=98.9,q75/q25=inf train_time:337374ms step_avg:73.34ms +[2025-09-02 08:40:09] [Rank 0] step:4601/10000 train_time:337385ms step_avg:73.33ms +[2025-09-02 08:40:09] [Rank 0] step:4601/10000 train_time:337385ms step_avg:73.33ms +[2025-09-02 08:40:10] [Rank 0] step:4621/10000 train_time:338779ms step_avg:73.31ms +[2025-09-02 08:40:10] [Rank 0] step:4621/10000 train_time:338779ms step_avg:73.31ms +[2025-09-02 08:40:12] [Rank 0] step:4641/10000 train_time:340313ms step_avg:73.33ms +[2025-09-02 
08:40:12] [Rank 0] step:4641/10000 train_time:340313ms step_avg:73.33ms +[2025-09-02 08:40:13] [Rank 0] step:4661/10000 train_time:341842ms step_avg:73.34ms +[2025-09-02 08:40:13] [Rank 0] step:4661/10000 train_time:341842ms step_avg:73.34ms +[2025-09-02 08:40:15] [Rank 0] step:4681/10000 train_time:343371ms step_avg:73.35ms +[2025-09-02 08:40:15] [Rank 0] step:4681/10000 train_time:343371ms step_avg:73.35ms +[2025-09-02 08:40:16] [Rank 0] step:4701/10000 train_time:344902ms step_avg:73.37ms +[2025-09-02 08:40:16] [Rank 0] step:4701/10000 train_time:344902ms step_avg:73.37ms +[2025-09-02 08:40:18] [Rank 0] step:4721/10000 train_time:346431ms step_avg:73.38ms +[2025-09-02 08:40:18] [Rank 0] step:4721/10000 train_time:346431ms step_avg:73.38ms +[2025-09-02 08:40:19] [Rank 0] step:4741/10000 train_time:347962ms step_avg:73.39ms +[2025-09-02 08:40:19] [Rank 0] step:4741/10000 train_time:347962ms step_avg:73.39ms +[2025-09-02 08:40:21] [Rank 0] step:4761/10000 train_time:349492ms step_avg:73.41ms +[2025-09-02 08:40:21] [Rank 0] step:4761/10000 train_time:349492ms step_avg:73.41ms +[2025-09-02 08:40:22] [Rank 0] step:4781/10000 train_time:351022ms step_avg:73.42ms +[2025-09-02 08:40:22] [Rank 0] step:4781/10000 train_time:351022ms step_avg:73.42ms +[2025-09-02 08:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:40:35] [Rank 0] PRINT: step:4800/10000 val_loss:4.0815 svd_entropy: attn_qk:H=0.7409,top10E=0.27,eRank=161.0,q75/q25=84.79 attn_vo:H=0.7685,top10E=0.18,eRank=241.6,q75/q25=inf mlp_w1:H=0.7567,top10E=0.31,eRank=172.7,q75/q25=11.81 mlp_w2:H=0.8472,top10E=0.14,eRank=282.6,q75/q25=18.32 vo_prod:H=0.6394,top10E=0.26,eRank=101.3,q75/q25=inf train_time:352710ms step_avg:73.48ms +[2025-09-02 08:40:35] [Rank 0] PRINT: step:4800/10000 val_loss:4.0815 svd_entropy: attn_qk:H=0.7409,top10E=0.27,eRank=161.0,q75/q25=84.79 attn_vo:H=0.7685,top10E=0.18,eRank=241.6,q75/q25=inf mlp_w1:H=0.7567,top10E=0.31,eRank=172.7,q75/q25=11.81 mlp_w2:H=0.8472,top10E=0.14,eRank=282.6,q75/q25=18.32 vo_prod:H=0.6394,top10E=0.26,eRank=101.3,q75/q25=inf train_time:352710ms step_avg:73.48ms +[2025-09-02 08:40:36] [Rank 0] step:4801/10000 train_time:352721ms step_avg:73.47ms +[2025-09-02 08:40:36] [Rank 0] step:4801/10000 train_time:352721ms step_avg:73.47ms +[2025-09-02 08:40:37] [Rank 0] step:4821/10000 train_time:354120ms step_avg:73.45ms +[2025-09-02 08:40:37] [Rank 0] step:4821/10000 train_time:354120ms step_avg:73.45ms +[2025-09-02 08:40:39] [Rank 0] step:4841/10000 train_time:355648ms step_avg:73.47ms +[2025-09-02 08:40:39] [Rank 0] step:4841/10000 train_time:355648ms step_avg:73.47ms +[2025-09-02 08:40:40] [Rank 0] step:4861/10000 train_time:357181ms step_avg:73.48ms +[2025-09-02 08:40:40] [Rank 0] step:4861/10000 train_time:357181ms step_avg:73.48ms +[2025-09-02 08:40:42] [Rank 0] step:4881/10000 train_time:358710ms step_avg:73.49ms +[2025-09-02 08:40:42] [Rank 0] step:4881/10000 train_time:358710ms step_avg:73.49ms +[2025-09-02 08:40:43] [Rank 0] step:4901/10000 train_time:360238ms step_avg:73.50ms +[2025-09-02 08:40:43] [Rank 0] step:4901/10000 train_time:360238ms step_avg:73.50ms +[2025-09-02 08:40:45] [Rank 0] step:4921/10000 train_time:361771ms step_avg:73.52ms +[2025-09-02 08:40:45] [Rank 0] step:4921/10000 train_time:361771ms step_avg:73.52ms +[2025-09-02 08:40:46] [Rank 
0] step:4941/10000 train_time:363305ms step_avg:73.53ms +[2025-09-02 08:40:46] [Rank 0] step:4941/10000 train_time:363305ms step_avg:73.53ms +[2025-09-02 08:40:48] [Rank 0] step:4961/10000 train_time:364835ms step_avg:73.54ms +[2025-09-02 08:40:48] [Rank 0] step:4961/10000 train_time:364835ms step_avg:73.54ms +[2025-09-02 08:40:49] [Rank 0] step:4981/10000 train_time:366369ms step_avg:73.55ms +[2025-09-02 08:40:49] [Rank 0] step:4981/10000 train_time:366369ms step_avg:73.55ms +[2025-09-02 08:40:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:40:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:41:02] [Rank 0] PRINT: step:5000/10000 val_loss:4.0611 svd_entropy: attn_qk:H=0.7435,top10E=0.27,eRank=163.0,q75/q25=86.07 attn_vo:H=0.7710,top10E=0.17,eRank=244.5,q75/q25=inf mlp_w1:H=0.7611,top10E=0.30,eRank=177.2,q75/q25=12.17 mlp_w2:H=0.8488,top10E=0.14,eRank=285.6,q75/q25=18.70 vo_prod:H=0.6424,top10E=0.25,eRank=103.4,q75/q25=inf train_time:368056ms step_avg:73.61ms +[2025-09-02 08:41:02] [Rank 0] PRINT: step:5000/10000 val_loss:4.0611 svd_entropy: attn_qk:H=0.7435,top10E=0.27,eRank=163.0,q75/q25=86.07 attn_vo:H=0.7710,top10E=0.17,eRank=244.5,q75/q25=inf mlp_w1:H=0.7611,top10E=0.30,eRank=177.2,q75/q25=12.17 mlp_w2:H=0.8488,top10E=0.14,eRank=285.6,q75/q25=18.70 vo_prod:H=0.6424,top10E=0.25,eRank=103.4,q75/q25=inf train_time:368056ms step_avg:73.61ms +[2025-09-02 08:41:03] [Rank 0] step:5001/10000 train_time:368067ms step_avg:73.60ms +[2025-09-02 08:41:03] [Rank 0] step:5001/10000 train_time:368067ms step_avg:73.60ms +[2025-09-02 08:41:04] [Rank 0] step:5021/10000 train_time:369470ms step_avg:73.58ms +[2025-09-02 08:41:04] [Rank 0] step:5021/10000 train_time:369470ms step_avg:73.58ms +[2025-09-02 08:41:06] [Rank 0] step:5041/10000 train_time:371001ms step_avg:73.60ms +[2025-09-02 
08:41:06] [Rank 0] step:5041/10000 train_time:371001ms step_avg:73.60ms +[2025-09-02 08:41:07] [Rank 0] step:5061/10000 train_time:372530ms step_avg:73.61ms +[2025-09-02 08:41:07] [Rank 0] step:5061/10000 train_time:372530ms step_avg:73.61ms +[2025-09-02 08:41:09] [Rank 0] step:5081/10000 train_time:374060ms step_avg:73.62ms +[2025-09-02 08:41:09] [Rank 0] step:5081/10000 train_time:374060ms step_avg:73.62ms +[2025-09-02 08:41:10] [Rank 0] step:5101/10000 train_time:375591ms step_avg:73.63ms +[2025-09-02 08:41:10] [Rank 0] step:5101/10000 train_time:375591ms step_avg:73.63ms +[2025-09-02 08:41:12] [Rank 0] step:5121/10000 train_time:377124ms step_avg:73.64ms +[2025-09-02 08:41:12] [Rank 0] step:5121/10000 train_time:377124ms step_avg:73.64ms +[2025-09-02 08:41:13] [Rank 0] step:5141/10000 train_time:378659ms step_avg:73.65ms +[2025-09-02 08:41:13] [Rank 0] step:5141/10000 train_time:378659ms step_avg:73.65ms +[2025-09-02 08:41:15] [Rank 0] step:5161/10000 train_time:380191ms step_avg:73.67ms +[2025-09-02 08:41:15] [Rank 0] step:5161/10000 train_time:380191ms step_avg:73.67ms +[2025-09-02 08:41:16] [Rank 0] step:5181/10000 train_time:381726ms step_avg:73.68ms +[2025-09-02 08:41:16] [Rank 0] step:5181/10000 train_time:381726ms step_avg:73.68ms +[2025-09-02 08:41:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:41:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:41:30] [Rank 0] PRINT: step:5200/10000 val_loss:4.0395 svd_entropy: attn_qk:H=0.7460,top10E=0.27,eRank=165.0,q75/q25=86.27 attn_vo:H=0.7734,top10E=0.17,eRank=247.2,q75/q25=inf mlp_w1:H=0.7653,top10E=0.29,eRank=181.6,q75/q25=12.54 mlp_w2:H=0.8502,top10E=0.14,eRank=288.4,q75/q25=18.90 vo_prod:H=0.6452,top10E=0.25,eRank=105.5,q75/q25=inf train_time:383439ms step_avg:73.74ms +[2025-09-02 08:41:30] [Rank 0] PRINT: step:5200/10000 val_loss:4.0395 svd_entropy: attn_qk:H=0.7460,top10E=0.27,eRank=165.0,q75/q25=86.27 attn_vo:H=0.7734,top10E=0.17,eRank=247.2,q75/q25=inf mlp_w1:H=0.7653,top10E=0.29,eRank=181.6,q75/q25=12.54 mlp_w2:H=0.8502,top10E=0.14,eRank=288.4,q75/q25=18.90 vo_prod:H=0.6452,top10E=0.25,eRank=105.5,q75/q25=inf train_time:383439ms step_avg:73.74ms +[2025-09-02 08:41:30] [Rank 0] step:5201/10000 train_time:383451ms step_avg:73.73ms +[2025-09-02 08:41:30] [Rank 0] step:5201/10000 train_time:383451ms step_avg:73.73ms +[2025-09-02 08:41:31] [Rank 0] step:5221/10000 train_time:384887ms step_avg:73.72ms +[2025-09-02 08:41:31] [Rank 0] step:5221/10000 train_time:384887ms step_avg:73.72ms +[2025-09-02 08:41:33] [Rank 0] step:5241/10000 train_time:386447ms step_avg:73.74ms +[2025-09-02 08:41:33] [Rank 0] step:5241/10000 train_time:386447ms step_avg:73.74ms +[2025-09-02 08:41:35] [Rank 0] step:5261/10000 train_time:388009ms step_avg:73.75ms +[2025-09-02 08:41:35] [Rank 0] step:5261/10000 train_time:388009ms step_avg:73.75ms +[2025-09-02 08:41:36] [Rank 0] step:5281/10000 train_time:389573ms step_avg:73.77ms +[2025-09-02 08:41:36] [Rank 0] step:5281/10000 train_time:389573ms step_avg:73.77ms +[2025-09-02 08:41:38] [Rank 0] step:5301/10000 train_time:391147ms step_avg:73.79ms +[2025-09-02 08:41:38] [Rank 0] step:5301/10000 train_time:391147ms step_avg:73.79ms +[2025-09-02 08:41:39] [Rank 0] step:5321/10000 train_time:392709ms step_avg:73.80ms +[2025-09-02 08:41:39] [Rank 0] step:5321/10000 train_time:392709ms step_avg:73.80ms +[2025-09-02 08:41:41] [Rank 
0] step:5341/10000 train_time:394269ms step_avg:73.82ms +[2025-09-02 08:41:41] [Rank 0] step:5341/10000 train_time:394269ms step_avg:73.82ms +[2025-09-02 08:41:42] [Rank 0] step:5361/10000 train_time:395835ms step_avg:73.84ms +[2025-09-02 08:41:42] [Rank 0] step:5361/10000 train_time:395835ms step_avg:73.84ms +[2025-09-02 08:41:44] [Rank 0] step:5381/10000 train_time:397405ms step_avg:73.85ms +[2025-09-02 08:41:44] [Rank 0] step:5381/10000 train_time:397405ms step_avg:73.85ms +[2025-09-02 08:41:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:41:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:41:57] [Rank 0] PRINT: step:5400/10000 val_loss:4.0236 svd_entropy: attn_qk:H=0.7483,top10E=0.26,eRank=166.8,q75/q25=87.06 attn_vo:H=0.7755,top10E=0.17,eRank=249.8,q75/q25=inf mlp_w1:H=0.7691,top10E=0.29,eRank=185.8,q75/q25=12.95 mlp_w2:H=0.8515,top10E=0.14,eRank=290.9,q75/q25=19.24 vo_prod:H=0.6480,top10E=0.25,eRank=107.5,q75/q25=inf train_time:399124ms step_avg:73.91ms +[2025-09-02 08:41:57] [Rank 0] PRINT: step:5400/10000 val_loss:4.0236 svd_entropy: attn_qk:H=0.7483,top10E=0.26,eRank=166.8,q75/q25=87.06 attn_vo:H=0.7755,top10E=0.17,eRank=249.8,q75/q25=inf mlp_w1:H=0.7691,top10E=0.29,eRank=185.8,q75/q25=12.95 mlp_w2:H=0.8515,top10E=0.14,eRank=290.9,q75/q25=19.24 vo_prod:H=0.6480,top10E=0.25,eRank=107.5,q75/q25=inf train_time:399124ms step_avg:73.91ms +[2025-09-02 08:41:57] [Rank 0] step:5401/10000 train_time:399135ms step_avg:73.90ms +[2025-09-02 08:41:57] [Rank 0] step:5401/10000 train_time:399135ms step_avg:73.90ms +[2025-09-02 08:41:59] [Rank 0] step:5421/10000 train_time:400548ms step_avg:73.89ms +[2025-09-02 08:41:59] [Rank 0] step:5421/10000 train_time:400548ms step_avg:73.89ms +[2025-09-02 08:42:00] [Rank 0] step:5441/10000 train_time:402107ms step_avg:73.90ms +[2025-09-02 
08:42:00] [Rank 0] step:5441/10000 train_time:402107ms step_avg:73.90ms +[2025-09-02 08:42:02] [Rank 0] step:5461/10000 train_time:403671ms step_avg:73.92ms +[2025-09-02 08:42:02] [Rank 0] step:5461/10000 train_time:403671ms step_avg:73.92ms +[2025-09-02 08:42:03] [Rank 0] step:5481/10000 train_time:405237ms step_avg:73.93ms +[2025-09-02 08:42:03] [Rank 0] step:5481/10000 train_time:405237ms step_avg:73.93ms +[2025-09-02 08:42:05] [Rank 0] step:5501/10000 train_time:406805ms step_avg:73.95ms +[2025-09-02 08:42:05] [Rank 0] step:5501/10000 train_time:406805ms step_avg:73.95ms +[2025-09-02 08:42:07] [Rank 0] step:5521/10000 train_time:408373ms step_avg:73.97ms +[2025-09-02 08:42:07] [Rank 0] step:5521/10000 train_time:408373ms step_avg:73.97ms +[2025-09-02 08:42:08] [Rank 0] step:5541/10000 train_time:409935ms step_avg:73.98ms +[2025-09-02 08:42:08] [Rank 0] step:5541/10000 train_time:409935ms step_avg:73.98ms +[2025-09-02 08:42:10] [Rank 0] step:5561/10000 train_time:411501ms step_avg:74.00ms +[2025-09-02 08:42:10] [Rank 0] step:5561/10000 train_time:411501ms step_avg:74.00ms +[2025-09-02 08:42:11] [Rank 0] step:5581/10000 train_time:413068ms step_avg:74.01ms +[2025-09-02 08:42:11] [Rank 0] step:5581/10000 train_time:413068ms step_avg:74.01ms +[2025-09-02 08:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:42:24] [Rank 0] PRINT: step:5600/10000 val_loss:4.0067 svd_entropy: attn_qk:H=0.7505,top10E=0.26,eRank=168.6,q75/q25=87.76 attn_vo:H=0.7776,top10E=0.16,eRank=252.2,q75/q25=inf mlp_w1:H=0.7726,top10E=0.28,eRank=189.6,q75/q25=13.34 mlp_w2:H=0.8527,top10E=0.14,eRank=293.3,q75/q25=19.51 vo_prod:H=0.6507,top10E=0.24,eRank=109.6,q75/q25=inf train_time:414788ms step_avg:74.07ms +[2025-09-02 08:42:24] [Rank 0] PRINT: step:5600/10000 val_loss:4.0067 svd_entropy: attn_qk:H=0.7505,top10E=0.26,eRank=168.6,q75/q25=87.76 attn_vo:H=0.7776,top10E=0.16,eRank=252.2,q75/q25=inf mlp_w1:H=0.7726,top10E=0.28,eRank=189.6,q75/q25=13.34 mlp_w2:H=0.8527,top10E=0.14,eRank=293.3,q75/q25=19.51 vo_prod:H=0.6507,top10E=0.24,eRank=109.6,q75/q25=inf train_time:414788ms step_avg:74.07ms +[2025-09-02 08:42:24] [Rank 0] step:5601/10000 train_time:414800ms step_avg:74.06ms +[2025-09-02 08:42:24] [Rank 0] step:5601/10000 train_time:414800ms step_avg:74.06ms +[2025-09-02 08:42:26] [Rank 0] step:5621/10000 train_time:416230ms step_avg:74.05ms +[2025-09-02 08:42:26] [Rank 0] step:5621/10000 train_time:416230ms step_avg:74.05ms +[2025-09-02 08:42:28] [Rank 0] step:5641/10000 train_time:417791ms step_avg:74.06ms +[2025-09-02 08:42:28] [Rank 0] step:5641/10000 train_time:417791ms step_avg:74.06ms +[2025-09-02 08:42:29] [Rank 0] step:5661/10000 train_time:419350ms step_avg:74.08ms +[2025-09-02 08:42:29] [Rank 0] step:5661/10000 train_time:419350ms step_avg:74.08ms +[2025-09-02 08:42:31] [Rank 0] step:5681/10000 train_time:420916ms step_avg:74.09ms +[2025-09-02 08:42:31] [Rank 0] step:5681/10000 train_time:420916ms step_avg:74.09ms +[2025-09-02 08:42:32] [Rank 0] step:5701/10000 train_time:422476ms step_avg:74.11ms +[2025-09-02 08:42:32] [Rank 0] step:5701/10000 train_time:422476ms step_avg:74.11ms +[2025-09-02 08:42:34] [Rank 0] step:5721/10000 train_time:424042ms step_avg:74.12ms +[2025-09-02 08:42:34] [Rank 0] step:5721/10000 train_time:424042ms step_avg:74.12ms +[2025-09-02 08:42:35] [Rank 
0] step:5741/10000 train_time:425603ms step_avg:74.13ms +[2025-09-02 08:42:35] [Rank 0] step:5741/10000 train_time:425603ms step_avg:74.13ms +[2025-09-02 08:42:37] [Rank 0] step:5761/10000 train_time:427166ms step_avg:74.15ms +[2025-09-02 08:42:37] [Rank 0] step:5761/10000 train_time:427166ms step_avg:74.15ms +[2025-09-02 08:42:39] [Rank 0] step:5781/10000 train_time:428729ms step_avg:74.16ms +[2025-09-02 08:42:39] [Rank 0] step:5781/10000 train_time:428729ms step_avg:74.16ms +[2025-09-02 08:42:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:42:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:42:52] [Rank 0] PRINT: step:5800/10000 val_loss:3.9975 svd_entropy: attn_qk:H=0.7526,top10E=0.26,eRank=170.4,q75/q25=88.55 attn_vo:H=0.7796,top10E=0.16,eRank=254.6,q75/q25=inf mlp_w1:H=0.7759,top10E=0.28,eRank=193.4,q75/q25=13.77 mlp_w2:H=0.8539,top10E=0.14,eRank=295.6,q75/q25=19.75 vo_prod:H=0.6532,top10E=0.24,eRank=111.5,q75/q25=inf train_time:430452ms step_avg:74.22ms +[2025-09-02 08:42:52] [Rank 0] PRINT: step:5800/10000 val_loss:3.9975 svd_entropy: attn_qk:H=0.7526,top10E=0.26,eRank=170.4,q75/q25=88.55 attn_vo:H=0.7796,top10E=0.16,eRank=254.6,q75/q25=inf mlp_w1:H=0.7759,top10E=0.28,eRank=193.4,q75/q25=13.77 mlp_w2:H=0.8539,top10E=0.14,eRank=295.6,q75/q25=19.75 vo_prod:H=0.6532,top10E=0.24,eRank=111.5,q75/q25=inf train_time:430452ms step_avg:74.22ms +[2025-09-02 08:42:52] [Rank 0] step:5801/10000 train_time:430463ms step_avg:74.20ms +[2025-09-02 08:42:52] [Rank 0] step:5801/10000 train_time:430463ms step_avg:74.20ms +[2025-09-02 08:42:53] [Rank 0] step:5821/10000 train_time:431885ms step_avg:74.19ms +[2025-09-02 08:42:53] [Rank 0] step:5821/10000 train_time:431885ms step_avg:74.19ms +[2025-09-02 08:42:55] [Rank 0] step:5841/10000 train_time:433445ms step_avg:74.21ms +[2025-09-02 
08:42:55] [Rank 0] step:5841/10000 train_time:433445ms step_avg:74.21ms +[2025-09-02 08:42:56] [Rank 0] step:5861/10000 train_time:435012ms step_avg:74.22ms +[2025-09-02 08:42:56] [Rank 0] step:5861/10000 train_time:435012ms step_avg:74.22ms +[2025-09-02 08:42:58] [Rank 0] step:5881/10000 train_time:436579ms step_avg:74.24ms +[2025-09-02 08:42:58] [Rank 0] step:5881/10000 train_time:436579ms step_avg:74.24ms +[2025-09-02 08:43:00] [Rank 0] step:5901/10000 train_time:438142ms step_avg:74.25ms +[2025-09-02 08:43:00] [Rank 0] step:5901/10000 train_time:438142ms step_avg:74.25ms +[2025-09-02 08:43:01] [Rank 0] step:5921/10000 train_time:439707ms step_avg:74.26ms +[2025-09-02 08:43:01] [Rank 0] step:5921/10000 train_time:439707ms step_avg:74.26ms +[2025-09-02 08:43:03] [Rank 0] step:5941/10000 train_time:441275ms step_avg:74.28ms +[2025-09-02 08:43:03] [Rank 0] step:5941/10000 train_time:441275ms step_avg:74.28ms +[2025-09-02 08:43:04] [Rank 0] step:5961/10000 train_time:442845ms step_avg:74.29ms +[2025-09-02 08:43:04] [Rank 0] step:5961/10000 train_time:442845ms step_avg:74.29ms +[2025-09-02 08:43:06] [Rank 0] step:5981/10000 train_time:444413ms step_avg:74.30ms +[2025-09-02 08:43:06] [Rank 0] step:5981/10000 train_time:444413ms step_avg:74.30ms +[2025-09-02 08:43:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:43:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:43:19] [Rank 0] PRINT: step:6000/10000 val_loss:3.9728 svd_entropy: attn_qk:H=0.7546,top10E=0.26,eRank=172.2,q75/q25=89.14 attn_vo:H=0.7814,top10E=0.16,eRank=256.9,q75/q25=inf mlp_w1:H=0.7791,top10E=0.27,eRank=197.1,q75/q25=14.12 mlp_w2:H=0.8550,top10E=0.13,eRank=297.9,q75/q25=19.97 vo_prod:H=0.6555,top10E=0.24,eRank=113.3,q75/q25=inf train_time:446135ms step_avg:74.36ms +[2025-09-02 08:43:19] [Rank 0] PRINT: step:6000/10000 val_loss:3.9728 svd_entropy: attn_qk:H=0.7546,top10E=0.26,eRank=172.2,q75/q25=89.14 attn_vo:H=0.7814,top10E=0.16,eRank=256.9,q75/q25=inf mlp_w1:H=0.7791,top10E=0.27,eRank=197.1,q75/q25=14.12 mlp_w2:H=0.8550,top10E=0.13,eRank=297.9,q75/q25=19.97 vo_prod:H=0.6555,top10E=0.24,eRank=113.3,q75/q25=inf train_time:446135ms step_avg:74.36ms +[2025-09-02 08:43:19] [Rank 0] step:6001/10000 train_time:446146ms step_avg:74.35ms +[2025-09-02 08:43:19] [Rank 0] step:6001/10000 train_time:446146ms step_avg:74.35ms +[2025-09-02 08:43:21] [Rank 0] step:6021/10000 train_time:447560ms step_avg:74.33ms +[2025-09-02 08:43:21] [Rank 0] step:6021/10000 train_time:447560ms step_avg:74.33ms +[2025-09-02 08:43:22] [Rank 0] step:6041/10000 train_time:449129ms step_avg:74.35ms +[2025-09-02 08:43:22] [Rank 0] step:6041/10000 train_time:449129ms step_avg:74.35ms +[2025-09-02 08:43:24] [Rank 0] step:6061/10000 train_time:450702ms step_avg:74.36ms +[2025-09-02 08:43:24] [Rank 0] step:6061/10000 train_time:450702ms step_avg:74.36ms +[2025-09-02 08:43:25] [Rank 0] step:6081/10000 train_time:452271ms step_avg:74.37ms +[2025-09-02 08:43:25] [Rank 0] step:6081/10000 train_time:452271ms step_avg:74.37ms +[2025-09-02 08:43:27] [Rank 0] step:6101/10000 train_time:453841ms step_avg:74.39ms +[2025-09-02 08:43:27] [Rank 0] step:6101/10000 train_time:453841ms step_avg:74.39ms +[2025-09-02 08:43:29] [Rank 0] step:6121/10000 train_time:455672ms step_avg:74.44ms +[2025-09-02 08:43:29] [Rank 0] step:6121/10000 train_time:455672ms step_avg:74.44ms +[2025-09-02 08:43:30] [Rank 
0] step:6141/10000 train_time:457251ms step_avg:74.46ms +[2025-09-02 08:43:30] [Rank 0] step:6141/10000 train_time:457251ms step_avg:74.46ms +[2025-09-02 08:43:32] [Rank 0] step:6161/10000 train_time:458821ms step_avg:74.47ms +[2025-09-02 08:43:32] [Rank 0] step:6161/10000 train_time:458821ms step_avg:74.47ms +[2025-09-02 08:43:33] [Rank 0] step:6181/10000 train_time:460390ms step_avg:74.48ms +[2025-09-02 08:43:33] [Rank 0] step:6181/10000 train_time:460390ms step_avg:74.48ms +[2025-09-02 08:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:43:46] [Rank 0] PRINT: step:6200/10000 val_loss:3.9576 svd_entropy: attn_qk:H=0.7565,top10E=0.25,eRank=173.8,q75/q25=88.92 attn_vo:H=0.7832,top10E=0.16,eRank=259.1,q75/q25=inf mlp_w1:H=0.7820,top10E=0.27,eRank=200.5,q75/q25=14.55 mlp_w2:H=0.8560,top10E=0.13,eRank=299.9,q75/q25=20.25 vo_prod:H=0.6577,top10E=0.23,eRank=115.2,q75/q25=inf train_time:462118ms step_avg:74.54ms +[2025-09-02 08:43:46] [Rank 0] PRINT: step:6200/10000 val_loss:3.9576 svd_entropy: attn_qk:H=0.7565,top10E=0.25,eRank=173.8,q75/q25=88.92 attn_vo:H=0.7832,top10E=0.16,eRank=259.1,q75/q25=inf mlp_w1:H=0.7820,top10E=0.27,eRank=200.5,q75/q25=14.55 mlp_w2:H=0.8560,top10E=0.13,eRank=299.9,q75/q25=20.25 vo_prod:H=0.6577,top10E=0.23,eRank=115.2,q75/q25=inf train_time:462118ms step_avg:74.54ms +[2025-09-02 08:43:47] [Rank 0] step:6201/10000 train_time:462131ms step_avg:74.53ms +[2025-09-02 08:43:47] [Rank 0] step:6201/10000 train_time:462131ms step_avg:74.53ms +[2025-09-02 08:43:48] [Rank 0] step:6221/10000 train_time:463547ms step_avg:74.51ms +[2025-09-02 08:43:48] [Rank 0] step:6221/10000 train_time:463547ms step_avg:74.51ms +[2025-09-02 08:43:50] [Rank 0] step:6241/10000 train_time:465112ms step_avg:74.53ms +[2025-09-02 
08:43:50] [Rank 0] step:6241/10000 train_time:465112ms step_avg:74.53ms +[2025-09-02 08:43:51] [Rank 0] step:6261/10000 train_time:466681ms step_avg:74.54ms +[2025-09-02 08:43:51] [Rank 0] step:6261/10000 train_time:466681ms step_avg:74.54ms +[2025-09-02 08:43:53] [Rank 0] step:6281/10000 train_time:468308ms step_avg:74.56ms +[2025-09-02 08:43:53] [Rank 0] step:6281/10000 train_time:468308ms step_avg:74.56ms +[2025-09-02 08:43:54] [Rank 0] step:6301/10000 train_time:469877ms step_avg:74.57ms +[2025-09-02 08:43:54] [Rank 0] step:6301/10000 train_time:469877ms step_avg:74.57ms +[2025-09-02 08:43:56] [Rank 0] step:6321/10000 train_time:471444ms step_avg:74.58ms +[2025-09-02 08:43:56] [Rank 0] step:6321/10000 train_time:471444ms step_avg:74.58ms +[2025-09-02 08:43:58] [Rank 0] step:6341/10000 train_time:473015ms step_avg:74.60ms +[2025-09-02 08:43:58] [Rank 0] step:6341/10000 train_time:473015ms step_avg:74.60ms +[2025-09-02 08:43:59] [Rank 0] step:6361/10000 train_time:474587ms step_avg:74.61ms +[2025-09-02 08:43:59] [Rank 0] step:6361/10000 train_time:474587ms step_avg:74.61ms +[2025-09-02 08:44:01] [Rank 0] step:6381/10000 train_time:476159ms step_avg:74.62ms +[2025-09-02 08:44:01] [Rank 0] step:6381/10000 train_time:476159ms step_avg:74.62ms +[2025-09-02 08:44:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:44:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:44:14] [Rank 0] PRINT: step:6400/10000 val_loss:3.9417 svd_entropy: attn_qk:H=0.7583,top10E=0.25,eRank=175.4,q75/q25=89.51 attn_vo:H=0.7848,top10E=0.16,eRank=261.1,q75/q25=inf mlp_w1:H=0.7846,top10E=0.27,eRank=203.5,q75/q25=14.93 mlp_w2:H=0.8570,top10E=0.13,eRank=302.0,q75/q25=20.38 vo_prod:H=0.6597,top10E=0.23,eRank=116.7,q75/q25=inf train_time:477885ms step_avg:74.67ms +[2025-09-02 08:44:14] [Rank 0] PRINT: step:6400/10000 val_loss:3.9417 svd_entropy: attn_qk:H=0.7583,top10E=0.25,eRank=175.4,q75/q25=89.51 attn_vo:H=0.7848,top10E=0.16,eRank=261.1,q75/q25=inf mlp_w1:H=0.7846,top10E=0.27,eRank=203.5,q75/q25=14.93 mlp_w2:H=0.8570,top10E=0.13,eRank=302.0,q75/q25=20.38 vo_prod:H=0.6597,top10E=0.23,eRank=116.7,q75/q25=inf train_time:477885ms step_avg:74.67ms +[2025-09-02 08:44:14] [Rank 0] step:6401/10000 train_time:477897ms step_avg:74.66ms +[2025-09-02 08:44:14] [Rank 0] step:6401/10000 train_time:477897ms step_avg:74.66ms +[2025-09-02 08:44:16] [Rank 0] step:6421/10000 train_time:479312ms step_avg:74.65ms +[2025-09-02 08:44:16] [Rank 0] step:6421/10000 train_time:479312ms step_avg:74.65ms +[2025-09-02 08:44:17] [Rank 0] step:6441/10000 train_time:480880ms step_avg:74.66ms +[2025-09-02 08:44:17] [Rank 0] step:6441/10000 train_time:480880ms step_avg:74.66ms +[2025-09-02 08:44:19] [Rank 0] step:6461/10000 train_time:482448ms step_avg:74.67ms +[2025-09-02 08:44:19] [Rank 0] step:6461/10000 train_time:482448ms step_avg:74.67ms +[2025-09-02 08:44:20] [Rank 0] step:6481/10000 train_time:484027ms step_avg:74.68ms +[2025-09-02 08:44:20] [Rank 0] step:6481/10000 train_time:484027ms step_avg:74.68ms +[2025-09-02 08:44:22] [Rank 0] step:6501/10000 train_time:485593ms step_avg:74.70ms +[2025-09-02 08:44:22] [Rank 0] step:6501/10000 train_time:485593ms step_avg:74.70ms +[2025-09-02 08:44:23] [Rank 0] step:6521/10000 train_time:487156ms step_avg:74.71ms +[2025-09-02 08:44:23] [Rank 0] step:6521/10000 train_time:487156ms step_avg:74.71ms +[2025-09-02 08:44:25] [Rank 
0] step:6541/10000 train_time:488726ms step_avg:74.72ms +[2025-09-02 08:44:25] [Rank 0] step:6541/10000 train_time:488726ms step_avg:74.72ms +[2025-09-02 08:44:27] [Rank 0] step:6561/10000 train_time:490298ms step_avg:74.73ms +[2025-09-02 08:44:27] [Rank 0] step:6561/10000 train_time:490298ms step_avg:74.73ms +[2025-09-02 08:44:28] [Rank 0] step:6581/10000 train_time:491867ms step_avg:74.74ms +[2025-09-02 08:44:28] [Rank 0] step:6581/10000 train_time:491867ms step_avg:74.74ms +[2025-09-02 08:44:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:44:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:44:41] [Rank 0] PRINT: step:6600/10000 val_loss:3.9321 svd_entropy: attn_qk:H=0.7599,top10E=0.25,eRank=176.9,q75/q25=89.57 attn_vo:H=0.7863,top10E=0.15,eRank=262.9,q75/q25=inf mlp_w1:H=0.7869,top10E=0.26,eRank=206.3,q75/q25=15.21 mlp_w2:H=0.8579,top10E=0.13,eRank=303.8,q75/q25=20.51 vo_prod:H=0.6616,top10E=0.23,eRank=118.3,q75/q25=inf train_time:493592ms step_avg:74.79ms +[2025-09-02 08:44:41] [Rank 0] PRINT: step:6600/10000 val_loss:3.9321 svd_entropy: attn_qk:H=0.7599,top10E=0.25,eRank=176.9,q75/q25=89.57 attn_vo:H=0.7863,top10E=0.15,eRank=262.9,q75/q25=inf mlp_w1:H=0.7869,top10E=0.26,eRank=206.3,q75/q25=15.21 mlp_w2:H=0.8579,top10E=0.13,eRank=303.8,q75/q25=20.51 vo_prod:H=0.6616,top10E=0.23,eRank=118.3,q75/q25=inf train_time:493592ms step_avg:74.79ms +[2025-09-02 08:44:41] [Rank 0] step:6601/10000 train_time:493604ms step_avg:74.78ms +[2025-09-02 08:44:41] [Rank 0] step:6601/10000 train_time:493604ms step_avg:74.78ms +[2025-09-02 08:44:43] [Rank 0] step:6621/10000 train_time:495026ms step_avg:74.77ms +[2025-09-02 08:44:43] [Rank 0] step:6621/10000 train_time:495026ms step_avg:74.77ms +[2025-09-02 08:44:45] [Rank 0] step:6641/10000 train_time:496598ms step_avg:74.78ms +[2025-09-02 
08:44:45] [Rank 0] step:6641/10000 train_time:496598ms step_avg:74.78ms +[2025-09-02 08:44:46] [Rank 0] step:6661/10000 train_time:498166ms step_avg:74.79ms +[2025-09-02 08:44:46] [Rank 0] step:6661/10000 train_time:498166ms step_avg:74.79ms +[2025-09-02 08:44:48] [Rank 0] step:6681/10000 train_time:499748ms step_avg:74.80ms +[2025-09-02 08:44:48] [Rank 0] step:6681/10000 train_time:499748ms step_avg:74.80ms +[2025-09-02 08:44:49] [Rank 0] step:6701/10000 train_time:501351ms step_avg:74.82ms +[2025-09-02 08:44:49] [Rank 0] step:6701/10000 train_time:501351ms step_avg:74.82ms +[2025-09-02 08:44:51] [Rank 0] step:6721/10000 train_time:502949ms step_avg:74.83ms +[2025-09-02 08:44:51] [Rank 0] step:6721/10000 train_time:502949ms step_avg:74.83ms +[2025-09-02 08:44:53] [Rank 0] step:6741/10000 train_time:504542ms step_avg:74.85ms +[2025-09-02 08:44:53] [Rank 0] step:6741/10000 train_time:504542ms step_avg:74.85ms +[2025-09-02 08:44:54] [Rank 0] step:6761/10000 train_time:506139ms step_avg:74.86ms +[2025-09-02 08:44:54] [Rank 0] step:6761/10000 train_time:506139ms step_avg:74.86ms +[2025-09-02 08:44:56] [Rank 0] step:6781/10000 train_time:507741ms step_avg:74.88ms +[2025-09-02 08:44:56] [Rank 0] step:6781/10000 train_time:507741ms step_avg:74.88ms +[2025-09-02 08:44:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:44:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:45:09] [Rank 0] PRINT: step:6800/10000 val_loss:3.9145 svd_entropy: attn_qk:H=0.7613,top10E=0.25,eRank=178.1,q75/q25=89.78 attn_vo:H=0.7876,top10E=0.15,eRank=264.7,q75/q25=inf mlp_w1:H=0.7891,top10E=0.26,eRank=209.0,q75/q25=15.44 mlp_w2:H=0.8589,top10E=0.13,eRank=305.7,q75/q25=20.64 vo_prod:H=0.6635,top10E=0.23,eRank=119.9,q75/q25=inf train_time:509501ms step_avg:74.93ms +[2025-09-02 08:45:09] [Rank 0] PRINT: step:6800/10000 val_loss:3.9145 svd_entropy: attn_qk:H=0.7613,top10E=0.25,eRank=178.1,q75/q25=89.78 attn_vo:H=0.7876,top10E=0.15,eRank=264.7,q75/q25=inf mlp_w1:H=0.7891,top10E=0.26,eRank=209.0,q75/q25=15.44 mlp_w2:H=0.8589,top10E=0.13,eRank=305.7,q75/q25=20.64 vo_prod:H=0.6635,top10E=0.23,eRank=119.9,q75/q25=inf train_time:509501ms step_avg:74.93ms +[2025-09-02 08:45:09] [Rank 0] step:6801/10000 train_time:509513ms step_avg:74.92ms +[2025-09-02 08:45:09] [Rank 0] step:6801/10000 train_time:509513ms step_avg:74.92ms +[2025-09-02 08:45:11] [Rank 0] step:6821/10000 train_time:510969ms step_avg:74.91ms +[2025-09-02 08:45:11] [Rank 0] step:6821/10000 train_time:510969ms step_avg:74.91ms +[2025-09-02 08:45:12] [Rank 0] step:6841/10000 train_time:512560ms step_avg:74.92ms +[2025-09-02 08:45:12] [Rank 0] step:6841/10000 train_time:512560ms step_avg:74.92ms +[2025-09-02 08:45:14] [Rank 0] step:6861/10000 train_time:514159ms step_avg:74.94ms +[2025-09-02 08:45:14] [Rank 0] step:6861/10000 train_time:514159ms step_avg:74.94ms +[2025-09-02 08:45:15] [Rank 0] step:6881/10000 train_time:515755ms step_avg:74.95ms +[2025-09-02 08:45:15] [Rank 0] step:6881/10000 train_time:515755ms step_avg:74.95ms +[2025-09-02 08:45:17] [Rank 0] step:6901/10000 train_time:517352ms step_avg:74.97ms +[2025-09-02 08:45:17] [Rank 0] step:6901/10000 train_time:517352ms step_avg:74.97ms +[2025-09-02 08:45:19] [Rank 0] step:6921/10000 train_time:518948ms step_avg:74.98ms +[2025-09-02 08:45:19] [Rank 0] step:6921/10000 train_time:518948ms step_avg:74.98ms +[2025-09-02 08:45:20] [Rank 
0] step:6941/10000 train_time:520553ms step_avg:75.00ms +[2025-09-02 08:45:20] [Rank 0] step:6941/10000 train_time:520553ms step_avg:75.00ms +[2025-09-02 08:45:22] [Rank 0] step:6961/10000 train_time:522168ms step_avg:75.01ms +[2025-09-02 08:45:22] [Rank 0] step:6961/10000 train_time:522168ms step_avg:75.01ms +[2025-09-02 08:45:23] [Rank 0] step:6981/10000 train_time:523770ms step_avg:75.03ms +[2025-09-02 08:45:23] [Rank 0] step:6981/10000 train_time:523770ms step_avg:75.03ms +[2025-09-02 08:45:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:45:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:45:37] [Rank 0] PRINT: step:7000/10000 val_loss:3.8993 svd_entropy: attn_qk:H=0.7626,top10E=0.25,eRank=179.3,q75/q25=89.81 attn_vo:H=0.7888,top10E=0.15,eRank=266.2,q75/q25=inf mlp_w1:H=0.7910,top10E=0.26,eRank=211.2,q75/q25=15.70 mlp_w2:H=0.8597,top10E=0.13,eRank=307.5,q75/q25=20.73 vo_prod:H=0.6651,top10E=0.23,eRank=121.3,q75/q25=inf train_time:525535ms step_avg:75.08ms +[2025-09-02 08:45:37] [Rank 0] PRINT: step:7000/10000 val_loss:3.8993 svd_entropy: attn_qk:H=0.7626,top10E=0.25,eRank=179.3,q75/q25=89.81 attn_vo:H=0.7888,top10E=0.15,eRank=266.2,q75/q25=inf mlp_w1:H=0.7910,top10E=0.26,eRank=211.2,q75/q25=15.70 mlp_w2:H=0.8597,top10E=0.13,eRank=307.5,q75/q25=20.73 vo_prod:H=0.6651,top10E=0.23,eRank=121.3,q75/q25=inf train_time:525535ms step_avg:75.08ms +[2025-09-02 08:45:37] [Rank 0] step:7001/10000 train_time:525547ms step_avg:75.07ms +[2025-09-02 08:45:37] [Rank 0] step:7001/10000 train_time:525547ms step_avg:75.07ms +[2025-09-02 08:45:38] [Rank 0] step:7021/10000 train_time:526993ms step_avg:75.06ms +[2025-09-02 08:45:38] [Rank 0] step:7021/10000 train_time:526993ms step_avg:75.06ms +[2025-09-02 08:45:40] [Rank 0] step:7041/10000 train_time:528592ms step_avg:75.07ms +[2025-09-02 
08:45:40] [Rank 0] step:7041/10000 train_time:528592ms step_avg:75.07ms +[2025-09-02 08:45:42] [Rank 0] step:7061/10000 train_time:530187ms step_avg:75.09ms +[2025-09-02 08:45:42] [Rank 0] step:7061/10000 train_time:530187ms step_avg:75.09ms +[2025-09-02 08:45:43] [Rank 0] step:7081/10000 train_time:531786ms step_avg:75.10ms +[2025-09-02 08:45:43] [Rank 0] step:7081/10000 train_time:531786ms step_avg:75.10ms +[2025-09-02 08:45:45] [Rank 0] step:7101/10000 train_time:533385ms step_avg:75.11ms +[2025-09-02 08:45:45] [Rank 0] step:7101/10000 train_time:533385ms step_avg:75.11ms +[2025-09-02 08:45:46] [Rank 0] step:7121/10000 train_time:534984ms step_avg:75.13ms +[2025-09-02 08:45:46] [Rank 0] step:7121/10000 train_time:534984ms step_avg:75.13ms +[2025-09-02 08:45:48] [Rank 0] step:7141/10000 train_time:536585ms step_avg:75.14ms +[2025-09-02 08:45:48] [Rank 0] step:7141/10000 train_time:536585ms step_avg:75.14ms +[2025-09-02 08:45:50] [Rank 0] step:7161/10000 train_time:538185ms step_avg:75.15ms +[2025-09-02 08:45:50] [Rank 0] step:7161/10000 train_time:538185ms step_avg:75.15ms +[2025-09-02 08:45:51] [Rank 0] step:7181/10000 train_time:539785ms step_avg:75.17ms +[2025-09-02 08:45:51] [Rank 0] step:7181/10000 train_time:539785ms step_avg:75.17ms +[2025-09-02 08:45:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:45:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:46:04] [Rank 0] PRINT: step:7200/10000 val_loss:3.8903 svd_entropy: attn_qk:H=0.7637,top10E=0.24,eRank=180.4,q75/q25=89.60 attn_vo:H=0.7899,top10E=0.15,eRank=267.7,q75/q25=inf mlp_w1:H=0.7927,top10E=0.25,eRank=213.4,q75/q25=15.89 mlp_w2:H=0.8605,top10E=0.13,eRank=309.1,q75/q25=20.85 vo_prod:H=0.6667,top10E=0.22,eRank=122.6,q75/q25=inf train_time:541551ms step_avg:75.22ms +[2025-09-02 08:46:04] [Rank 0] PRINT: step:7200/10000 val_loss:3.8903 svd_entropy: attn_qk:H=0.7637,top10E=0.24,eRank=180.4,q75/q25=89.60 attn_vo:H=0.7899,top10E=0.15,eRank=267.7,q75/q25=inf mlp_w1:H=0.7927,top10E=0.25,eRank=213.4,q75/q25=15.89 mlp_w2:H=0.8605,top10E=0.13,eRank=309.1,q75/q25=20.85 vo_prod:H=0.6667,top10E=0.22,eRank=122.6,q75/q25=inf train_time:541551ms step_avg:75.22ms +[2025-09-02 08:46:04] [Rank 0] step:7201/10000 train_time:541564ms step_avg:75.21ms +[2025-09-02 08:46:04] [Rank 0] step:7201/10000 train_time:541564ms step_avg:75.21ms +[2025-09-02 08:46:06] [Rank 0] step:7221/10000 train_time:543010ms step_avg:75.20ms +[2025-09-02 08:46:06] [Rank 0] step:7221/10000 train_time:543010ms step_avg:75.20ms +[2025-09-02 08:46:08] [Rank 0] step:7241/10000 train_time:544603ms step_avg:75.21ms +[2025-09-02 08:46:08] [Rank 0] step:7241/10000 train_time:544603ms step_avg:75.21ms +[2025-09-02 08:46:09] [Rank 0] step:7261/10000 train_time:546196ms step_avg:75.22ms +[2025-09-02 08:46:09] [Rank 0] step:7261/10000 train_time:546196ms step_avg:75.22ms +[2025-09-02 08:46:11] [Rank 0] step:7281/10000 train_time:547801ms step_avg:75.24ms +[2025-09-02 08:46:11] [Rank 0] step:7281/10000 train_time:547801ms step_avg:75.24ms +[2025-09-02 08:46:12] [Rank 0] step:7301/10000 train_time:549397ms step_avg:75.25ms +[2025-09-02 08:46:12] [Rank 0] step:7301/10000 train_time:549397ms step_avg:75.25ms +[2025-09-02 08:46:14] [Rank 0] step:7321/10000 train_time:551010ms step_avg:75.26ms +[2025-09-02 08:46:14] [Rank 0] step:7321/10000 train_time:551010ms step_avg:75.26ms +[2025-09-02 08:46:16] [Rank 
0] step:7341/10000 train_time:552607ms step_avg:75.28ms +[2025-09-02 08:46:16] [Rank 0] step:7341/10000 train_time:552607ms step_avg:75.28ms +[2025-09-02 08:46:17] [Rank 0] step:7361/10000 train_time:554209ms step_avg:75.29ms +[2025-09-02 08:46:17] [Rank 0] step:7361/10000 train_time:554209ms step_avg:75.29ms +[2025-09-02 08:46:19] [Rank 0] step:7381/10000 train_time:555815ms step_avg:75.30ms +[2025-09-02 08:46:19] [Rank 0] step:7381/10000 train_time:555815ms step_avg:75.30ms +[2025-09-02 08:46:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:46:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:46:32] [Rank 0] PRINT: step:7400/10000 val_loss:3.8697 svd_entropy: attn_qk:H=0.7648,top10E=0.24,eRank=181.4,q75/q25=89.74 attn_vo:H=0.7909,top10E=0.15,eRank=268.9,q75/q25=inf mlp_w1:H=0.7942,top10E=0.25,eRank=215.3,q75/q25=16.12 mlp_w2:H=0.8612,top10E=0.13,eRank=310.5,q75/q25=20.90 vo_prod:H=0.6680,top10E=0.22,eRank=123.8,q75/q25=inf train_time:557562ms step_avg:75.35ms +[2025-09-02 08:46:32] [Rank 0] PRINT: step:7400/10000 val_loss:3.8697 svd_entropy: attn_qk:H=0.7648,top10E=0.24,eRank=181.4,q75/q25=89.74 attn_vo:H=0.7909,top10E=0.15,eRank=268.9,q75/q25=inf mlp_w1:H=0.7942,top10E=0.25,eRank=215.3,q75/q25=16.12 mlp_w2:H=0.8612,top10E=0.13,eRank=310.5,q75/q25=20.90 vo_prod:H=0.6680,top10E=0.22,eRank=123.8,q75/q25=inf train_time:557562ms step_avg:75.35ms +[2025-09-02 08:46:32] [Rank 0] step:7401/10000 train_time:557574ms step_avg:75.34ms +[2025-09-02 08:46:32] [Rank 0] step:7401/10000 train_time:557574ms step_avg:75.34ms +[2025-09-02 08:46:34] [Rank 0] step:7421/10000 train_time:559040ms step_avg:75.33ms +[2025-09-02 08:46:34] [Rank 0] step:7421/10000 train_time:559040ms step_avg:75.33ms +[2025-09-02 08:46:35] [Rank 0] step:7441/10000 train_time:560637ms step_avg:75.34ms +[2025-09-02 
08:46:35] [Rank 0] step:7441/10000 train_time:560637ms step_avg:75.34ms +[2025-09-02 08:46:37] [Rank 0] step:7461/10000 train_time:562237ms step_avg:75.36ms +[2025-09-02 08:46:37] [Rank 0] step:7461/10000 train_time:562237ms step_avg:75.36ms +[2025-09-02 08:46:39] [Rank 0] step:7481/10000 train_time:563841ms step_avg:75.37ms +[2025-09-02 08:46:39] [Rank 0] step:7481/10000 train_time:563841ms step_avg:75.37ms +[2025-09-02 08:46:40] [Rank 0] step:7501/10000 train_time:565444ms step_avg:75.38ms +[2025-09-02 08:46:40] [Rank 0] step:7501/10000 train_time:565444ms step_avg:75.38ms +[2025-09-02 08:46:42] [Rank 0] step:7521/10000 train_time:567051ms step_avg:75.40ms +[2025-09-02 08:46:42] [Rank 0] step:7521/10000 train_time:567051ms step_avg:75.40ms +[2025-09-02 08:46:43] [Rank 0] step:7541/10000 train_time:568665ms step_avg:75.41ms +[2025-09-02 08:46:43] [Rank 0] step:7541/10000 train_time:568665ms step_avg:75.41ms +[2025-09-02 08:46:45] [Rank 0] step:7561/10000 train_time:570257ms step_avg:75.42ms +[2025-09-02 08:46:45] [Rank 0] step:7561/10000 train_time:570257ms step_avg:75.42ms +[2025-09-02 08:46:47] [Rank 0] step:7581/10000 train_time:571868ms step_avg:75.43ms +[2025-09-02 08:46:47] [Rank 0] step:7581/10000 train_time:571868ms step_avg:75.43ms +[2025-09-02 08:46:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:46:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:47:00] [Rank 0] PRINT: step:7600/10000 val_loss:3.8669 svd_entropy: attn_qk:H=0.7658,top10E=0.24,eRank=182.3,q75/q25=89.24 attn_vo:H=0.7918,top10E=0.15,eRank=270.1,q75/q25=inf mlp_w1:H=0.7956,top10E=0.25,eRank=217.1,q75/q25=16.20 mlp_w2:H=0.8619,top10E=0.13,eRank=311.9,q75/q25=20.93 vo_prod:H=0.6693,top10E=0.22,eRank=124.9,q75/q25=inf train_time:573640ms step_avg:75.48ms +[2025-09-02 08:47:00] [Rank 0] PRINT: step:7600/10000 val_loss:3.8669 svd_entropy: attn_qk:H=0.7658,top10E=0.24,eRank=182.3,q75/q25=89.24 attn_vo:H=0.7918,top10E=0.15,eRank=270.1,q75/q25=inf mlp_w1:H=0.7956,top10E=0.25,eRank=217.1,q75/q25=16.20 mlp_w2:H=0.8619,top10E=0.13,eRank=311.9,q75/q25=20.93 vo_prod:H=0.6693,top10E=0.22,eRank=124.9,q75/q25=inf train_time:573640ms step_avg:75.48ms +[2025-09-02 08:47:00] [Rank 0] step:7601/10000 train_time:573652ms step_avg:75.47ms +[2025-09-02 08:47:00] [Rank 0] step:7601/10000 train_time:573652ms step_avg:75.47ms +[2025-09-02 08:47:02] [Rank 0] step:7621/10000 train_time:575104ms step_avg:75.46ms +[2025-09-02 08:47:02] [Rank 0] step:7621/10000 train_time:575104ms step_avg:75.46ms +[2025-09-02 08:47:03] [Rank 0] step:7641/10000 train_time:576706ms step_avg:75.48ms +[2025-09-02 08:47:03] [Rank 0] step:7641/10000 train_time:576706ms step_avg:75.48ms +[2025-09-02 08:47:05] [Rank 0] step:7661/10000 train_time:578308ms step_avg:75.49ms +[2025-09-02 08:47:05] [Rank 0] step:7661/10000 train_time:578308ms step_avg:75.49ms +[2025-09-02 08:47:06] [Rank 0] step:7681/10000 train_time:579904ms step_avg:75.50ms +[2025-09-02 08:47:06] [Rank 0] step:7681/10000 train_time:579904ms step_avg:75.50ms +[2025-09-02 08:47:08] [Rank 0] step:7701/10000 train_time:581501ms step_avg:75.51ms +[2025-09-02 08:47:08] [Rank 0] step:7701/10000 train_time:581501ms step_avg:75.51ms +[2025-09-02 08:47:10] [Rank 0] step:7721/10000 train_time:583115ms step_avg:75.52ms +[2025-09-02 08:47:10] [Rank 0] step:7721/10000 train_time:583115ms step_avg:75.52ms +[2025-09-02 08:47:11] [Rank 
0] step:7741/10000 train_time:584717ms step_avg:75.54ms +[2025-09-02 08:47:11] [Rank 0] step:7741/10000 train_time:584717ms step_avg:75.54ms +[2025-09-02 08:47:13] [Rank 0] step:7761/10000 train_time:586321ms step_avg:75.55ms +[2025-09-02 08:47:13] [Rank 0] step:7761/10000 train_time:586321ms step_avg:75.55ms +[2025-09-02 08:47:15] [Rank 0] step:7781/10000 train_time:587932ms step_avg:75.56ms +[2025-09-02 08:47:15] [Rank 0] step:7781/10000 train_time:587932ms step_avg:75.56ms +[2025-09-02 08:47:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:47:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:47:28] [Rank 0] PRINT: step:7800/10000 val_loss:3.8514 svd_entropy: attn_qk:H=0.7667,top10E=0.24,eRank=183.2,q75/q25=89.41 attn_vo:H=0.7926,top10E=0.15,eRank=271.2,q75/q25=inf mlp_w1:H=0.7969,top10E=0.25,eRank=218.7,q75/q25=16.33 mlp_w2:H=0.8625,top10E=0.13,eRank=313.3,q75/q25=21.02 vo_prod:H=0.6703,top10E=0.22,eRank=125.9,q75/q25=inf train_time:589711ms step_avg:75.60ms +[2025-09-02 08:47:28] [Rank 0] PRINT: step:7800/10000 val_loss:3.8514 svd_entropy: attn_qk:H=0.7667,top10E=0.24,eRank=183.2,q75/q25=89.41 attn_vo:H=0.7926,top10E=0.15,eRank=271.2,q75/q25=inf mlp_w1:H=0.7969,top10E=0.25,eRank=218.7,q75/q25=16.33 mlp_w2:H=0.8625,top10E=0.13,eRank=313.3,q75/q25=21.02 vo_prod:H=0.6703,top10E=0.22,eRank=125.9,q75/q25=inf train_time:589711ms step_avg:75.60ms +[2025-09-02 08:47:28] [Rank 0] step:7801/10000 train_time:589722ms step_avg:75.60ms +[2025-09-02 08:47:28] [Rank 0] step:7801/10000 train_time:589722ms step_avg:75.60ms +[2025-09-02 08:47:30] [Rank 0] step:7821/10000 train_time:591164ms step_avg:75.59ms +[2025-09-02 08:47:30] [Rank 0] step:7821/10000 train_time:591164ms step_avg:75.59ms +[2025-09-02 08:47:31] [Rank 0] step:7841/10000 train_time:592765ms step_avg:75.60ms +[2025-09-02 
08:47:31] [Rank 0] step:7841/10000 train_time:592765ms step_avg:75.60ms +[2025-09-02 08:47:33] [Rank 0] step:7861/10000 train_time:594373ms step_avg:75.61ms +[2025-09-02 08:47:33] [Rank 0] step:7861/10000 train_time:594373ms step_avg:75.61ms +[2025-09-02 08:47:34] [Rank 0] step:7881/10000 train_time:595986ms step_avg:75.62ms +[2025-09-02 08:47:34] [Rank 0] step:7881/10000 train_time:595986ms step_avg:75.62ms +[2025-09-02 08:47:36] [Rank 0] step:7901/10000 train_time:597589ms step_avg:75.63ms +[2025-09-02 08:47:36] [Rank 0] step:7901/10000 train_time:597589ms step_avg:75.63ms +[2025-09-02 08:47:38] [Rank 0] step:7921/10000 train_time:599194ms step_avg:75.65ms +[2025-09-02 08:47:38] [Rank 0] step:7921/10000 train_time:599194ms step_avg:75.65ms +[2025-09-02 08:47:39] [Rank 0] step:7941/10000 train_time:600804ms step_avg:75.66ms +[2025-09-02 08:47:39] [Rank 0] step:7941/10000 train_time:600804ms step_avg:75.66ms +[2025-09-02 08:47:41] [Rank 0] step:7961/10000 train_time:602419ms step_avg:75.67ms +[2025-09-02 08:47:41] [Rank 0] step:7961/10000 train_time:602419ms step_avg:75.67ms +[2025-09-02 08:47:42] [Rank 0] step:7981/10000 train_time:604021ms step_avg:75.68ms +[2025-09-02 08:47:42] [Rank 0] step:7981/10000 train_time:604021ms step_avg:75.68ms +[2025-09-02 08:47:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:47:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:47:56] [Rank 0] PRINT: step:8000/10000 val_loss:3.8374 svd_entropy: attn_qk:H=0.7675,top10E=0.24,eRank=184.0,q75/q25=88.92 attn_vo:H=0.7934,top10E=0.15,eRank=272.2,q75/q25=inf mlp_w1:H=0.7980,top10E=0.25,eRank=220.1,q75/q25=16.40 mlp_w2:H=0.8631,top10E=0.13,eRank=314.6,q75/q25=21.03 vo_prod:H=0.6714,top10E=0.22,eRank=126.9,q75/q25=inf train_time:605788ms step_avg:75.72ms +[2025-09-02 08:47:56] [Rank 0] PRINT: step:8000/10000 val_loss:3.8374 svd_entropy: attn_qk:H=0.7675,top10E=0.24,eRank=184.0,q75/q25=88.92 attn_vo:H=0.7934,top10E=0.15,eRank=272.2,q75/q25=inf mlp_w1:H=0.7980,top10E=0.25,eRank=220.1,q75/q25=16.40 mlp_w2:H=0.8631,top10E=0.13,eRank=314.6,q75/q25=21.03 vo_prod:H=0.6714,top10E=0.22,eRank=126.9,q75/q25=inf train_time:605788ms step_avg:75.72ms +[2025-09-02 08:47:56] [Rank 0] step:8001/10000 train_time:605800ms step_avg:75.72ms +[2025-09-02 08:47:56] [Rank 0] step:8001/10000 train_time:605800ms step_avg:75.72ms +[2025-09-02 08:47:58] [Rank 0] step:8021/10000 train_time:607258ms step_avg:75.71ms +[2025-09-02 08:47:58] [Rank 0] step:8021/10000 train_time:607258ms step_avg:75.71ms +[2025-09-02 08:47:59] [Rank 0] step:8041/10000 train_time:608872ms step_avg:75.72ms +[2025-09-02 08:47:59] [Rank 0] step:8041/10000 train_time:608872ms step_avg:75.72ms +[2025-09-02 08:48:01] [Rank 0] step:8061/10000 train_time:610474ms step_avg:75.73ms +[2025-09-02 08:48:01] [Rank 0] step:8061/10000 train_time:610474ms step_avg:75.73ms +[2025-09-02 08:48:02] [Rank 0] step:8081/10000 train_time:612073ms step_avg:75.74ms +[2025-09-02 08:48:02] [Rank 0] step:8081/10000 train_time:612073ms step_avg:75.74ms +[2025-09-02 08:48:04] [Rank 0] step:8101/10000 train_time:613688ms step_avg:75.75ms +[2025-09-02 08:48:04] [Rank 0] step:8101/10000 train_time:613688ms step_avg:75.75ms +[2025-09-02 08:48:06] [Rank 0] step:8121/10000 train_time:615288ms step_avg:75.77ms +[2025-09-02 08:48:06] [Rank 0] step:8121/10000 train_time:615288ms step_avg:75.77ms +[2025-09-02 08:48:07] [Rank 
0] step:8141/10000 train_time:616989ms step_avg:75.79ms +[2025-09-02 08:48:07] [Rank 0] step:8141/10000 train_time:616989ms step_avg:75.79ms +[2025-09-02 08:48:09] [Rank 0] step:8161/10000 train_time:618604ms step_avg:75.80ms +[2025-09-02 08:48:09] [Rank 0] step:8161/10000 train_time:618604ms step_avg:75.80ms +[2025-09-02 08:48:11] [Rank 0] step:8181/10000 train_time:620240ms step_avg:75.81ms +[2025-09-02 08:48:11] [Rank 0] step:8181/10000 train_time:620240ms step_avg:75.81ms +[2025-09-02 08:48:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:48:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:48:24] [Rank 0] PRINT: step:8200/10000 val_loss:3.8288 svd_entropy: attn_qk:H=0.7683,top10E=0.24,eRank=184.7,q75/q25=88.88 attn_vo:H=0.7941,top10E=0.14,eRank=273.2,q75/q25=inf mlp_w1:H=0.7990,top10E=0.25,eRank=221.4,q75/q25=16.50 mlp_w2:H=0.8637,top10E=0.12,eRank=315.8,q75/q25=21.03 vo_prod:H=0.6725,top10E=0.22,eRank=127.9,q75/q25=inf train_time:622059ms step_avg:75.86ms +[2025-09-02 08:48:24] [Rank 0] PRINT: step:8200/10000 val_loss:3.8288 svd_entropy: attn_qk:H=0.7683,top10E=0.24,eRank=184.7,q75/q25=88.88 attn_vo:H=0.7941,top10E=0.14,eRank=273.2,q75/q25=inf mlp_w1:H=0.7990,top10E=0.25,eRank=221.4,q75/q25=16.50 mlp_w2:H=0.8637,top10E=0.12,eRank=315.8,q75/q25=21.03 vo_prod:H=0.6725,top10E=0.22,eRank=127.9,q75/q25=inf train_time:622059ms step_avg:75.86ms +[2025-09-02 08:48:24] [Rank 0] step:8201/10000 train_time:622071ms step_avg:75.85ms +[2025-09-02 08:48:24] [Rank 0] step:8201/10000 train_time:622071ms step_avg:75.85ms +[2025-09-02 08:48:26] [Rank 0] step:8221/10000 train_time:623558ms step_avg:75.85ms +[2025-09-02 08:48:26] [Rank 0] step:8221/10000 train_time:623558ms step_avg:75.85ms +[2025-09-02 08:48:27] [Rank 0] step:8241/10000 train_time:625195ms step_avg:75.86ms +[2025-09-02 
08:48:27] [Rank 0] step:8241/10000 train_time:625195ms step_avg:75.86ms +[2025-09-02 08:48:29] [Rank 0] step:8261/10000 train_time:626821ms step_avg:75.88ms +[2025-09-02 08:48:29] [Rank 0] step:8261/10000 train_time:626821ms step_avg:75.88ms +[2025-09-02 08:48:30] [Rank 0] step:8281/10000 train_time:628455ms step_avg:75.89ms +[2025-09-02 08:48:30] [Rank 0] step:8281/10000 train_time:628455ms step_avg:75.89ms +[2025-09-02 08:48:32] [Rank 0] step:8301/10000 train_time:630088ms step_avg:75.91ms +[2025-09-02 08:48:32] [Rank 0] step:8301/10000 train_time:630088ms step_avg:75.91ms +[2025-09-02 08:48:34] [Rank 0] step:8321/10000 train_time:631709ms step_avg:75.92ms +[2025-09-02 08:48:34] [Rank 0] step:8321/10000 train_time:631709ms step_avg:75.92ms +[2025-09-02 08:48:35] [Rank 0] step:8341/10000 train_time:633343ms step_avg:75.93ms +[2025-09-02 08:48:35] [Rank 0] step:8341/10000 train_time:633343ms step_avg:75.93ms +[2025-09-02 08:48:37] [Rank 0] step:8361/10000 train_time:634975ms step_avg:75.94ms +[2025-09-02 08:48:37] [Rank 0] step:8361/10000 train_time:634975ms step_avg:75.94ms +[2025-09-02 08:48:39] [Rank 0] step:8381/10000 train_time:636605ms step_avg:75.96ms +[2025-09-02 08:48:39] [Rank 0] step:8381/10000 train_time:636605ms step_avg:75.96ms +[2025-09-02 08:48:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:48:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:48:52] [Rank 0] PRINT: step:8400/10000 val_loss:3.8189 svd_entropy: attn_qk:H=0.7689,top10E=0.24,eRank=185.3,q75/q25=88.68 attn_vo:H=0.7947,top10E=0.14,eRank=274.0,q75/q25=inf mlp_w1:H=0.7999,top10E=0.24,eRank=222.5,q75/q25=16.56 mlp_w2:H=0.8642,top10E=0.12,eRank=316.9,q75/q25=21.01 vo_prod:H=0.6735,top10E=0.22,eRank=128.7,q75/q25=inf train_time:638396ms step_avg:76.00ms +[2025-09-02 08:48:52] [Rank 0] PRINT: step:8400/10000 val_loss:3.8189 svd_entropy: attn_qk:H=0.7689,top10E=0.24,eRank=185.3,q75/q25=88.68 attn_vo:H=0.7947,top10E=0.14,eRank=274.0,q75/q25=inf mlp_w1:H=0.7999,top10E=0.24,eRank=222.5,q75/q25=16.56 mlp_w2:H=0.8642,top10E=0.12,eRank=316.9,q75/q25=21.01 vo_prod:H=0.6735,top10E=0.22,eRank=128.7,q75/q25=inf train_time:638396ms step_avg:76.00ms +[2025-09-02 08:48:52] [Rank 0] step:8401/10000 train_time:638407ms step_avg:75.99ms +[2025-09-02 08:48:52] [Rank 0] step:8401/10000 train_time:638407ms step_avg:75.99ms +[2025-09-02 08:48:54] [Rank 0] step:8421/10000 train_time:639876ms step_avg:75.99ms +[2025-09-02 08:48:54] [Rank 0] step:8421/10000 train_time:639876ms step_avg:75.99ms +[2025-09-02 08:48:55] [Rank 0] step:8441/10000 train_time:641506ms step_avg:76.00ms +[2025-09-02 08:48:55] [Rank 0] step:8441/10000 train_time:641506ms step_avg:76.00ms +[2025-09-02 08:48:57] [Rank 0] step:8461/10000 train_time:643131ms step_avg:76.01ms +[2025-09-02 08:48:57] [Rank 0] step:8461/10000 train_time:643131ms step_avg:76.01ms +[2025-09-02 08:48:59] [Rank 0] step:8481/10000 train_time:644766ms step_avg:76.02ms +[2025-09-02 08:48:59] [Rank 0] step:8481/10000 train_time:644766ms step_avg:76.02ms +[2025-09-02 08:49:00] [Rank 0] step:8501/10000 train_time:646419ms step_avg:76.04ms +[2025-09-02 08:49:00] [Rank 0] step:8501/10000 train_time:646419ms step_avg:76.04ms +[2025-09-02 08:49:02] [Rank 0] step:8521/10000 train_time:648058ms step_avg:76.05ms +[2025-09-02 08:49:02] [Rank 0] step:8521/10000 train_time:648058ms step_avg:76.05ms +[2025-09-02 08:49:04] [Rank 
0] step:8541/10000 train_time:649701ms step_avg:76.07ms +[2025-09-02 08:49:04] [Rank 0] step:8541/10000 train_time:649701ms step_avg:76.07ms +[2025-09-02 08:49:05] [Rank 0] step:8561/10000 train_time:651335ms step_avg:76.08ms +[2025-09-02 08:49:05] [Rank 0] step:8561/10000 train_time:651335ms step_avg:76.08ms +[2025-09-02 08:49:07] [Rank 0] step:8581/10000 train_time:652969ms step_avg:76.09ms +[2025-09-02 08:49:07] [Rank 0] step:8581/10000 train_time:652969ms step_avg:76.09ms +[2025-09-02 08:49:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:49:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:49:20] [Rank 0] PRINT: step:8600/10000 val_loss:3.8098 svd_entropy: attn_qk:H=0.7694,top10E=0.24,eRank=185.9,q75/q25=88.59 attn_vo:H=0.7952,top10E=0.14,eRank=274.7,q75/q25=inf mlp_w1:H=0.8007,top10E=0.24,eRank=223.6,q75/q25=16.64 mlp_w2:H=0.8647,top10E=0.12,eRank=317.8,q75/q25=20.98 vo_prod:H=0.6743,top10E=0.22,eRank=129.5,q75/q25=inf train_time:654760ms step_avg:76.13ms +[2025-09-02 08:49:20] [Rank 0] PRINT: step:8600/10000 val_loss:3.8098 svd_entropy: attn_qk:H=0.7694,top10E=0.24,eRank=185.9,q75/q25=88.59 attn_vo:H=0.7952,top10E=0.14,eRank=274.7,q75/q25=inf mlp_w1:H=0.8007,top10E=0.24,eRank=223.6,q75/q25=16.64 mlp_w2:H=0.8647,top10E=0.12,eRank=317.8,q75/q25=20.98 vo_prod:H=0.6743,top10E=0.22,eRank=129.5,q75/q25=inf train_time:654760ms step_avg:76.13ms +[2025-09-02 08:49:20] [Rank 0] step:8601/10000 train_time:654772ms step_avg:76.13ms +[2025-09-02 08:49:20] [Rank 0] step:8601/10000 train_time:654772ms step_avg:76.13ms +[2025-09-02 08:49:22] [Rank 0] step:8621/10000 train_time:656254ms step_avg:76.12ms +[2025-09-02 08:49:22] [Rank 0] step:8621/10000 train_time:656254ms step_avg:76.12ms +[2025-09-02 08:49:23] [Rank 0] step:8641/10000 train_time:657884ms step_avg:76.14ms +[2025-09-02 
08:49:23] [Rank 0] step:8641/10000 train_time:657884ms step_avg:76.14ms +[2025-09-02 08:49:25] [Rank 0] step:8661/10000 train_time:659514ms step_avg:76.15ms +[2025-09-02 08:49:25] [Rank 0] step:8661/10000 train_time:659514ms step_avg:76.15ms +[2025-09-02 08:49:27] [Rank 0] step:8681/10000 train_time:661144ms step_avg:76.16ms +[2025-09-02 08:49:27] [Rank 0] step:8681/10000 train_time:661144ms step_avg:76.16ms +[2025-09-02 08:49:28] [Rank 0] step:8701/10000 train_time:662772ms step_avg:76.17ms +[2025-09-02 08:49:28] [Rank 0] step:8701/10000 train_time:662772ms step_avg:76.17ms +[2025-09-02 08:49:30] [Rank 0] step:8721/10000 train_time:664403ms step_avg:76.18ms +[2025-09-02 08:49:30] [Rank 0] step:8721/10000 train_time:664403ms step_avg:76.18ms +[2025-09-02 08:49:32] [Rank 0] step:8741/10000 train_time:666023ms step_avg:76.20ms +[2025-09-02 08:49:32] [Rank 0] step:8741/10000 train_time:666023ms step_avg:76.20ms +[2025-09-02 08:49:33] [Rank 0] step:8761/10000 train_time:667652ms step_avg:76.21ms +[2025-09-02 08:49:33] [Rank 0] step:8761/10000 train_time:667652ms step_avg:76.21ms +[2025-09-02 08:49:35] [Rank 0] step:8781/10000 train_time:669292ms step_avg:76.22ms +[2025-09-02 08:49:35] [Rank 0] step:8781/10000 train_time:669292ms step_avg:76.22ms +[2025-09-02 08:49:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:49:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:49:48] [Rank 0] PRINT: step:8800/10000 val_loss:3.7997 svd_entropy: attn_qk:H=0.7699,top10E=0.24,eRank=186.4,q75/q25=88.56 attn_vo:H=0.7957,top10E=0.14,eRank=275.4,q75/q25=inf mlp_w1:H=0.8014,top10E=0.24,eRank=224.5,q75/q25=16.69 mlp_w2:H=0.8651,top10E=0.12,eRank=318.7,q75/q25=20.96 vo_prod:H=0.6751,top10E=0.22,eRank=130.2,q75/q25=inf train_time:671086ms step_avg:76.26ms +[2025-09-02 08:49:48] [Rank 0] PRINT: step:8800/10000 val_loss:3.7997 svd_entropy: attn_qk:H=0.7699,top10E=0.24,eRank=186.4,q75/q25=88.56 attn_vo:H=0.7957,top10E=0.14,eRank=275.4,q75/q25=inf mlp_w1:H=0.8014,top10E=0.24,eRank=224.5,q75/q25=16.69 mlp_w2:H=0.8651,top10E=0.12,eRank=318.7,q75/q25=20.96 vo_prod:H=0.6751,top10E=0.22,eRank=130.2,q75/q25=inf train_time:671086ms step_avg:76.26ms +[2025-09-02 08:49:48] [Rank 0] step:8801/10000 train_time:671098ms step_avg:76.25ms +[2025-09-02 08:49:48] [Rank 0] step:8801/10000 train_time:671098ms step_avg:76.25ms +[2025-09-02 08:49:50] [Rank 0] step:8821/10000 train_time:672569ms step_avg:76.25ms +[2025-09-02 08:49:50] [Rank 0] step:8821/10000 train_time:672569ms step_avg:76.25ms +[2025-09-02 08:49:51] [Rank 0] step:8841/10000 train_time:674220ms step_avg:76.26ms +[2025-09-02 08:49:51] [Rank 0] step:8841/10000 train_time:674220ms step_avg:76.26ms +[2025-09-02 08:49:53] [Rank 0] step:8861/10000 train_time:675847ms step_avg:76.27ms +[2025-09-02 08:49:53] [Rank 0] step:8861/10000 train_time:675847ms step_avg:76.27ms +[2025-09-02 08:49:55] [Rank 0] step:8881/10000 train_time:677478ms step_avg:76.28ms +[2025-09-02 08:49:55] [Rank 0] step:8881/10000 train_time:677478ms step_avg:76.28ms +[2025-09-02 08:49:56] [Rank 0] step:8901/10000 train_time:679111ms step_avg:76.30ms +[2025-09-02 08:49:56] [Rank 0] step:8901/10000 train_time:679111ms step_avg:76.30ms +[2025-09-02 08:49:58] [Rank 0] step:8921/10000 train_time:680751ms step_avg:76.31ms +[2025-09-02 08:49:58] [Rank 0] step:8921/10000 train_time:680751ms step_avg:76.31ms +[2025-09-02 08:50:00] [Rank 
0] step:8941/10000 train_time:682397ms step_avg:76.32ms +[2025-09-02 08:50:00] [Rank 0] step:8941/10000 train_time:682397ms step_avg:76.32ms +[2025-09-02 08:50:01] [Rank 0] step:8961/10000 train_time:684026ms step_avg:76.33ms +[2025-09-02 08:50:01] [Rank 0] step:8961/10000 train_time:684026ms step_avg:76.33ms +[2025-09-02 08:50:03] [Rank 0] step:8981/10000 train_time:685661ms step_avg:76.35ms +[2025-09-02 08:50:03] [Rank 0] step:8981/10000 train_time:685661ms step_avg:76.35ms +[2025-09-02 08:50:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:50:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:50:16] [Rank 0] PRINT: step:9000/10000 val_loss:3.7914 svd_entropy: attn_qk:H=0.7704,top10E=0.24,eRank=186.8,q75/q25=88.30 attn_vo:H=0.7961,top10E=0.14,eRank=275.9,q75/q25=inf mlp_w1:H=0.8020,top10E=0.24,eRank=225.3,q75/q25=16.70 mlp_w2:H=0.8655,top10E=0.12,eRank=319.6,q75/q25=20.92 vo_prod:H=0.6758,top10E=0.21,eRank=130.9,q75/q25=inf train_time:687453ms step_avg:76.38ms +[2025-09-02 08:50:16] [Rank 0] PRINT: step:9000/10000 val_loss:3.7914 svd_entropy: attn_qk:H=0.7704,top10E=0.24,eRank=186.8,q75/q25=88.30 attn_vo:H=0.7961,top10E=0.14,eRank=275.9,q75/q25=inf mlp_w1:H=0.8020,top10E=0.24,eRank=225.3,q75/q25=16.70 mlp_w2:H=0.8655,top10E=0.12,eRank=319.6,q75/q25=20.92 vo_prod:H=0.6758,top10E=0.21,eRank=130.9,q75/q25=inf train_time:687453ms step_avg:76.38ms +[2025-09-02 08:50:16] [Rank 0] step:9001/10000 train_time:687464ms step_avg:76.38ms +[2025-09-02 08:50:16] [Rank 0] step:9001/10000 train_time:687464ms step_avg:76.38ms +[2025-09-02 08:50:18] [Rank 0] step:9021/10000 train_time:688943ms step_avg:76.37ms +[2025-09-02 08:50:18] [Rank 0] step:9021/10000 train_time:688943ms step_avg:76.37ms +[2025-09-02 08:50:20] [Rank 0] step:9041/10000 train_time:690571ms step_avg:76.38ms +[2025-09-02 
08:50:20] [Rank 0] step:9041/10000 train_time:690571ms step_avg:76.38ms +[2025-09-02 08:50:21] [Rank 0] step:9061/10000 train_time:692212ms step_avg:76.39ms +[2025-09-02 08:50:21] [Rank 0] step:9061/10000 train_time:692212ms step_avg:76.39ms +[2025-09-02 08:50:23] [Rank 0] step:9081/10000 train_time:693848ms step_avg:76.41ms +[2025-09-02 08:50:23] [Rank 0] step:9081/10000 train_time:693848ms step_avg:76.41ms +[2025-09-02 08:50:25] [Rank 0] step:9101/10000 train_time:695500ms step_avg:76.42ms +[2025-09-02 08:50:25] [Rank 0] step:9101/10000 train_time:695500ms step_avg:76.42ms +[2025-09-02 08:50:26] [Rank 0] step:9121/10000 train_time:697136ms step_avg:76.43ms +[2025-09-02 08:50:26] [Rank 0] step:9121/10000 train_time:697136ms step_avg:76.43ms +[2025-09-02 08:50:28] [Rank 0] step:9141/10000 train_time:698760ms step_avg:76.44ms +[2025-09-02 08:50:28] [Rank 0] step:9141/10000 train_time:698760ms step_avg:76.44ms +[2025-09-02 08:50:30] [Rank 0] step:9161/10000 train_time:700385ms step_avg:76.45ms +[2025-09-02 08:50:30] [Rank 0] step:9161/10000 train_time:700385ms step_avg:76.45ms +[2025-09-02 08:50:31] [Rank 0] step:9181/10000 train_time:702051ms step_avg:76.47ms +[2025-09-02 08:50:31] [Rank 0] step:9181/10000 train_time:702051ms step_avg:76.47ms +[2025-09-02 08:50:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:50:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:50:45] [Rank 0] PRINT: step:9200/10000 val_loss:3.7845 svd_entropy: attn_qk:H=0.7708,top10E=0.24,eRank=187.2,q75/q25=88.28 attn_vo:H=0.7965,top10E=0.14,eRank=276.5,q75/q25=inf mlp_w1:H=0.8025,top10E=0.24,eRank=226.0,q75/q25=16.67 mlp_w2:H=0.8659,top10E=0.12,eRank=320.4,q75/q25=20.89 vo_prod:H=0.6764,top10E=0.21,eRank=131.4,q75/q25=inf train_time:703846ms step_avg:76.50ms +[2025-09-02 08:50:45] [Rank 0] PRINT: step:9200/10000 val_loss:3.7845 svd_entropy: attn_qk:H=0.7708,top10E=0.24,eRank=187.2,q75/q25=88.28 attn_vo:H=0.7965,top10E=0.14,eRank=276.5,q75/q25=inf mlp_w1:H=0.8025,top10E=0.24,eRank=226.0,q75/q25=16.67 mlp_w2:H=0.8659,top10E=0.12,eRank=320.4,q75/q25=20.89 vo_prod:H=0.6764,top10E=0.21,eRank=131.4,q75/q25=inf train_time:703846ms step_avg:76.50ms +[2025-09-02 08:50:45] [Rank 0] step:9201/10000 train_time:703858ms step_avg:76.50ms +[2025-09-02 08:50:45] [Rank 0] step:9201/10000 train_time:703858ms step_avg:76.50ms +[2025-09-02 08:50:46] [Rank 0] step:9221/10000 train_time:705350ms step_avg:76.49ms +[2025-09-02 08:50:46] [Rank 0] step:9221/10000 train_time:705350ms step_avg:76.49ms +[2025-09-02 08:50:48] [Rank 0] step:9241/10000 train_time:706993ms step_avg:76.51ms +[2025-09-02 08:50:48] [Rank 0] step:9241/10000 train_time:706993ms step_avg:76.51ms +[2025-09-02 08:50:50] [Rank 0] step:9261/10000 train_time:708641ms step_avg:76.52ms +[2025-09-02 08:50:50] [Rank 0] step:9261/10000 train_time:708641ms step_avg:76.52ms +[2025-09-02 08:50:51] [Rank 0] step:9281/10000 train_time:710270ms step_avg:76.53ms +[2025-09-02 08:50:51] [Rank 0] step:9281/10000 train_time:710270ms step_avg:76.53ms +[2025-09-02 08:50:53] [Rank 0] step:9301/10000 train_time:711901ms step_avg:76.54ms +[2025-09-02 08:50:53] [Rank 0] step:9301/10000 train_time:711901ms step_avg:76.54ms +[2025-09-02 08:50:55] [Rank 0] step:9321/10000 train_time:713537ms step_avg:76.55ms +[2025-09-02 08:50:55] [Rank 0] step:9321/10000 train_time:713537ms step_avg:76.55ms +[2025-09-02 08:50:56] [Rank 
0] step:9341/10000 train_time:715174ms step_avg:76.56ms +[2025-09-02 08:50:56] [Rank 0] step:9341/10000 train_time:715174ms step_avg:76.56ms +[2025-09-02 08:50:58] [Rank 0] step:9361/10000 train_time:716812ms step_avg:76.57ms +[2025-09-02 08:50:58] [Rank 0] step:9361/10000 train_time:716812ms step_avg:76.57ms +[2025-09-02 08:50:59] [Rank 0] step:9381/10000 train_time:718461ms step_avg:76.59ms +[2025-09-02 08:50:59] [Rank 0] step:9381/10000 train_time:718461ms step_avg:76.59ms +[2025-09-02 08:51:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:51:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:51:13] [Rank 0] PRINT: step:9400/10000 val_loss:3.7781 svd_entropy: attn_qk:H=0.7711,top10E=0.24,eRank=187.5,q75/q25=87.78 attn_vo:H=0.7968,top10E=0.14,eRank=276.9,q75/q25=inf mlp_w1:H=0.8029,top10E=0.24,eRank=226.6,q75/q25=16.68 mlp_w2:H=0.8662,top10E=0.12,eRank=321.0,q75/q25=20.91 vo_prod:H=0.6769,top10E=0.21,eRank=131.9,q75/q25=inf train_time:720268ms step_avg:76.62ms +[2025-09-02 08:51:13] [Rank 0] PRINT: step:9400/10000 val_loss:3.7781 svd_entropy: attn_qk:H=0.7711,top10E=0.24,eRank=187.5,q75/q25=87.78 attn_vo:H=0.7968,top10E=0.14,eRank=276.9,q75/q25=inf mlp_w1:H=0.8029,top10E=0.24,eRank=226.6,q75/q25=16.68 mlp_w2:H=0.8662,top10E=0.12,eRank=321.0,q75/q25=20.91 vo_prod:H=0.6769,top10E=0.21,eRank=131.9,q75/q25=inf train_time:720268ms step_avg:76.62ms +[2025-09-02 08:51:13] [Rank 0] step:9401/10000 train_time:720279ms step_avg:76.62ms +[2025-09-02 08:51:13] [Rank 0] step:9401/10000 train_time:720279ms step_avg:76.62ms +[2025-09-02 08:51:15] [Rank 0] step:9421/10000 train_time:721768ms step_avg:76.61ms +[2025-09-02 08:51:15] [Rank 0] step:9421/10000 train_time:721768ms step_avg:76.61ms +[2025-09-02 08:51:16] [Rank 0] step:9441/10000 train_time:723403ms step_avg:76.62ms +[2025-09-02 
08:51:16] [Rank 0] step:9441/10000 train_time:723403ms step_avg:76.62ms +[2025-09-02 08:51:18] [Rank 0] step:9461/10000 train_time:725041ms step_avg:76.63ms +[2025-09-02 08:51:18] [Rank 0] step:9461/10000 train_time:725041ms step_avg:76.63ms +[2025-09-02 08:51:20] [Rank 0] step:9481/10000 train_time:726735ms step_avg:76.65ms +[2025-09-02 08:51:20] [Rank 0] step:9481/10000 train_time:726735ms step_avg:76.65ms +[2025-09-02 08:51:21] [Rank 0] step:9501/10000 train_time:728383ms step_avg:76.66ms +[2025-09-02 08:51:21] [Rank 0] step:9501/10000 train_time:728383ms step_avg:76.66ms +[2025-09-02 08:51:23] [Rank 0] step:9521/10000 train_time:730006ms step_avg:76.67ms +[2025-09-02 08:51:23] [Rank 0] step:9521/10000 train_time:730006ms step_avg:76.67ms +[2025-09-02 08:51:25] [Rank 0] step:9541/10000 train_time:731639ms step_avg:76.68ms +[2025-09-02 08:51:25] [Rank 0] step:9541/10000 train_time:731639ms step_avg:76.68ms +[2025-09-02 08:51:26] [Rank 0] step:9561/10000 train_time:733271ms step_avg:76.69ms +[2025-09-02 08:51:26] [Rank 0] step:9561/10000 train_time:733271ms step_avg:76.69ms +[2025-09-02 08:51:28] [Rank 0] step:9581/10000 train_time:734899ms step_avg:76.70ms +[2025-09-02 08:51:28] [Rank 0] step:9581/10000 train_time:734899ms step_avg:76.70ms +[2025-09-02 08:51:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:51:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:51:41] [Rank 0] PRINT: step:9600/10000 val_loss:3.7719 svd_entropy: attn_qk:H=0.7713,top10E=0.24,eRank=187.8,q75/q25=87.89 attn_vo:H=0.7971,top10E=0.14,eRank=277.2,q75/q25=inf mlp_w1:H=0.8033,top10E=0.24,eRank=227.0,q75/q25=16.68 mlp_w2:H=0.8665,top10E=0.12,eRank=321.6,q75/q25=20.86 vo_prod:H=0.6774,top10E=0.21,eRank=132.4,q75/q25=inf train_time:736707ms step_avg:76.74ms +[2025-09-02 08:51:41] [Rank 0] PRINT: step:9600/10000 val_loss:3.7719 svd_entropy: attn_qk:H=0.7713,top10E=0.24,eRank=187.8,q75/q25=87.89 attn_vo:H=0.7971,top10E=0.14,eRank=277.2,q75/q25=inf mlp_w1:H=0.8033,top10E=0.24,eRank=227.0,q75/q25=16.68 mlp_w2:H=0.8665,top10E=0.12,eRank=321.6,q75/q25=20.86 vo_prod:H=0.6774,top10E=0.21,eRank=132.4,q75/q25=inf train_time:736707ms step_avg:76.74ms +[2025-09-02 08:51:41] [Rank 0] step:9601/10000 train_time:736718ms step_avg:76.73ms +[2025-09-02 08:51:41] [Rank 0] step:9601/10000 train_time:736718ms step_avg:76.73ms +[2025-09-02 08:51:43] [Rank 0] step:9621/10000 train_time:738203ms step_avg:76.73ms +[2025-09-02 08:51:43] [Rank 0] step:9621/10000 train_time:738203ms step_avg:76.73ms +[2025-09-02 08:51:45] [Rank 0] step:9641/10000 train_time:739840ms step_avg:76.74ms +[2025-09-02 08:51:45] [Rank 0] step:9641/10000 train_time:739840ms step_avg:76.74ms +[2025-09-02 08:51:46] [Rank 0] step:9661/10000 train_time:741505ms step_avg:76.75ms +[2025-09-02 08:51:46] [Rank 0] step:9661/10000 train_time:741505ms step_avg:76.75ms +[2025-09-02 08:51:48] [Rank 0] step:9681/10000 train_time:743163ms step_avg:76.77ms +[2025-09-02 08:51:48] [Rank 0] step:9681/10000 train_time:743163ms step_avg:76.77ms +[2025-09-02 08:51:50] [Rank 0] step:9701/10000 train_time:744842ms step_avg:76.78ms +[2025-09-02 08:51:50] [Rank 0] step:9701/10000 train_time:744842ms step_avg:76.78ms +[2025-09-02 08:51:51] [Rank 0] step:9721/10000 train_time:746496ms step_avg:76.79ms +[2025-09-02 08:51:51] [Rank 0] step:9721/10000 train_time:746496ms step_avg:76.79ms +[2025-09-02 08:51:53] [Rank 
0] step:9741/10000 train_time:748173ms step_avg:76.81ms +[2025-09-02 08:51:53] [Rank 0] step:9741/10000 train_time:748173ms step_avg:76.81ms +[2025-09-02 08:51:54] [Rank 0] step:9761/10000 train_time:749831ms step_avg:76.82ms +[2025-09-02 08:51:54] [Rank 0] step:9761/10000 train_time:749831ms step_avg:76.82ms +[2025-09-02 08:51:56] [Rank 0] step:9781/10000 train_time:751504ms step_avg:76.83ms +[2025-09-02 08:51:56] [Rank 0] step:9781/10000 train_time:751504ms step_avg:76.83ms +[2025-09-02 08:51:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:51:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:52:09] [Rank 0] PRINT: step:9800/10000 val_loss:3.7668 svd_entropy: attn_qk:H=0.7715,top10E=0.24,eRank=187.9,q75/q25=87.83 attn_vo:H=0.7973,top10E=0.14,eRank=277.5,q75/q25=inf mlp_w1:H=0.8036,top10E=0.24,eRank=227.4,q75/q25=16.65 mlp_w2:H=0.8667,top10E=0.12,eRank=322.0,q75/q25=20.85 vo_prod:H=0.6777,top10E=0.21,eRank=132.7,q75/q25=inf train_time:753347ms step_avg:76.87ms +[2025-09-02 08:52:09] [Rank 0] PRINT: step:9800/10000 val_loss:3.7668 svd_entropy: attn_qk:H=0.7715,top10E=0.24,eRank=187.9,q75/q25=87.83 attn_vo:H=0.7973,top10E=0.14,eRank=277.5,q75/q25=inf mlp_w1:H=0.8036,top10E=0.24,eRank=227.4,q75/q25=16.65 mlp_w2:H=0.8667,top10E=0.12,eRank=322.0,q75/q25=20.85 vo_prod:H=0.6777,top10E=0.21,eRank=132.7,q75/q25=inf train_time:753347ms step_avg:76.87ms +[2025-09-02 08:52:10] [Rank 0] step:9801/10000 train_time:753358ms step_avg:76.87ms +[2025-09-02 08:52:10] [Rank 0] step:9801/10000 train_time:753358ms step_avg:76.87ms +[2025-09-02 08:52:11] [Rank 0] step:9821/10000 train_time:754870ms step_avg:76.86ms +[2025-09-02 08:52:11] [Rank 0] step:9821/10000 train_time:754870ms step_avg:76.86ms +[2025-09-02 08:52:13] [Rank 0] step:9841/10000 train_time:756545ms step_avg:76.88ms +[2025-09-02 
08:52:13] [Rank 0] step:9841/10000 train_time:756545ms step_avg:76.88ms +[2025-09-02 08:52:15] [Rank 0] step:9861/10000 train_time:758197ms step_avg:76.89ms +[2025-09-02 08:52:15] [Rank 0] step:9861/10000 train_time:758197ms step_avg:76.89ms +[2025-09-02 08:52:16] [Rank 0] step:9881/10000 train_time:759847ms step_avg:76.90ms +[2025-09-02 08:52:16] [Rank 0] step:9881/10000 train_time:759847ms step_avg:76.90ms +[2025-09-02 08:52:18] [Rank 0] step:9901/10000 train_time:761515ms step_avg:76.91ms +[2025-09-02 08:52:18] [Rank 0] step:9901/10000 train_time:761515ms step_avg:76.91ms +[2025-09-02 08:52:20] [Rank 0] step:9921/10000 train_time:763170ms step_avg:76.92ms +[2025-09-02 08:52:20] [Rank 0] step:9921/10000 train_time:763170ms step_avg:76.92ms +[2025-09-02 08:52:21] [Rank 0] step:9941/10000 train_time:764836ms step_avg:76.94ms +[2025-09-02 08:52:21] [Rank 0] step:9941/10000 train_time:764836ms step_avg:76.94ms +[2025-09-02 08:52:23] [Rank 0] step:9961/10000 train_time:766495ms step_avg:76.95ms +[2025-09-02 08:52:23] [Rank 0] step:9961/10000 train_time:766495ms step_avg:76.95ms +[2025-09-02 08:52:25] [Rank 0] step:9981/10000 train_time:768154ms step_avg:76.96ms +[2025-09-02 08:52:25] [Rank 0] step:9981/10000 train_time:768154ms step_avg:76.96ms +[2025-09-02 08:52:26] [Rank 0] step:10000/10000 train_time:769738ms step_avg:76.97ms +[2025-09-02 08:52:26] [Rank 0] step:10000/10000 train_time:769738ms step_avg:76.97ms +[2025-09-02 08:52:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:52:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:52:38] [Rank 0] PRINT: step:10000/10000 val_loss:3.7610 svd_entropy: attn_qk:H=0.7716,top10E=0.24,eRank=188.1,q75/q25=87.77 attn_vo:H=0.7974,top10E=0.14,eRank=277.7,q75/q25=inf mlp_w1:H=0.8038,top10E=0.24,eRank=227.7,q75/q25=16.64 mlp_w2:H=0.8668,top10E=0.12,eRank=322.4,q75/q25=20.79 vo_prod:H=0.6780,top10E=0.21,eRank=133.0,q75/q25=inf train_time:769996ms step_avg:77.00ms +[2025-09-02 08:52:38] [Rank 0] PRINT: step:10000/10000 val_loss:3.7610 svd_entropy: attn_qk:H=0.7716,top10E=0.24,eRank=188.1,q75/q25=87.77 attn_vo:H=0.7974,top10E=0.14,eRank=277.7,q75/q25=inf mlp_w1:H=0.8038,top10E=0.24,eRank=227.7,q75/q25=16.64 mlp_w2:H=0.8668,top10E=0.12,eRank=322.4,q75/q25=20.79 vo_prod:H=0.6780,top10E=0.21,eRank=133.0,q75/q25=inf train_time:769996ms step_avg:77.00ms +[2025-09-02 08:52:38] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 08:52:38 2025 --- +[2025-09-02 08:52:38] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 08:52:38 2025 --- +[2025-09-02 08:52:38] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 08:52:38] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_45/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..217dc64e52f44c99d7428ce5a71e1040761b44ff --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "53b0a528-4968-4355-94c8-9a1d914a882f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_45/training_log_53b0a528-4968-4355-94c8-9a1d914a882f.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_45/training_log_53b0a528-4968-4355-94c8-9a1d914a882f.txt new file mode 100644 index 0000000000000000000000000000000000000000..04d9cc6fd2e3bf04b06d881b08f5b7ae0c023f0a --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_45/training_log_53b0a528-4968-4355-94c8-9a1d914a882f.txt @@ -0,0 +1,2984 @@ +[2025-09-02 09:41:29] [Rank 0] PRINT: --- Script Start: Tue Sep 2 09:41:29 2025 --- +[2025-09-02 09:41:29] [Rank 0] PRINT: --- Script Start: Tue Sep 2 09:41:29 2025 --- +[2025-09-02 09:41:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 09:41:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 09:41:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 09:41:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 09:41:29] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-02 09:41:29] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-02 09:41:29] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_45 +[2025-09-02 09:41:29] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_45 +[2025-09-02 09:41:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 09:41:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 09:41:29] [Rank 0] PRINT: Constructing model... +[2025-09-02 09:41:29] [Rank 0] PRINT: Constructing model... +[2025-09-02 09:41:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 09:41:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 09:41:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 09:41:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 09:41:31] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 09:41:31] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 09:41:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 09:41:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 09:41:31] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 09:41:31] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 09:41:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 09:41:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 09:41:31] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 09:41:31] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 09:41:31] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 09:41:31] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 09:41:31] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 09:41:31] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 09:41:31] [Rank 0] PRINT: Starting warmup... +[2025-09-02 09:41:31] [Rank 0] PRINT: Starting warmup... +[2025-09-02 09:42:13] [Rank 0] PRINT: Warmup complete. +[2025-09-02 09:42:13] [Rank 0] PRINT: Warmup complete. +[2025-09-02 09:42:13] [Rank 0] PRINT: Starting training... +[2025-09-02 09:42:13] [Rank 0] PRINT: Starting training... 
+[2025-09-02 09:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:42:29] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 09:42:29] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 09:42:31] [Rank 0] step:21/10000 train_time:1308ms step_avg:62.27ms +[2025-09-02 09:42:31] [Rank 0] step:21/10000 train_time:1308ms step_avg:62.27ms +[2025-09-02 09:42:32] [Rank 0] step:41/10000 train_time:2706ms step_avg:66.00ms +[2025-09-02 09:42:32] [Rank 0] step:41/10000 train_time:2706ms step_avg:66.00ms +[2025-09-02 09:42:33] [Rank 0] step:61/10000 train_time:4109ms step_avg:67.36ms +[2025-09-02 09:42:33] [Rank 0] step:61/10000 train_time:4109ms step_avg:67.36ms +[2025-09-02 09:42:35] [Rank 0] step:81/10000 train_time:5514ms step_avg:68.08ms +[2025-09-02 09:42:35] [Rank 0] step:81/10000 train_time:5514ms step_avg:68.08ms +[2025-09-02 09:42:36] [Rank 0] step:101/10000 train_time:6919ms step_avg:68.51ms +[2025-09-02 09:42:36] [Rank 0] step:101/10000 train_time:6919ms step_avg:68.51ms +[2025-09-02 09:42:38] [Rank 0] step:121/10000 train_time:8328ms step_avg:68.82ms +[2025-09-02 09:42:38] [Rank 0] step:121/10000 
train_time:8328ms step_avg:68.82ms +[2025-09-02 09:42:39] [Rank 0] step:141/10000 train_time:9734ms step_avg:69.04ms +[2025-09-02 09:42:39] [Rank 0] step:141/10000 train_time:9734ms step_avg:69.04ms +[2025-09-02 09:42:41] [Rank 0] step:161/10000 train_time:11141ms step_avg:69.20ms +[2025-09-02 09:42:41] [Rank 0] step:161/10000 train_time:11141ms step_avg:69.20ms +[2025-09-02 09:42:42] [Rank 0] step:181/10000 train_time:12548ms step_avg:69.32ms +[2025-09-02 09:42:42] [Rank 0] step:181/10000 train_time:12548ms step_avg:69.32ms +[2025-09-02 09:42:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:42:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:42:55] [Rank 0] PRINT: step:200/10000 val_loss:6.4356 svd_entropy: attn_qk:H=0.5144,top10E=0.70,eRank=78.1,q75/q25=12.04 attn_vo:H=0.4614,top10E=0.65,eRank=63.4,q75/q25=inf mlp_w1:H=0.4465,top10E=0.73,eRank=28.3,q75/q25=2.72 mlp_w2:H=0.1882,top10E=0.94,eRank=4.7,q75/q25=260.50 vo_prod:H=0.2545,top10E=0.86,eRank=9.1,q75/q25=inf train_time:14096ms step_avg:70.48ms +[2025-09-02 09:42:55] [Rank 0] PRINT: step:200/10000 val_loss:6.4356 svd_entropy: attn_qk:H=0.5144,top10E=0.70,eRank=78.1,q75/q25=12.04 attn_vo:H=0.4614,top10E=0.65,eRank=63.4,q75/q25=inf mlp_w1:H=0.4465,top10E=0.73,eRank=28.3,q75/q25=2.72 mlp_w2:H=0.1882,top10E=0.94,eRank=4.7,q75/q25=260.50 vo_prod:H=0.2545,top10E=0.86,eRank=9.1,q75/q25=inf train_time:14096ms step_avg:70.48ms +[2025-09-02 09:42:55] [Rank 0] step:201/10000 train_time:14107ms step_avg:70.18ms +[2025-09-02 09:42:55] [Rank 0] step:201/10000 train_time:14107ms step_avg:70.18ms +[2025-09-02 09:42:57] [Rank 0] step:221/10000 train_time:15389ms step_avg:69.63ms +[2025-09-02 09:42:57] [Rank 0] step:221/10000 train_time:15389ms step_avg:69.63ms +[2025-09-02 09:42:58] [Rank 0] step:241/10000 train_time:16859ms 
step_avg:69.95ms +[2025-09-02 09:42:58] [Rank 0] step:241/10000 train_time:16859ms step_avg:69.95ms +[2025-09-02 09:43:00] [Rank 0] step:261/10000 train_time:18267ms step_avg:69.99ms +[2025-09-02 09:43:00] [Rank 0] step:261/10000 train_time:18267ms step_avg:69.99ms +[2025-09-02 09:43:01] [Rank 0] step:281/10000 train_time:19676ms step_avg:70.02ms +[2025-09-02 09:43:01] [Rank 0] step:281/10000 train_time:19676ms step_avg:70.02ms +[2025-09-02 09:43:02] [Rank 0] step:301/10000 train_time:21089ms step_avg:70.06ms +[2025-09-02 09:43:02] [Rank 0] step:301/10000 train_time:21089ms step_avg:70.06ms +[2025-09-02 09:43:04] [Rank 0] step:321/10000 train_time:22499ms step_avg:70.09ms +[2025-09-02 09:43:04] [Rank 0] step:321/10000 train_time:22499ms step_avg:70.09ms +[2025-09-02 09:43:05] [Rank 0] step:341/10000 train_time:23910ms step_avg:70.12ms +[2025-09-02 09:43:05] [Rank 0] step:341/10000 train_time:23910ms step_avg:70.12ms +[2025-09-02 09:43:07] [Rank 0] step:361/10000 train_time:25320ms step_avg:70.14ms +[2025-09-02 09:43:07] [Rank 0] step:361/10000 train_time:25320ms step_avg:70.14ms +[2025-09-02 09:43:08] [Rank 0] step:381/10000 train_time:26730ms step_avg:70.16ms +[2025-09-02 09:43:08] [Rank 0] step:381/10000 train_time:26730ms step_avg:70.16ms +[2025-09-02 09:43:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:43:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:43:21] [Rank 0] PRINT: step:400/10000 val_loss:5.9266 svd_entropy: attn_qk:H=0.5597,top10E=0.60,eRank=86.8,q75/q25=13.25 attn_vo:H=0.5435,top10E=0.50,eRank=83.3,q75/q25=inf mlp_w1:H=0.4734,top10E=0.67,eRank=40.6,q75/q25=3.22 mlp_w2:H=0.5248,top10E=0.62,eRank=34.9,q75/q25=16.38 vo_prod:H=0.3745,top10E=0.75,eRank=17.5,q75/q25=inf train_time:28282ms step_avg:70.70ms +[2025-09-02 09:43:21] [Rank 0] PRINT: step:400/10000 val_loss:5.9266 svd_entropy: attn_qk:H=0.5597,top10E=0.60,eRank=86.8,q75/q25=13.25 attn_vo:H=0.5435,top10E=0.50,eRank=83.3,q75/q25=inf mlp_w1:H=0.4734,top10E=0.67,eRank=40.6,q75/q25=3.22 mlp_w2:H=0.5248,top10E=0.62,eRank=34.9,q75/q25=16.38 vo_prod:H=0.3745,top10E=0.75,eRank=17.5,q75/q25=inf train_time:28282ms step_avg:70.70ms +[2025-09-02 09:43:21] [Rank 0] step:401/10000 train_time:28292ms step_avg:70.55ms +[2025-09-02 09:43:21] [Rank 0] step:401/10000 train_time:28292ms step_avg:70.55ms +[2025-09-02 09:43:23] [Rank 0] step:421/10000 train_time:29575ms step_avg:70.25ms +[2025-09-02 09:43:23] [Rank 0] step:421/10000 train_time:29575ms step_avg:70.25ms +[2025-09-02 09:43:24] [Rank 0] step:441/10000 train_time:30982ms step_avg:70.25ms +[2025-09-02 09:43:24] [Rank 0] step:441/10000 train_time:30982ms step_avg:70.25ms +[2025-09-02 09:43:25] [Rank 0] step:461/10000 train_time:32392ms step_avg:70.26ms +[2025-09-02 09:43:25] [Rank 0] step:461/10000 train_time:32392ms step_avg:70.26ms +[2025-09-02 09:43:27] [Rank 0] step:481/10000 train_time:33804ms step_avg:70.28ms +[2025-09-02 09:43:27] [Rank 0] step:481/10000 train_time:33804ms step_avg:70.28ms +[2025-09-02 09:43:28] [Rank 0] step:501/10000 train_time:35212ms step_avg:70.28ms +[2025-09-02 09:43:28] [Rank 0] step:501/10000 train_time:35212ms step_avg:70.28ms +[2025-09-02 09:43:30] [Rank 0] step:521/10000 train_time:36623ms step_avg:70.29ms +[2025-09-02 09:43:30] [Rank 0] step:521/10000 train_time:36623ms step_avg:70.29ms +[2025-09-02 09:43:31] [Rank 0] step:541/10000 train_time:38032ms 
step_avg:70.30ms +[2025-09-02 09:43:31] [Rank 0] step:541/10000 train_time:38032ms step_avg:70.30ms +[2025-09-02 09:43:32] [Rank 0] step:561/10000 train_time:39440ms step_avg:70.30ms +[2025-09-02 09:43:32] [Rank 0] step:561/10000 train_time:39440ms step_avg:70.30ms +[2025-09-02 09:43:34] [Rank 0] step:581/10000 train_time:40851ms step_avg:70.31ms +[2025-09-02 09:43:34] [Rank 0] step:581/10000 train_time:40851ms step_avg:70.31ms +[2025-09-02 09:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:43:47] [Rank 0] PRINT: step:600/10000 val_loss:5.6465 svd_entropy: attn_qk:H=0.5903,top10E=0.54,eRank=93.9,q75/q25=14.78 attn_vo:H=0.5871,top10E=0.43,eRank=99.5,q75/q25=inf mlp_w1:H=0.5081,top10E=0.62,eRank=50.3,q75/q25=3.58 mlp_w2:H=0.6168,top10E=0.48,eRank=62.9,q75/q25=11.86 vo_prod:H=0.4333,top10E=0.63,eRank=24.7,q75/q25=inf train_time:42401ms step_avg:70.67ms +[2025-09-02 09:43:47] [Rank 0] PRINT: step:600/10000 val_loss:5.6465 svd_entropy: attn_qk:H=0.5903,top10E=0.54,eRank=93.9,q75/q25=14.78 attn_vo:H=0.5871,top10E=0.43,eRank=99.5,q75/q25=inf mlp_w1:H=0.5081,top10E=0.62,eRank=50.3,q75/q25=3.58 mlp_w2:H=0.6168,top10E=0.48,eRank=62.9,q75/q25=11.86 vo_prod:H=0.4333,top10E=0.63,eRank=24.7,q75/q25=inf train_time:42401ms step_avg:70.67ms +[2025-09-02 09:43:47] [Rank 0] step:601/10000 train_time:42412ms step_avg:70.57ms +[2025-09-02 09:43:47] [Rank 0] step:601/10000 train_time:42412ms step_avg:70.57ms +[2025-09-02 09:43:48] [Rank 0] step:621/10000 train_time:43710ms step_avg:70.39ms +[2025-09-02 09:43:48] [Rank 0] step:621/10000 train_time:43710ms step_avg:70.39ms +[2025-09-02 09:43:49] [Rank 0] step:641/10000 train_time:45117ms step_avg:70.39ms +[2025-09-02 09:43:49] [Rank 0] step:641/10000 train_time:45117ms step_avg:70.39ms 
+[2025-09-02 09:43:51] [Rank 0] step:661/10000 train_time:46526ms step_avg:70.39ms +[2025-09-02 09:43:51] [Rank 0] step:661/10000 train_time:46526ms step_avg:70.39ms +[2025-09-02 09:43:52] [Rank 0] step:681/10000 train_time:47934ms step_avg:70.39ms +[2025-09-02 09:43:52] [Rank 0] step:681/10000 train_time:47934ms step_avg:70.39ms +[2025-09-02 09:43:54] [Rank 0] step:701/10000 train_time:49345ms step_avg:70.39ms +[2025-09-02 09:43:54] [Rank 0] step:701/10000 train_time:49345ms step_avg:70.39ms +[2025-09-02 09:43:55] [Rank 0] step:721/10000 train_time:50754ms step_avg:70.39ms +[2025-09-02 09:43:55] [Rank 0] step:721/10000 train_time:50754ms step_avg:70.39ms +[2025-09-02 09:43:57] [Rank 0] step:741/10000 train_time:52165ms step_avg:70.40ms +[2025-09-02 09:43:57] [Rank 0] step:741/10000 train_time:52165ms step_avg:70.40ms +[2025-09-02 09:43:58] [Rank 0] step:761/10000 train_time:53587ms step_avg:70.42ms +[2025-09-02 09:43:58] [Rank 0] step:761/10000 train_time:53587ms step_avg:70.42ms +[2025-09-02 09:43:59] [Rank 0] step:781/10000 train_time:55011ms step_avg:70.44ms +[2025-09-02 09:43:59] [Rank 0] step:781/10000 train_time:55011ms step_avg:70.44ms +[2025-09-02 09:44:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:44:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:44:12] [Rank 0] PRINT: step:800/10000 val_loss:5.4323 svd_entropy: attn_qk:H=0.6129,top10E=0.49,eRank=99.8,q75/q25=16.65 attn_vo:H=0.6182,top10E=0.38,eRank=113.7,q75/q25=inf mlp_w1:H=0.5407,top10E=0.58,eRank=58.7,q75/q25=3.96 mlp_w2:H=0.6741,top10E=0.39,eRank=89.9,q75/q25=9.51 vo_prod:H=0.4736,top10E=0.55,eRank=31.7,q75/q25=inf train_time:56580ms step_avg:70.72ms +[2025-09-02 09:44:12] [Rank 0] PRINT: step:800/10000 val_loss:5.4323 svd_entropy: attn_qk:H=0.6129,top10E=0.49,eRank=99.8,q75/q25=16.65 attn_vo:H=0.6182,top10E=0.38,eRank=113.7,q75/q25=inf mlp_w1:H=0.5407,top10E=0.58,eRank=58.7,q75/q25=3.96 mlp_w2:H=0.6741,top10E=0.39,eRank=89.9,q75/q25=9.51 vo_prod:H=0.4736,top10E=0.55,eRank=31.7,q75/q25=inf train_time:56580ms step_avg:70.72ms +[2025-09-02 09:44:12] [Rank 0] step:801/10000 train_time:56590ms step_avg:70.65ms +[2025-09-02 09:44:12] [Rank 0] step:801/10000 train_time:56590ms step_avg:70.65ms +[2025-09-02 09:44:14] [Rank 0] step:821/10000 train_time:57886ms step_avg:70.51ms +[2025-09-02 09:44:14] [Rank 0] step:821/10000 train_time:57886ms step_avg:70.51ms +[2025-09-02 09:44:15] [Rank 0] step:841/10000 train_time:59309ms step_avg:70.52ms +[2025-09-02 09:44:15] [Rank 0] step:841/10000 train_time:59309ms step_avg:70.52ms +[2025-09-02 09:44:17] [Rank 0] step:861/10000 train_time:60731ms step_avg:70.53ms +[2025-09-02 09:44:17] [Rank 0] step:861/10000 train_time:60731ms step_avg:70.53ms +[2025-09-02 09:44:18] [Rank 0] step:881/10000 train_time:62153ms step_avg:70.55ms +[2025-09-02 09:44:18] [Rank 0] step:881/10000 train_time:62153ms step_avg:70.55ms +[2025-09-02 09:44:20] [Rank 0] step:901/10000 train_time:63575ms step_avg:70.56ms +[2025-09-02 09:44:20] [Rank 0] step:901/10000 train_time:63575ms step_avg:70.56ms +[2025-09-02 09:44:21] [Rank 0] step:921/10000 train_time:64997ms step_avg:70.57ms +[2025-09-02 09:44:21] [Rank 0] step:921/10000 train_time:64997ms step_avg:70.57ms +[2025-09-02 09:44:22] [Rank 0] step:941/10000 train_time:66420ms 
step_avg:70.58ms +[2025-09-02 09:44:22] [Rank 0] step:941/10000 train_time:66420ms step_avg:70.58ms +[2025-09-02 09:44:24] [Rank 0] step:961/10000 train_time:67844ms step_avg:70.60ms +[2025-09-02 09:44:24] [Rank 0] step:961/10000 train_time:67844ms step_avg:70.60ms +[2025-09-02 09:44:25] [Rank 0] step:981/10000 train_time:69269ms step_avg:70.61ms +[2025-09-02 09:44:25] [Rank 0] step:981/10000 train_time:69269ms step_avg:70.61ms +[2025-09-02 09:44:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:44:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:44:38] [Rank 0] PRINT: step:1000/10000 val_loss:5.2811 svd_entropy: attn_qk:H=0.6317,top10E=0.45,eRank=105.5,q75/q25=18.98 attn_vo:H=0.6423,top10E=0.35,eRank=127.3,q75/q25=inf mlp_w1:H=0.5660,top10E=0.55,eRank=65.8,q75/q25=4.32 mlp_w2:H=0.7047,top10E=0.34,eRank=109.9,q75/q25=10.19 vo_prod:H=0.5003,top10E=0.50,eRank=37.9,q75/q25=inf train_time:70836ms step_avg:70.84ms +[2025-09-02 09:44:38] [Rank 0] PRINT: step:1000/10000 val_loss:5.2811 svd_entropy: attn_qk:H=0.6317,top10E=0.45,eRank=105.5,q75/q25=18.98 attn_vo:H=0.6423,top10E=0.35,eRank=127.3,q75/q25=inf mlp_w1:H=0.5660,top10E=0.55,eRank=65.8,q75/q25=4.32 mlp_w2:H=0.7047,top10E=0.34,eRank=109.9,q75/q25=10.19 vo_prod:H=0.5003,top10E=0.50,eRank=37.9,q75/q25=inf train_time:70836ms step_avg:70.84ms +[2025-09-02 09:44:38] [Rank 0] step:1001/10000 train_time:70846ms step_avg:70.78ms +[2025-09-02 09:44:38] [Rank 0] step:1001/10000 train_time:70846ms step_avg:70.78ms +[2025-09-02 09:44:40] [Rank 0] step:1021/10000 train_time:72145ms step_avg:70.66ms +[2025-09-02 09:44:40] [Rank 0] step:1021/10000 train_time:72145ms step_avg:70.66ms +[2025-09-02 09:44:41] [Rank 0] step:1041/10000 train_time:73567ms step_avg:70.67ms +[2025-09-02 09:44:41] [Rank 0] step:1041/10000 train_time:73567ms 
step_avg:70.67ms +[2025-09-02 09:44:43] [Rank 0] step:1061/10000 train_time:74990ms step_avg:70.68ms +[2025-09-02 09:44:43] [Rank 0] step:1061/10000 train_time:74990ms step_avg:70.68ms +[2025-09-02 09:44:44] [Rank 0] step:1081/10000 train_time:76414ms step_avg:70.69ms +[2025-09-02 09:44:44] [Rank 0] step:1081/10000 train_time:76414ms step_avg:70.69ms +[2025-09-02 09:44:45] [Rank 0] step:1101/10000 train_time:77838ms step_avg:70.70ms +[2025-09-02 09:44:45] [Rank 0] step:1101/10000 train_time:77838ms step_avg:70.70ms +[2025-09-02 09:44:47] [Rank 0] step:1121/10000 train_time:79260ms step_avg:70.70ms +[2025-09-02 09:44:47] [Rank 0] step:1121/10000 train_time:79260ms step_avg:70.70ms +[2025-09-02 09:44:48] [Rank 0] step:1141/10000 train_time:80685ms step_avg:70.71ms +[2025-09-02 09:44:48] [Rank 0] step:1141/10000 train_time:80685ms step_avg:70.71ms +[2025-09-02 09:44:50] [Rank 0] step:1161/10000 train_time:82109ms step_avg:70.72ms +[2025-09-02 09:44:50] [Rank 0] step:1161/10000 train_time:82109ms step_avg:70.72ms +[2025-09-02 09:44:51] [Rank 0] step:1181/10000 train_time:83533ms step_avg:70.73ms +[2025-09-02 09:44:51] [Rank 0] step:1181/10000 train_time:83533ms step_avg:70.73ms +[2025-09-02 09:44:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:44:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:45:04] [Rank 0] PRINT: step:1200/10000 val_loss:5.1431 svd_entropy: attn_qk:H=0.6469,top10E=0.43,eRank=111.0,q75/q25=22.43 attn_vo:H=0.6627,top10E=0.32,eRank=141.2,q75/q25=inf mlp_w1:H=0.5848,top10E=0.52,eRank=72.0,q75/q25=4.70 mlp_w2:H=0.7249,top10E=0.31,eRank=125.8,q75/q25=11.42 vo_prod:H=0.5217,top10E=0.45,eRank=43.9,q75/q25=inf train_time:85099ms step_avg:70.92ms +[2025-09-02 09:45:04] [Rank 0] PRINT: step:1200/10000 val_loss:5.1431 svd_entropy: attn_qk:H=0.6469,top10E=0.43,eRank=111.0,q75/q25=22.43 attn_vo:H=0.6627,top10E=0.32,eRank=141.2,q75/q25=inf mlp_w1:H=0.5848,top10E=0.52,eRank=72.0,q75/q25=4.70 mlp_w2:H=0.7249,top10E=0.31,eRank=125.8,q75/q25=11.42 vo_prod:H=0.5217,top10E=0.45,eRank=43.9,q75/q25=inf train_time:85099ms step_avg:70.92ms +[2025-09-02 09:45:04] [Rank 0] step:1201/10000 train_time:85109ms step_avg:70.87ms +[2025-09-02 09:45:04] [Rank 0] step:1201/10000 train_time:85109ms step_avg:70.87ms +[2025-09-02 09:45:06] [Rank 0] step:1221/10000 train_time:86391ms step_avg:70.75ms +[2025-09-02 09:45:06] [Rank 0] step:1221/10000 train_time:86391ms step_avg:70.75ms +[2025-09-02 09:45:07] [Rank 0] step:1241/10000 train_time:87814ms step_avg:70.76ms +[2025-09-02 09:45:07] [Rank 0] step:1241/10000 train_time:87814ms step_avg:70.76ms +[2025-09-02 09:45:09] [Rank 0] step:1261/10000 train_time:89242ms step_avg:70.77ms +[2025-09-02 09:45:09] [Rank 0] step:1261/10000 train_time:89242ms step_avg:70.77ms +[2025-09-02 09:45:10] [Rank 0] step:1281/10000 train_time:90664ms step_avg:70.78ms +[2025-09-02 09:45:10] [Rank 0] step:1281/10000 train_time:90664ms step_avg:70.78ms +[2025-09-02 09:45:11] [Rank 0] step:1301/10000 train_time:92088ms step_avg:70.78ms +[2025-09-02 09:45:11] [Rank 0] step:1301/10000 train_time:92088ms step_avg:70.78ms +[2025-09-02 09:45:13] [Rank 0] step:1321/10000 train_time:93511ms step_avg:70.79ms +[2025-09-02 09:45:13] [Rank 0] step:1321/10000 train_time:93511ms step_avg:70.79ms +[2025-09-02 09:45:14] [Rank 0] step:1341/10000 
train_time:94932ms step_avg:70.79ms +[2025-09-02 09:45:14] [Rank 0] step:1341/10000 train_time:94932ms step_avg:70.79ms +[2025-09-02 09:45:16] [Rank 0] step:1361/10000 train_time:96356ms step_avg:70.80ms +[2025-09-02 09:45:16] [Rank 0] step:1361/10000 train_time:96356ms step_avg:70.80ms +[2025-09-02 09:45:17] [Rank 0] step:1381/10000 train_time:97779ms step_avg:70.80ms +[2025-09-02 09:45:17] [Rank 0] step:1381/10000 train_time:97779ms step_avg:70.80ms +[2025-09-02 09:45:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:45:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:45:30] [Rank 0] PRINT: step:1400/10000 val_loss:5.0146 svd_entropy: attn_qk:H=0.6597,top10E=0.40,eRank=116.3,q75/q25=27.03 attn_vo:H=0.6799,top10E=0.29,eRank=154.2,q75/q25=inf mlp_w1:H=0.6020,top10E=0.50,eRank=78.3,q75/q25=5.18 mlp_w2:H=0.7413,top10E=0.28,eRank=140.2,q75/q25=12.97 vo_prod:H=0.5391,top10E=0.42,eRank=49.5,q75/q25=inf train_time:99348ms step_avg:70.96ms +[2025-09-02 09:45:30] [Rank 0] PRINT: step:1400/10000 val_loss:5.0146 svd_entropy: attn_qk:H=0.6597,top10E=0.40,eRank=116.3,q75/q25=27.03 attn_vo:H=0.6799,top10E=0.29,eRank=154.2,q75/q25=inf mlp_w1:H=0.6020,top10E=0.50,eRank=78.3,q75/q25=5.18 mlp_w2:H=0.7413,top10E=0.28,eRank=140.2,q75/q25=12.97 vo_prod:H=0.5391,top10E=0.42,eRank=49.5,q75/q25=inf train_time:99348ms step_avg:70.96ms +[2025-09-02 09:45:30] [Rank 0] step:1401/10000 train_time:99358ms step_avg:70.92ms +[2025-09-02 09:45:30] [Rank 0] step:1401/10000 train_time:99358ms step_avg:70.92ms +[2025-09-02 09:45:32] [Rank 0] step:1421/10000 train_time:100657ms step_avg:70.84ms +[2025-09-02 09:45:32] [Rank 0] step:1421/10000 train_time:100657ms step_avg:70.84ms +[2025-09-02 09:45:33] [Rank 0] step:1441/10000 train_time:102079ms step_avg:70.84ms +[2025-09-02 09:45:33] [Rank 0] step:1441/10000 
train_time:102079ms step_avg:70.84ms +[2025-09-02 09:45:34] [Rank 0] step:1461/10000 train_time:103502ms step_avg:70.84ms +[2025-09-02 09:45:34] [Rank 0] step:1461/10000 train_time:103502ms step_avg:70.84ms +[2025-09-02 09:45:36] [Rank 0] step:1481/10000 train_time:104924ms step_avg:70.85ms +[2025-09-02 09:45:36] [Rank 0] step:1481/10000 train_time:104924ms step_avg:70.85ms +[2025-09-02 09:45:37] [Rank 0] step:1501/10000 train_time:106354ms step_avg:70.86ms +[2025-09-02 09:45:37] [Rank 0] step:1501/10000 train_time:106354ms step_avg:70.86ms +[2025-09-02 09:45:39] [Rank 0] step:1521/10000 train_time:107788ms step_avg:70.87ms +[2025-09-02 09:45:39] [Rank 0] step:1521/10000 train_time:107788ms step_avg:70.87ms +[2025-09-02 09:45:40] [Rank 0] step:1541/10000 train_time:109222ms step_avg:70.88ms +[2025-09-02 09:45:40] [Rank 0] step:1541/10000 train_time:109222ms step_avg:70.88ms +[2025-09-02 09:45:42] [Rank 0] step:1561/10000 train_time:110656ms step_avg:70.89ms +[2025-09-02 09:45:42] [Rank 0] step:1561/10000 train_time:110656ms step_avg:70.89ms +[2025-09-02 09:45:43] [Rank 0] step:1581/10000 train_time:112091ms step_avg:70.90ms +[2025-09-02 09:45:43] [Rank 0] step:1581/10000 train_time:112091ms step_avg:70.90ms +[2025-09-02 09:45:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:45:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:45:56] [Rank 0] PRINT: step:1600/10000 val_loss:4.8707 svd_entropy: attn_qk:H=0.6701,top10E=0.39,eRank=120.8,q75/q25=32.61 attn_vo:H=0.6943,top10E=0.28,eRank=166.1,q75/q25=inf mlp_w1:H=0.6183,top10E=0.48,eRank=84.8,q75/q25=5.71 mlp_w2:H=0.7547,top10E=0.26,eRank=153.5,q75/q25=14.58 vo_prod:H=0.5532,top10E=0.39,eRank=54.6,q75/q25=inf train_time:113669ms step_avg:71.04ms +[2025-09-02 09:45:56] [Rank 0] PRINT: step:1600/10000 val_loss:4.8707 svd_entropy: attn_qk:H=0.6701,top10E=0.39,eRank=120.8,q75/q25=32.61 attn_vo:H=0.6943,top10E=0.28,eRank=166.1,q75/q25=inf mlp_w1:H=0.6183,top10E=0.48,eRank=84.8,q75/q25=5.71 mlp_w2:H=0.7547,top10E=0.26,eRank=153.5,q75/q25=14.58 vo_prod:H=0.5532,top10E=0.39,eRank=54.6,q75/q25=inf train_time:113669ms step_avg:71.04ms +[2025-09-02 09:45:56] [Rank 0] step:1601/10000 train_time:113680ms step_avg:71.01ms +[2025-09-02 09:45:56] [Rank 0] step:1601/10000 train_time:113680ms step_avg:71.01ms +[2025-09-02 09:45:58] [Rank 0] step:1621/10000 train_time:114995ms step_avg:70.94ms +[2025-09-02 09:45:58] [Rank 0] step:1621/10000 train_time:114995ms step_avg:70.94ms +[2025-09-02 09:45:59] [Rank 0] step:1641/10000 train_time:116428ms step_avg:70.95ms +[2025-09-02 09:45:59] [Rank 0] step:1641/10000 train_time:116428ms step_avg:70.95ms +[2025-09-02 09:46:01] [Rank 0] step:1661/10000 train_time:117863ms step_avg:70.96ms +[2025-09-02 09:46:01] [Rank 0] step:1661/10000 train_time:117863ms step_avg:70.96ms +[2025-09-02 09:46:02] [Rank 0] step:1681/10000 train_time:119298ms step_avg:70.97ms +[2025-09-02 09:46:02] [Rank 0] step:1681/10000 train_time:119298ms step_avg:70.97ms +[2025-09-02 09:46:04] [Rank 0] step:1701/10000 train_time:120732ms step_avg:70.98ms +[2025-09-02 09:46:04] [Rank 0] step:1701/10000 train_time:120732ms step_avg:70.98ms +[2025-09-02 09:46:05] [Rank 0] step:1721/10000 train_time:122167ms step_avg:70.99ms +[2025-09-02 09:46:05] [Rank 0] step:1721/10000 train_time:122167ms step_avg:70.99ms +[2025-09-02 09:46:06] [Rank 0] 
step:1741/10000 train_time:123603ms step_avg:71.00ms +[2025-09-02 09:46:06] [Rank 0] step:1741/10000 train_time:123603ms step_avg:71.00ms +[2025-09-02 09:46:08] [Rank 0] step:1761/10000 train_time:125060ms step_avg:71.02ms +[2025-09-02 09:46:08] [Rank 0] step:1761/10000 train_time:125060ms step_avg:71.02ms +[2025-09-02 09:46:09] [Rank 0] step:1781/10000 train_time:126494ms step_avg:71.02ms +[2025-09-02 09:46:09] [Rank 0] step:1781/10000 train_time:126494ms step_avg:71.02ms +[2025-09-02 09:46:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:46:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:46:23] [Rank 0] PRINT: step:1800/10000 val_loss:4.7533 svd_entropy: attn_qk:H=0.6791,top10E=0.37,eRank=125.0,q75/q25=38.47 attn_vo:H=0.7060,top10E=0.26,eRank=176.2,q75/q25=inf mlp_w1:H=0.6337,top10E=0.46,eRank=91.4,q75/q25=6.28 mlp_w2:H=0.7660,top10E=0.25,eRank=165.7,q75/q25=16.26 vo_prod:H=0.5642,top10E=0.37,eRank=59.0,q75/q25=inf train_time:128073ms step_avg:71.15ms +[2025-09-02 09:46:23] [Rank 0] PRINT: step:1800/10000 val_loss:4.7533 svd_entropy: attn_qk:H=0.6791,top10E=0.37,eRank=125.0,q75/q25=38.47 attn_vo:H=0.7060,top10E=0.26,eRank=176.2,q75/q25=inf mlp_w1:H=0.6337,top10E=0.46,eRank=91.4,q75/q25=6.28 mlp_w2:H=0.7660,top10E=0.25,eRank=165.7,q75/q25=16.26 vo_prod:H=0.5642,top10E=0.37,eRank=59.0,q75/q25=inf train_time:128073ms step_avg:71.15ms +[2025-09-02 09:46:23] [Rank 0] step:1801/10000 train_time:128083ms step_avg:71.12ms +[2025-09-02 09:46:23] [Rank 0] step:1801/10000 train_time:128083ms step_avg:71.12ms +[2025-09-02 09:46:24] [Rank 0] step:1821/10000 train_time:129398ms step_avg:71.06ms +[2025-09-02 09:46:24] [Rank 0] step:1821/10000 train_time:129398ms step_avg:71.06ms +[2025-09-02 09:46:26] [Rank 0] step:1841/10000 train_time:130830ms step_avg:71.06ms +[2025-09-02 09:46:26] 
[Rank 0] step:1841/10000 train_time:130830ms step_avg:71.06ms +[2025-09-02 09:46:27] [Rank 0] step:1861/10000 train_time:132263ms step_avg:71.07ms +[2025-09-02 09:46:27] [Rank 0] step:1861/10000 train_time:132263ms step_avg:71.07ms +[2025-09-02 09:46:28] [Rank 0] step:1881/10000 train_time:133696ms step_avg:71.08ms +[2025-09-02 09:46:28] [Rank 0] step:1881/10000 train_time:133696ms step_avg:71.08ms +[2025-09-02 09:46:30] [Rank 0] step:1901/10000 train_time:135131ms step_avg:71.08ms +[2025-09-02 09:46:30] [Rank 0] step:1901/10000 train_time:135131ms step_avg:71.08ms +[2025-09-02 09:46:31] [Rank 0] step:1921/10000 train_time:136565ms step_avg:71.09ms +[2025-09-02 09:46:31] [Rank 0] step:1921/10000 train_time:136565ms step_avg:71.09ms +[2025-09-02 09:46:33] [Rank 0] step:1941/10000 train_time:137999ms step_avg:71.10ms +[2025-09-02 09:46:33] [Rank 0] step:1941/10000 train_time:137999ms step_avg:71.10ms +[2025-09-02 09:46:34] [Rank 0] step:1961/10000 train_time:139433ms step_avg:71.10ms +[2025-09-02 09:46:34] [Rank 0] step:1961/10000 train_time:139433ms step_avg:71.10ms +[2025-09-02 09:46:36] [Rank 0] step:1981/10000 train_time:140867ms step_avg:71.11ms +[2025-09-02 09:46:36] [Rank 0] step:1981/10000 train_time:140867ms step_avg:71.11ms +[2025-09-02 09:46:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:46:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:46:49] [Rank 0] PRINT: step:2000/10000 val_loss:4.6720 svd_entropy: attn_qk:H=0.6869,top10E=0.36,eRank=128.9,q75/q25=44.46 attn_vo:H=0.7160,top10E=0.24,eRank=185.0,q75/q25=inf mlp_w1:H=0.6469,top10E=0.45,eRank=97.6,q75/q25=6.80 mlp_w2:H=0.7752,top10E=0.23,eRank=176.5,q75/q25=17.70 vo_prod:H=0.5742,top10E=0.36,eRank=63.2,q75/q25=inf train_time:142445ms step_avg:71.22ms +[2025-09-02 09:46:49] [Rank 0] PRINT: step:2000/10000 val_loss:4.6720 svd_entropy: attn_qk:H=0.6869,top10E=0.36,eRank=128.9,q75/q25=44.46 attn_vo:H=0.7160,top10E=0.24,eRank=185.0,q75/q25=inf mlp_w1:H=0.6469,top10E=0.45,eRank=97.6,q75/q25=6.80 mlp_w2:H=0.7752,top10E=0.23,eRank=176.5,q75/q25=17.70 vo_prod:H=0.5742,top10E=0.36,eRank=63.2,q75/q25=inf train_time:142445ms step_avg:71.22ms +[2025-09-02 09:46:49] [Rank 0] step:2001/10000 train_time:142456ms step_avg:71.19ms +[2025-09-02 09:46:49] [Rank 0] step:2001/10000 train_time:142456ms step_avg:71.19ms +[2025-09-02 09:46:50] [Rank 0] step:2021/10000 train_time:143772ms step_avg:71.14ms +[2025-09-02 09:46:50] [Rank 0] step:2021/10000 train_time:143772ms step_avg:71.14ms +[2025-09-02 09:46:52] [Rank 0] step:2041/10000 train_time:145325ms step_avg:71.20ms +[2025-09-02 09:46:52] [Rank 0] step:2041/10000 train_time:145325ms step_avg:71.20ms +[2025-09-02 09:46:53] [Rank 0] step:2061/10000 train_time:146759ms step_avg:71.21ms +[2025-09-02 09:46:53] [Rank 0] step:2061/10000 train_time:146759ms step_avg:71.21ms +[2025-09-02 09:46:55] [Rank 0] step:2081/10000 train_time:148193ms step_avg:71.21ms +[2025-09-02 09:46:55] [Rank 0] step:2081/10000 train_time:148193ms step_avg:71.21ms +[2025-09-02 09:46:56] [Rank 0] step:2101/10000 train_time:149628ms step_avg:71.22ms +[2025-09-02 09:46:56] [Rank 0] step:2101/10000 train_time:149628ms step_avg:71.22ms +[2025-09-02 09:46:58] [Rank 0] step:2121/10000 train_time:151063ms step_avg:71.22ms +[2025-09-02 09:46:58] [Rank 0] step:2121/10000 train_time:151063ms step_avg:71.22ms +[2025-09-02 09:46:59] [Rank 0] 
step:2141/10000 train_time:152498ms step_avg:71.23ms +[2025-09-02 09:46:59] [Rank 0] step:2141/10000 train_time:152498ms step_avg:71.23ms +[2025-09-02 09:47:00] [Rank 0] step:2161/10000 train_time:153934ms step_avg:71.23ms +[2025-09-02 09:47:00] [Rank 0] step:2161/10000 train_time:153934ms step_avg:71.23ms +[2025-09-02 09:47:02] [Rank 0] step:2181/10000 train_time:155370ms step_avg:71.24ms +[2025-09-02 09:47:02] [Rank 0] step:2181/10000 train_time:155370ms step_avg:71.24ms +[2025-09-02 09:47:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:47:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:47:15] [Rank 0] PRINT: step:2200/10000 val_loss:4.5927 svd_entropy: attn_qk:H=0.6937,top10E=0.35,eRank=132.5,q75/q25=50.12 attn_vo:H=0.7240,top10E=0.23,eRank=192.3,q75/q25=inf mlp_w1:H=0.6586,top10E=0.43,eRank=103.6,q75/q25=7.33 mlp_w2:H=0.7829,top10E=0.22,eRank=185.9,q75/q25=18.65 vo_prod:H=0.5825,top10E=0.34,eRank=67.0,q75/q25=inf train_time:156948ms step_avg:71.34ms +[2025-09-02 09:47:15] [Rank 0] PRINT: step:2200/10000 val_loss:4.5927 svd_entropy: attn_qk:H=0.6937,top10E=0.35,eRank=132.5,q75/q25=50.12 attn_vo:H=0.7240,top10E=0.23,eRank=192.3,q75/q25=inf mlp_w1:H=0.6586,top10E=0.43,eRank=103.6,q75/q25=7.33 mlp_w2:H=0.7829,top10E=0.22,eRank=185.9,q75/q25=18.65 vo_prod:H=0.5825,top10E=0.34,eRank=67.0,q75/q25=inf train_time:156948ms step_avg:71.34ms +[2025-09-02 09:47:15] [Rank 0] step:2201/10000 train_time:156959ms step_avg:71.31ms +[2025-09-02 09:47:15] [Rank 0] step:2201/10000 train_time:156959ms step_avg:71.31ms +[2025-09-02 09:47:17] [Rank 0] step:2221/10000 train_time:158262ms step_avg:71.26ms +[2025-09-02 09:47:17] [Rank 0] step:2221/10000 train_time:158262ms step_avg:71.26ms +[2025-09-02 09:47:18] [Rank 0] step:2241/10000 train_time:159727ms step_avg:71.27ms +[2025-09-02 
09:47:18] [Rank 0] step:2241/10000 train_time:159727ms step_avg:71.27ms +[2025-09-02 09:47:20] [Rank 0] step:2261/10000 train_time:161205ms step_avg:71.30ms +[2025-09-02 09:47:20] [Rank 0] step:2261/10000 train_time:161205ms step_avg:71.30ms +[2025-09-02 09:47:21] [Rank 0] step:2281/10000 train_time:162683ms step_avg:71.32ms +[2025-09-02 09:47:21] [Rank 0] step:2281/10000 train_time:162683ms step_avg:71.32ms +[2025-09-02 09:47:23] [Rank 0] step:2301/10000 train_time:164161ms step_avg:71.34ms +[2025-09-02 09:47:23] [Rank 0] step:2301/10000 train_time:164161ms step_avg:71.34ms +[2025-09-02 09:47:24] [Rank 0] step:2321/10000 train_time:165640ms step_avg:71.37ms +[2025-09-02 09:47:24] [Rank 0] step:2321/10000 train_time:165640ms step_avg:71.37ms +[2025-09-02 09:47:25] [Rank 0] step:2341/10000 train_time:167117ms step_avg:71.39ms +[2025-09-02 09:47:25] [Rank 0] step:2341/10000 train_time:167117ms step_avg:71.39ms +[2025-09-02 09:47:27] [Rank 0] step:2361/10000 train_time:168597ms step_avg:71.41ms +[2025-09-02 09:47:27] [Rank 0] step:2361/10000 train_time:168597ms step_avg:71.41ms +[2025-09-02 09:47:28] [Rank 0] step:2381/10000 train_time:170076ms step_avg:71.43ms +[2025-09-02 09:47:28] [Rank 0] step:2381/10000 train_time:170076ms step_avg:71.43ms +[2025-09-02 09:47:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:47:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:47:42] [Rank 0] PRINT: step:2400/10000 val_loss:4.5050 svd_entropy: attn_qk:H=0.6993,top10E=0.34,eRank=135.6,q75/q25=55.22 attn_vo:H=0.7313,top10E=0.22,eRank=199.0,q75/q25=inf mlp_w1:H=0.6696,top10E=0.42,eRank=109.7,q75/q25=7.81 mlp_w2:H=0.7896,top10E=0.21,eRank=194.5,q75/q25=19.17 vo_prod:H=0.5905,top10E=0.33,eRank=71.0,q75/q25=inf train_time:171703ms step_avg:71.54ms +[2025-09-02 09:47:42] [Rank 0] PRINT: step:2400/10000 val_loss:4.5050 svd_entropy: attn_qk:H=0.6993,top10E=0.34,eRank=135.6,q75/q25=55.22 attn_vo:H=0.7313,top10E=0.22,eRank=199.0,q75/q25=inf mlp_w1:H=0.6696,top10E=0.42,eRank=109.7,q75/q25=7.81 mlp_w2:H=0.7896,top10E=0.21,eRank=194.5,q75/q25=19.17 vo_prod:H=0.5905,top10E=0.33,eRank=71.0,q75/q25=inf train_time:171703ms step_avg:71.54ms +[2025-09-02 09:47:42] [Rank 0] step:2401/10000 train_time:171713ms step_avg:71.52ms +[2025-09-02 09:47:42] [Rank 0] step:2401/10000 train_time:171713ms step_avg:71.52ms +[2025-09-02 09:47:43] [Rank 0] step:2421/10000 train_time:173057ms step_avg:71.48ms +[2025-09-02 09:47:43] [Rank 0] step:2421/10000 train_time:173057ms step_avg:71.48ms +[2025-09-02 09:47:45] [Rank 0] step:2441/10000 train_time:174535ms step_avg:71.50ms +[2025-09-02 09:47:45] [Rank 0] step:2441/10000 train_time:174535ms step_avg:71.50ms +[2025-09-02 09:47:46] [Rank 0] step:2461/10000 train_time:176014ms step_avg:71.52ms +[2025-09-02 09:47:46] [Rank 0] step:2461/10000 train_time:176014ms step_avg:71.52ms +[2025-09-02 09:47:48] [Rank 0] step:2481/10000 train_time:177491ms step_avg:71.54ms +[2025-09-02 09:47:48] [Rank 0] step:2481/10000 train_time:177491ms step_avg:71.54ms +[2025-09-02 09:47:49] [Rank 0] step:2501/10000 train_time:178974ms step_avg:71.56ms +[2025-09-02 09:47:49] [Rank 0] step:2501/10000 train_time:178974ms step_avg:71.56ms +[2025-09-02 09:47:50] [Rank 0] step:2521/10000 train_time:180453ms step_avg:71.58ms +[2025-09-02 09:47:50] [Rank 0] step:2521/10000 train_time:180453ms step_avg:71.58ms +[2025-09-02 09:47:52] [Rank 0] 
step:2541/10000 train_time:181933ms step_avg:71.60ms +[2025-09-02 09:47:52] [Rank 0] step:2541/10000 train_time:181933ms step_avg:71.60ms +[2025-09-02 09:47:53] [Rank 0] step:2561/10000 train_time:183412ms step_avg:71.62ms +[2025-09-02 09:47:53] [Rank 0] step:2561/10000 train_time:183412ms step_avg:71.62ms +[2025-09-02 09:47:55] [Rank 0] step:2581/10000 train_time:184894ms step_avg:71.64ms +[2025-09-02 09:47:55] [Rank 0] step:2581/10000 train_time:184894ms step_avg:71.64ms +[2025-09-02 09:47:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:47:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:48:08] [Rank 0] PRINT: step:2600/10000 val_loss:4.4436 svd_entropy: attn_qk:H=0.7050,top10E=0.33,eRank=138.9,q75/q25=59.83 attn_vo:H=0.7376,top10E=0.21,eRank=205.0,q75/q25=inf mlp_w1:H=0.6799,top10E=0.40,eRank=115.9,q75/q25=8.22 mlp_w2:H=0.7961,top10E=0.20,eRank=203.1,q75/q25=19.10 vo_prod:H=0.5980,top10E=0.32,eRank=74.7,q75/q25=inf train_time:186522ms step_avg:71.74ms +[2025-09-02 09:48:08] [Rank 0] PRINT: step:2600/10000 val_loss:4.4436 svd_entropy: attn_qk:H=0.7050,top10E=0.33,eRank=138.9,q75/q25=59.83 attn_vo:H=0.7376,top10E=0.21,eRank=205.0,q75/q25=inf mlp_w1:H=0.6799,top10E=0.40,eRank=115.9,q75/q25=8.22 mlp_w2:H=0.7961,top10E=0.20,eRank=203.1,q75/q25=19.10 vo_prod:H=0.5980,top10E=0.32,eRank=74.7,q75/q25=inf train_time:186522ms step_avg:71.74ms +[2025-09-02 09:48:08] [Rank 0] step:2601/10000 train_time:186533ms step_avg:71.72ms +[2025-09-02 09:48:08] [Rank 0] step:2601/10000 train_time:186533ms step_avg:71.72ms +[2025-09-02 09:48:10] [Rank 0] step:2621/10000 train_time:187864ms step_avg:71.68ms +[2025-09-02 09:48:10] [Rank 0] step:2621/10000 train_time:187864ms step_avg:71.68ms +[2025-09-02 09:48:11] [Rank 0] step:2641/10000 train_time:189342ms step_avg:71.69ms +[2025-09-02 
09:48:11] [Rank 0] step:2641/10000 train_time:189342ms step_avg:71.69ms +[2025-09-02 09:48:13] [Rank 0] step:2661/10000 train_time:190819ms step_avg:71.71ms +[2025-09-02 09:48:13] [Rank 0] step:2661/10000 train_time:190819ms step_avg:71.71ms +[2025-09-02 09:48:14] [Rank 0] step:2681/10000 train_time:192298ms step_avg:71.73ms +[2025-09-02 09:48:14] [Rank 0] step:2681/10000 train_time:192298ms step_avg:71.73ms +[2025-09-02 09:48:16] [Rank 0] step:2701/10000 train_time:193775ms step_avg:71.74ms +[2025-09-02 09:48:16] [Rank 0] step:2701/10000 train_time:193775ms step_avg:71.74ms +[2025-09-02 09:48:17] [Rank 0] step:2721/10000 train_time:195254ms step_avg:71.76ms +[2025-09-02 09:48:17] [Rank 0] step:2721/10000 train_time:195254ms step_avg:71.76ms +[2025-09-02 09:48:18] [Rank 0] step:2741/10000 train_time:196733ms step_avg:71.77ms +[2025-09-02 09:48:18] [Rank 0] step:2741/10000 train_time:196733ms step_avg:71.77ms +[2025-09-02 09:48:20] [Rank 0] step:2761/10000 train_time:198213ms step_avg:71.79ms +[2025-09-02 09:48:20] [Rank 0] step:2761/10000 train_time:198213ms step_avg:71.79ms +[2025-09-02 09:48:21] [Rank 0] step:2781/10000 train_time:199692ms step_avg:71.81ms +[2025-09-02 09:48:21] [Rank 0] step:2781/10000 train_time:199692ms step_avg:71.81ms +[2025-09-02 09:48:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:48:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:48:35] [Rank 0] PRINT: step:2800/10000 val_loss:4.3976 svd_entropy: attn_qk:H=0.7104,top10E=0.32,eRank=142.1,q75/q25=64.73 attn_vo:H=0.7432,top10E=0.21,eRank=210.5,q75/q25=inf mlp_w1:H=0.6896,top10E=0.39,eRank=122.1,q75/q25=8.57 mlp_w2:H=0.8021,top10E=0.20,eRank=211.3,q75/q25=18.86 vo_prod:H=0.6048,top10E=0.30,eRank=78.3,q75/q25=inf train_time:201321ms step_avg:71.90ms +[2025-09-02 09:48:35] [Rank 0] PRINT: step:2800/10000 val_loss:4.3976 svd_entropy: attn_qk:H=0.7104,top10E=0.32,eRank=142.1,q75/q25=64.73 attn_vo:H=0.7432,top10E=0.21,eRank=210.5,q75/q25=inf mlp_w1:H=0.6896,top10E=0.39,eRank=122.1,q75/q25=8.57 mlp_w2:H=0.8021,top10E=0.20,eRank=211.3,q75/q25=18.86 vo_prod:H=0.6048,top10E=0.30,eRank=78.3,q75/q25=inf train_time:201321ms step_avg:71.90ms +[2025-09-02 09:48:35] [Rank 0] step:2801/10000 train_time:201331ms step_avg:71.88ms +[2025-09-02 09:48:35] [Rank 0] step:2801/10000 train_time:201331ms step_avg:71.88ms +[2025-09-02 09:48:36] [Rank 0] step:2821/10000 train_time:202666ms step_avg:71.84ms +[2025-09-02 09:48:36] [Rank 0] step:2821/10000 train_time:202666ms step_avg:71.84ms +[2025-09-02 09:48:38] [Rank 0] step:2841/10000 train_time:204142ms step_avg:71.86ms +[2025-09-02 09:48:38] [Rank 0] step:2841/10000 train_time:204142ms step_avg:71.86ms +[2025-09-02 09:48:39] [Rank 0] step:2861/10000 train_time:205619ms step_avg:71.87ms +[2025-09-02 09:48:39] [Rank 0] step:2861/10000 train_time:205619ms step_avg:71.87ms +[2025-09-02 09:48:41] [Rank 0] step:2881/10000 train_time:207095ms step_avg:71.88ms +[2025-09-02 09:48:41] [Rank 0] step:2881/10000 train_time:207095ms step_avg:71.88ms +[2025-09-02 09:48:42] [Rank 0] step:2901/10000 train_time:208572ms step_avg:71.90ms +[2025-09-02 09:48:42] [Rank 0] step:2901/10000 train_time:208572ms step_avg:71.90ms +[2025-09-02 09:48:44] [Rank 0] step:2921/10000 train_time:210051ms step_avg:71.91ms +[2025-09-02 09:48:44] [Rank 0] step:2921/10000 train_time:210051ms step_avg:71.91ms +[2025-09-02 09:48:45] [Rank 0] 
step:2941/10000 train_time:211529ms step_avg:71.92ms +[2025-09-02 09:48:45] [Rank 0] step:2941/10000 train_time:211529ms step_avg:71.92ms +[2025-09-02 09:48:47] [Rank 0] step:2961/10000 train_time:213008ms step_avg:71.94ms +[2025-09-02 09:48:47] [Rank 0] step:2961/10000 train_time:213008ms step_avg:71.94ms +[2025-09-02 09:48:48] [Rank 0] step:2981/10000 train_time:214493ms step_avg:71.95ms +[2025-09-02 09:48:48] [Rank 0] step:2981/10000 train_time:214493ms step_avg:71.95ms +[2025-09-02 09:48:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:48:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:49:01] [Rank 0] PRINT: step:3000/10000 val_loss:4.3521 svd_entropy: attn_qk:H=0.7150,top10E=0.32,eRank=144.9,q75/q25=68.41 attn_vo:H=0.7482,top10E=0.20,eRank=215.4,q75/q25=inf mlp_w1:H=0.6986,top10E=0.38,eRank=128.2,q75/q25=8.95 mlp_w2:H=0.8071,top10E=0.19,eRank=218.4,q75/q25=18.69 vo_prod:H=0.6107,top10E=0.29,eRank=81.7,q75/q25=inf train_time:216128ms step_avg:72.04ms +[2025-09-02 09:49:01] [Rank 0] PRINT: step:3000/10000 val_loss:4.3521 svd_entropy: attn_qk:H=0.7150,top10E=0.32,eRank=144.9,q75/q25=68.41 attn_vo:H=0.7482,top10E=0.20,eRank=215.4,q75/q25=inf mlp_w1:H=0.6986,top10E=0.38,eRank=128.2,q75/q25=8.95 mlp_w2:H=0.8071,top10E=0.19,eRank=218.4,q75/q25=18.69 vo_prod:H=0.6107,top10E=0.29,eRank=81.7,q75/q25=inf train_time:216128ms step_avg:72.04ms +[2025-09-02 09:49:01] [Rank 0] step:3001/10000 train_time:216138ms step_avg:72.02ms +[2025-09-02 09:49:01] [Rank 0] step:3001/10000 train_time:216138ms step_avg:72.02ms +[2025-09-02 09:49:03] [Rank 0] step:3021/10000 train_time:217494ms step_avg:71.99ms +[2025-09-02 09:49:03] [Rank 0] step:3021/10000 train_time:217494ms step_avg:71.99ms +[2025-09-02 09:49:04] [Rank 0] step:3041/10000 train_time:218977ms step_avg:72.01ms +[2025-09-02 
09:49:04] [Rank 0] step:3041/10000 train_time:218977ms step_avg:72.01ms +[2025-09-02 09:49:06] [Rank 0] step:3061/10000 train_time:220462ms step_avg:72.02ms +[2025-09-02 09:49:06] [Rank 0] step:3061/10000 train_time:220462ms step_avg:72.02ms +[2025-09-02 09:49:07] [Rank 0] step:3081/10000 train_time:221947ms step_avg:72.04ms +[2025-09-02 09:49:07] [Rank 0] step:3081/10000 train_time:221947ms step_avg:72.04ms +[2025-09-02 09:49:09] [Rank 0] step:3101/10000 train_time:223434ms step_avg:72.05ms +[2025-09-02 09:49:09] [Rank 0] step:3101/10000 train_time:223434ms step_avg:72.05ms +[2025-09-02 09:49:10] [Rank 0] step:3121/10000 train_time:224920ms step_avg:72.07ms +[2025-09-02 09:49:10] [Rank 0] step:3121/10000 train_time:224920ms step_avg:72.07ms +[2025-09-02 09:49:12] [Rank 0] step:3141/10000 train_time:226406ms step_avg:72.08ms +[2025-09-02 09:49:12] [Rank 0] step:3141/10000 train_time:226406ms step_avg:72.08ms +[2025-09-02 09:49:13] [Rank 0] step:3161/10000 train_time:227893ms step_avg:72.10ms +[2025-09-02 09:49:13] [Rank 0] step:3161/10000 train_time:227893ms step_avg:72.10ms +[2025-09-02 09:49:15] [Rank 0] step:3181/10000 train_time:229380ms step_avg:72.11ms +[2025-09-02 09:49:15] [Rank 0] step:3181/10000 train_time:229380ms step_avg:72.11ms +[2025-09-02 09:49:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:49:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:49:28] [Rank 0] PRINT: step:3200/10000 val_loss:4.3174 svd_entropy: attn_qk:H=0.7194,top10E=0.31,eRank=147.8,q75/q25=71.54 attn_vo:H=0.7526,top10E=0.19,eRank=220.0,q75/q25=inf mlp_w1:H=0.7068,top10E=0.37,eRank=134.1,q75/q25=9.24 mlp_w2:H=0.8120,top10E=0.18,eRank=225.5,q75/q25=18.54 vo_prod:H=0.6159,top10E=0.29,eRank=84.8,q75/q25=inf train_time:231018ms step_avg:72.19ms +[2025-09-02 09:49:28] [Rank 0] PRINT: step:3200/10000 val_loss:4.3174 svd_entropy: attn_qk:H=0.7194,top10E=0.31,eRank=147.8,q75/q25=71.54 attn_vo:H=0.7526,top10E=0.19,eRank=220.0,q75/q25=inf mlp_w1:H=0.7068,top10E=0.37,eRank=134.1,q75/q25=9.24 mlp_w2:H=0.8120,top10E=0.18,eRank=225.5,q75/q25=18.54 vo_prod:H=0.6159,top10E=0.29,eRank=84.8,q75/q25=inf train_time:231018ms step_avg:72.19ms +[2025-09-02 09:49:28] [Rank 0] step:3201/10000 train_time:231029ms step_avg:72.17ms +[2025-09-02 09:49:28] [Rank 0] step:3201/10000 train_time:231029ms step_avg:72.17ms +[2025-09-02 09:49:29] [Rank 0] step:3221/10000 train_time:232385ms step_avg:72.15ms +[2025-09-02 09:49:29] [Rank 0] step:3221/10000 train_time:232385ms step_avg:72.15ms +[2025-09-02 09:49:31] [Rank 0] step:3241/10000 train_time:233872ms step_avg:72.16ms +[2025-09-02 09:49:31] [Rank 0] step:3241/10000 train_time:233872ms step_avg:72.16ms +[2025-09-02 09:49:32] [Rank 0] step:3261/10000 train_time:235359ms step_avg:72.17ms +[2025-09-02 09:49:32] [Rank 0] step:3261/10000 train_time:235359ms step_avg:72.17ms +[2025-09-02 09:49:34] [Rank 0] step:3281/10000 train_time:236847ms step_avg:72.19ms +[2025-09-02 09:49:34] [Rank 0] step:3281/10000 train_time:236847ms step_avg:72.19ms +[2025-09-02 09:49:35] [Rank 0] step:3301/10000 train_time:238334ms step_avg:72.20ms +[2025-09-02 09:49:35] [Rank 0] step:3301/10000 train_time:238334ms step_avg:72.20ms +[2025-09-02 09:49:37] [Rank 0] step:3321/10000 train_time:239820ms step_avg:72.21ms +[2025-09-02 09:49:37] [Rank 0] step:3321/10000 train_time:239820ms step_avg:72.21ms +[2025-09-02 09:49:38] [Rank 0] 
step:3341/10000 train_time:241309ms step_avg:72.23ms +[2025-09-02 09:49:38] [Rank 0] step:3341/10000 train_time:241309ms step_avg:72.23ms +[2025-09-02 09:49:40] [Rank 0] step:3361/10000 train_time:242797ms step_avg:72.24ms +[2025-09-02 09:49:40] [Rank 0] step:3361/10000 train_time:242797ms step_avg:72.24ms +[2025-09-02 09:49:41] [Rank 0] step:3381/10000 train_time:244285ms step_avg:72.25ms +[2025-09-02 09:49:41] [Rank 0] step:3381/10000 train_time:244285ms step_avg:72.25ms +[2025-09-02 09:49:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:49:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:49:54] [Rank 0] PRINT: step:3400/10000 val_loss:4.2712 svd_entropy: attn_qk:H=0.7239,top10E=0.31,eRank=150.7,q75/q25=74.62 attn_vo:H=0.7568,top10E=0.19,eRank=224.4,q75/q25=inf mlp_w1:H=0.7145,top10E=0.36,eRank=139.9,q75/q25=9.59 mlp_w2:H=0.8162,top10E=0.18,eRank=231.8,q75/q25=18.58 vo_prod:H=0.6213,top10E=0.28,eRank=88.1,q75/q25=inf train_time:245924ms step_avg:72.33ms +[2025-09-02 09:49:54] [Rank 0] PRINT: step:3400/10000 val_loss:4.2712 svd_entropy: attn_qk:H=0.7239,top10E=0.31,eRank=150.7,q75/q25=74.62 attn_vo:H=0.7568,top10E=0.19,eRank=224.4,q75/q25=inf mlp_w1:H=0.7145,top10E=0.36,eRank=139.9,q75/q25=9.59 mlp_w2:H=0.8162,top10E=0.18,eRank=231.8,q75/q25=18.58 vo_prod:H=0.6213,top10E=0.28,eRank=88.1,q75/q25=inf train_time:245924ms step_avg:72.33ms +[2025-09-02 09:49:54] [Rank 0] step:3401/10000 train_time:245934ms step_avg:72.31ms +[2025-09-02 09:49:54] [Rank 0] step:3401/10000 train_time:245934ms step_avg:72.31ms +[2025-09-02 09:49:56] [Rank 0] step:3421/10000 train_time:247275ms step_avg:72.28ms +[2025-09-02 09:49:56] [Rank 0] step:3421/10000 train_time:247275ms step_avg:72.28ms +[2025-09-02 09:49:57] [Rank 0] step:3441/10000 train_time:248759ms step_avg:72.29ms +[2025-09-02 
09:49:57] [Rank 0] step:3441/10000 train_time:248759ms step_avg:72.29ms +[2025-09-02 09:49:59] [Rank 0] step:3461/10000 train_time:250245ms step_avg:72.30ms +[2025-09-02 09:49:59] [Rank 0] step:3461/10000 train_time:250245ms step_avg:72.30ms +[2025-09-02 09:50:00] [Rank 0] step:3481/10000 train_time:251731ms step_avg:72.32ms +[2025-09-02 09:50:00] [Rank 0] step:3481/10000 train_time:251731ms step_avg:72.32ms +[2025-09-02 09:50:02] [Rank 0] step:3501/10000 train_time:253218ms step_avg:72.33ms +[2025-09-02 09:50:02] [Rank 0] step:3501/10000 train_time:253218ms step_avg:72.33ms +[2025-09-02 09:50:03] [Rank 0] step:3521/10000 train_time:254706ms step_avg:72.34ms +[2025-09-02 09:50:03] [Rank 0] step:3521/10000 train_time:254706ms step_avg:72.34ms +[2025-09-02 09:50:05] [Rank 0] step:3541/10000 train_time:256192ms step_avg:72.35ms +[2025-09-02 09:50:05] [Rank 0] step:3541/10000 train_time:256192ms step_avg:72.35ms +[2025-09-02 09:50:06] [Rank 0] step:3561/10000 train_time:257679ms step_avg:72.36ms +[2025-09-02 09:50:06] [Rank 0] step:3561/10000 train_time:257679ms step_avg:72.36ms +[2025-09-02 09:50:08] [Rank 0] step:3581/10000 train_time:259165ms step_avg:72.37ms +[2025-09-02 09:50:08] [Rank 0] step:3581/10000 train_time:259165ms step_avg:72.37ms +[2025-09-02 09:50:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:50:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:50:21] [Rank 0] PRINT: step:3600/10000 val_loss:4.2582 svd_entropy: attn_qk:H=0.7277,top10E=0.30,eRank=153.4,q75/q25=77.41 attn_vo:H=0.7605,top10E=0.18,eRank=228.5,q75/q25=inf mlp_w1:H=0.7214,top10E=0.35,eRank=145.5,q75/q25=9.94 mlp_w2:H=0.8197,top10E=0.17,eRank=237.3,q75/q25=18.67 vo_prod:H=0.6259,top10E=0.27,eRank=91.0,q75/q25=inf train_time:260803ms step_avg:72.45ms +[2025-09-02 09:50:21] [Rank 0] PRINT: step:3600/10000 val_loss:4.2582 svd_entropy: attn_qk:H=0.7277,top10E=0.30,eRank=153.4,q75/q25=77.41 attn_vo:H=0.7605,top10E=0.18,eRank=228.5,q75/q25=inf mlp_w1:H=0.7214,top10E=0.35,eRank=145.5,q75/q25=9.94 mlp_w2:H=0.8197,top10E=0.17,eRank=237.3,q75/q25=18.67 vo_prod:H=0.6259,top10E=0.27,eRank=91.0,q75/q25=inf train_time:260803ms step_avg:72.45ms +[2025-09-02 09:50:21] [Rank 0] step:3601/10000 train_time:260813ms step_avg:72.43ms +[2025-09-02 09:50:21] [Rank 0] step:3601/10000 train_time:260813ms step_avg:72.43ms +[2025-09-02 09:50:23] [Rank 0] step:3621/10000 train_time:262166ms step_avg:72.40ms +[2025-09-02 09:50:23] [Rank 0] step:3621/10000 train_time:262166ms step_avg:72.40ms +[2025-09-02 09:50:24] [Rank 0] step:3641/10000 train_time:263649ms step_avg:72.41ms +[2025-09-02 09:50:24] [Rank 0] step:3641/10000 train_time:263649ms step_avg:72.41ms +[2025-09-02 09:50:25] [Rank 0] step:3661/10000 train_time:265135ms step_avg:72.42ms +[2025-09-02 09:50:25] [Rank 0] step:3661/10000 train_time:265135ms step_avg:72.42ms +[2025-09-02 09:50:27] [Rank 0] step:3681/10000 train_time:266621ms step_avg:72.43ms +[2025-09-02 09:50:27] [Rank 0] step:3681/10000 train_time:266621ms step_avg:72.43ms +[2025-09-02 09:50:28] [Rank 0] step:3701/10000 train_time:268107ms step_avg:72.44ms +[2025-09-02 09:50:28] [Rank 0] step:3701/10000 train_time:268107ms step_avg:72.44ms +[2025-09-02 09:50:30] [Rank 0] step:3721/10000 train_time:269620ms step_avg:72.46ms +[2025-09-02 09:50:30] [Rank 0] step:3721/10000 train_time:269620ms step_avg:72.46ms +[2025-09-02 09:50:31] [Rank 0] 
step:3741/10000 train_time:271141ms step_avg:72.48ms +[2025-09-02 09:50:31] [Rank 0] step:3741/10000 train_time:271141ms step_avg:72.48ms +[2025-09-02 09:50:33] [Rank 0] step:3761/10000 train_time:272667ms step_avg:72.50ms +[2025-09-02 09:50:33] [Rank 0] step:3761/10000 train_time:272667ms step_avg:72.50ms +[2025-09-02 09:50:35] [Rank 0] step:3781/10000 train_time:274197ms step_avg:72.52ms +[2025-09-02 09:50:35] [Rank 0] step:3781/10000 train_time:274197ms step_avg:72.52ms +[2025-09-02 09:50:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:50:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:50:48] [Rank 0] PRINT: step:3800/10000 val_loss:4.2010 svd_entropy: attn_qk:H=0.7310,top10E=0.30,eRank=155.8,q75/q25=79.16 attn_vo:H=0.7639,top10E=0.18,eRank=232.3,q75/q25=inf mlp_w1:H=0.7281,top10E=0.34,eRank=151.0,q75/q25=10.33 mlp_w2:H=0.8228,top10E=0.17,eRank=242.2,q75/q25=18.92 vo_prod:H=0.6302,top10E=0.27,eRank=93.9,q75/q25=inf train_time:275880ms step_avg:72.60ms +[2025-09-02 09:50:48] [Rank 0] PRINT: step:3800/10000 val_loss:4.2010 svd_entropy: attn_qk:H=0.7310,top10E=0.30,eRank=155.8,q75/q25=79.16 attn_vo:H=0.7639,top10E=0.18,eRank=232.3,q75/q25=inf mlp_w1:H=0.7281,top10E=0.34,eRank=151.0,q75/q25=10.33 mlp_w2:H=0.8228,top10E=0.17,eRank=242.2,q75/q25=18.92 vo_prod:H=0.6302,top10E=0.27,eRank=93.9,q75/q25=inf train_time:275880ms step_avg:72.60ms +[2025-09-02 09:50:48] [Rank 0] step:3801/10000 train_time:275891ms step_avg:72.58ms +[2025-09-02 09:50:48] [Rank 0] step:3801/10000 train_time:275891ms step_avg:72.58ms +[2025-09-02 09:50:49] [Rank 0] step:3821/10000 train_time:277285ms step_avg:72.57ms +[2025-09-02 09:50:49] [Rank 0] step:3821/10000 train_time:277285ms step_avg:72.57ms +[2025-09-02 09:50:51] [Rank 0] step:3841/10000 train_time:278810ms step_avg:72.59ms +[2025-09-02 
09:50:51] [Rank 0] step:3841/10000 train_time:278810ms step_avg:72.59ms +[2025-09-02 09:50:52] [Rank 0] step:3861/10000 train_time:280334ms step_avg:72.61ms +[2025-09-02 09:50:52] [Rank 0] step:3861/10000 train_time:280334ms step_avg:72.61ms +[2025-09-02 09:50:54] [Rank 0] step:3881/10000 train_time:281856ms step_avg:72.62ms +[2025-09-02 09:50:54] [Rank 0] step:3881/10000 train_time:281856ms step_avg:72.62ms +[2025-09-02 09:50:55] [Rank 0] step:3901/10000 train_time:283380ms step_avg:72.64ms +[2025-09-02 09:50:55] [Rank 0] step:3901/10000 train_time:283380ms step_avg:72.64ms +[2025-09-02 09:50:57] [Rank 0] step:3921/10000 train_time:284902ms step_avg:72.66ms +[2025-09-02 09:50:57] [Rank 0] step:3921/10000 train_time:284902ms step_avg:72.66ms +[2025-09-02 09:50:58] [Rank 0] step:3941/10000 train_time:286425ms step_avg:72.68ms +[2025-09-02 09:50:58] [Rank 0] step:3941/10000 train_time:286425ms step_avg:72.68ms +[2025-09-02 09:51:00] [Rank 0] step:3961/10000 train_time:287946ms step_avg:72.70ms +[2025-09-02 09:51:00] [Rank 0] step:3961/10000 train_time:287946ms step_avg:72.70ms +[2025-09-02 09:51:02] [Rank 0] step:3981/10000 train_time:289469ms step_avg:72.71ms +[2025-09-02 09:51:02] [Rank 0] step:3981/10000 train_time:289469ms step_avg:72.71ms +[2025-09-02 09:51:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:51:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:51:15] [Rank 0] PRINT: step:4000/10000 val_loss:4.1730 svd_entropy: attn_qk:H=0.7345,top10E=0.29,eRank=158.3,q75/q25=81.09 attn_vo:H=0.7670,top10E=0.17,eRank=235.7,q75/q25=inf mlp_w1:H=0.7341,top10E=0.33,eRank=156.5,q75/q25=10.60 mlp_w2:H=0.8261,top10E=0.17,eRank=247.5,q75/q25=18.92 vo_prod:H=0.6341,top10E=0.26,eRank=96.6,q75/q25=inf train_time:291143ms step_avg:72.79ms +[2025-09-02 09:51:15] [Rank 0] PRINT: step:4000/10000 val_loss:4.1730 svd_entropy: attn_qk:H=0.7345,top10E=0.29,eRank=158.3,q75/q25=81.09 attn_vo:H=0.7670,top10E=0.17,eRank=235.7,q75/q25=inf mlp_w1:H=0.7341,top10E=0.33,eRank=156.5,q75/q25=10.60 mlp_w2:H=0.8261,top10E=0.17,eRank=247.5,q75/q25=18.92 vo_prod:H=0.6341,top10E=0.26,eRank=96.6,q75/q25=inf train_time:291143ms step_avg:72.79ms +[2025-09-02 09:51:15] [Rank 0] step:4001/10000 train_time:291154ms step_avg:72.77ms +[2025-09-02 09:51:15] [Rank 0] step:4001/10000 train_time:291154ms step_avg:72.77ms +[2025-09-02 09:51:16] [Rank 0] step:4021/10000 train_time:292533ms step_avg:72.75ms +[2025-09-02 09:51:16] [Rank 0] step:4021/10000 train_time:292533ms step_avg:72.75ms +[2025-09-02 09:51:18] [Rank 0] step:4041/10000 train_time:294055ms step_avg:72.77ms +[2025-09-02 09:51:18] [Rank 0] step:4041/10000 train_time:294055ms step_avg:72.77ms +[2025-09-02 09:51:19] [Rank 0] step:4061/10000 train_time:295578ms step_avg:72.78ms +[2025-09-02 09:51:19] [Rank 0] step:4061/10000 train_time:295578ms step_avg:72.78ms +[2025-09-02 09:51:21] [Rank 0] step:4081/10000 train_time:297208ms step_avg:72.83ms +[2025-09-02 09:51:21] [Rank 0] step:4081/10000 train_time:297208ms step_avg:72.83ms +[2025-09-02 09:51:22] [Rank 0] step:4101/10000 train_time:298729ms step_avg:72.84ms +[2025-09-02 09:51:22] [Rank 0] step:4101/10000 train_time:298729ms step_avg:72.84ms +[2025-09-02 09:51:24] [Rank 0] step:4121/10000 train_time:300252ms step_avg:72.86ms +[2025-09-02 09:51:24] [Rank 0] step:4121/10000 train_time:300252ms step_avg:72.86ms +[2025-09-02 09:51:26] [Rank 0] 
step:4141/10000 train_time:301773ms step_avg:72.87ms +[2025-09-02 09:51:26] [Rank 0] step:4141/10000 train_time:301773ms step_avg:72.87ms +[2025-09-02 09:51:27] [Rank 0] step:4161/10000 train_time:303297ms step_avg:72.89ms +[2025-09-02 09:51:27] [Rank 0] step:4161/10000 train_time:303297ms step_avg:72.89ms +[2025-09-02 09:51:29] [Rank 0] step:4181/10000 train_time:304822ms step_avg:72.91ms +[2025-09-02 09:51:29] [Rank 0] step:4181/10000 train_time:304822ms step_avg:72.91ms +[2025-09-02 09:51:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:51:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:51:42] [Rank 0] PRINT: step:4200/10000 val_loss:4.1512 svd_entropy: attn_qk:H=0.7377,top10E=0.29,eRank=160.6,q75/q25=82.28 attn_vo:H=0.7699,top10E=0.17,eRank=239.1,q75/q25=inf mlp_w1:H=0.7395,top10E=0.32,eRank=161.5,q75/q25=11.01 mlp_w2:H=0.8285,top10E=0.16,eRank=251.6,q75/q25=19.19 vo_prod:H=0.6377,top10E=0.25,eRank=99.1,q75/q25=inf train_time:306500ms step_avg:72.98ms +[2025-09-02 09:51:42] [Rank 0] PRINT: step:4200/10000 val_loss:4.1512 svd_entropy: attn_qk:H=0.7377,top10E=0.29,eRank=160.6,q75/q25=82.28 attn_vo:H=0.7699,top10E=0.17,eRank=239.1,q75/q25=inf mlp_w1:H=0.7395,top10E=0.32,eRank=161.5,q75/q25=11.01 mlp_w2:H=0.8285,top10E=0.16,eRank=251.6,q75/q25=19.19 vo_prod:H=0.6377,top10E=0.25,eRank=99.1,q75/q25=inf train_time:306500ms step_avg:72.98ms +[2025-09-02 09:51:42] [Rank 0] step:4201/10000 train_time:306511ms step_avg:72.96ms +[2025-09-02 09:51:42] [Rank 0] step:4201/10000 train_time:306511ms step_avg:72.96ms +[2025-09-02 09:51:43] [Rank 0] step:4221/10000 train_time:307892ms step_avg:72.94ms +[2025-09-02 09:51:43] [Rank 0] step:4221/10000 train_time:307892ms step_avg:72.94ms +[2025-09-02 09:51:45] [Rank 0] step:4241/10000 train_time:309416ms step_avg:72.96ms +[2025-09-02 
09:51:45] [Rank 0] step:4241/10000 train_time:309416ms step_avg:72.96ms +[2025-09-02 09:51:46] [Rank 0] step:4261/10000 train_time:310938ms step_avg:72.97ms +[2025-09-02 09:51:46] [Rank 0] step:4261/10000 train_time:310938ms step_avg:72.97ms +[2025-09-02 09:51:48] [Rank 0] step:4281/10000 train_time:312460ms step_avg:72.99ms +[2025-09-02 09:51:48] [Rank 0] step:4281/10000 train_time:312460ms step_avg:72.99ms +[2025-09-02 09:51:49] [Rank 0] step:4301/10000 train_time:313984ms step_avg:73.00ms +[2025-09-02 09:51:49] [Rank 0] step:4301/10000 train_time:313984ms step_avg:73.00ms +[2025-09-02 09:51:51] [Rank 0] step:4321/10000 train_time:315511ms step_avg:73.02ms +[2025-09-02 09:51:51] [Rank 0] step:4321/10000 train_time:315511ms step_avg:73.02ms +[2025-09-02 09:51:52] [Rank 0] step:4341/10000 train_time:317032ms step_avg:73.03ms +[2025-09-02 09:51:52] [Rank 0] step:4341/10000 train_time:317032ms step_avg:73.03ms +[2025-09-02 09:51:54] [Rank 0] step:4361/10000 train_time:318557ms step_avg:73.05ms +[2025-09-02 09:51:54] [Rank 0] step:4361/10000 train_time:318557ms step_avg:73.05ms +[2025-09-02 09:51:56] [Rank 0] step:4381/10000 train_time:320082ms step_avg:73.06ms +[2025-09-02 09:51:56] [Rank 0] step:4381/10000 train_time:320082ms step_avg:73.06ms +[2025-09-02 09:51:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:51:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:52:09] [Rank 0] PRINT: step:4400/10000 val_loss:4.1411 svd_entropy: attn_qk:H=0.7407,top10E=0.28,eRank=162.9,q75/q25=83.04 attn_vo:H=0.7725,top10E=0.16,eRank=242.2,q75/q25=inf mlp_w1:H=0.7445,top10E=0.32,eRank=166.2,q75/q25=11.44 mlp_w2:H=0.8305,top10E=0.16,eRank=255.0,q75/q25=19.66 vo_prod:H=0.6411,top10E=0.25,eRank=101.6,q75/q25=inf train_time:321758ms step_avg:73.13ms +[2025-09-02 09:52:09] [Rank 0] PRINT: step:4400/10000 val_loss:4.1411 svd_entropy: attn_qk:H=0.7407,top10E=0.28,eRank=162.9,q75/q25=83.04 attn_vo:H=0.7725,top10E=0.16,eRank=242.2,q75/q25=inf mlp_w1:H=0.7445,top10E=0.32,eRank=166.2,q75/q25=11.44 mlp_w2:H=0.8305,top10E=0.16,eRank=255.0,q75/q25=19.66 vo_prod:H=0.6411,top10E=0.25,eRank=101.6,q75/q25=inf train_time:321758ms step_avg:73.13ms +[2025-09-02 09:52:09] [Rank 0] step:4401/10000 train_time:321769ms step_avg:73.11ms +[2025-09-02 09:52:09] [Rank 0] step:4401/10000 train_time:321769ms step_avg:73.11ms +[2025-09-02 09:52:10] [Rank 0] step:4421/10000 train_time:323169ms step_avg:73.10ms +[2025-09-02 09:52:10] [Rank 0] step:4421/10000 train_time:323169ms step_avg:73.10ms +[2025-09-02 09:52:12] [Rank 0] step:4441/10000 train_time:324689ms step_avg:73.11ms +[2025-09-02 09:52:12] [Rank 0] step:4441/10000 train_time:324689ms step_avg:73.11ms +[2025-09-02 09:52:13] [Rank 0] step:4461/10000 train_time:326217ms step_avg:73.13ms +[2025-09-02 09:52:13] [Rank 0] step:4461/10000 train_time:326217ms step_avg:73.13ms +[2025-09-02 09:52:15] [Rank 0] step:4481/10000 train_time:327745ms step_avg:73.14ms +[2025-09-02 09:52:15] [Rank 0] step:4481/10000 train_time:327745ms step_avg:73.14ms +[2025-09-02 09:52:16] [Rank 0] step:4501/10000 train_time:329273ms step_avg:73.16ms +[2025-09-02 09:52:16] [Rank 0] step:4501/10000 train_time:329273ms step_avg:73.16ms +[2025-09-02 09:52:18] [Rank 0] step:4521/10000 train_time:330797ms step_avg:73.17ms +[2025-09-02 09:52:18] [Rank 0] step:4521/10000 train_time:330797ms step_avg:73.17ms +[2025-09-02 09:52:19] [Rank 
0] step:4541/10000 train_time:332324ms step_avg:73.18ms +[2025-09-02 09:52:19] [Rank 0] step:4541/10000 train_time:332324ms step_avg:73.18ms +[2025-09-02 09:52:21] [Rank 0] step:4561/10000 train_time:333852ms step_avg:73.20ms +[2025-09-02 09:52:21] [Rank 0] step:4561/10000 train_time:333852ms step_avg:73.20ms +[2025-09-02 09:52:23] [Rank 0] step:4581/10000 train_time:335381ms step_avg:73.21ms +[2025-09-02 09:52:23] [Rank 0] step:4581/10000 train_time:335381ms step_avg:73.21ms +[2025-09-02 09:52:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:52:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:52:36] [Rank 0] PRINT: step:4600/10000 val_loss:4.0967 svd_entropy: attn_qk:H=0.7436,top10E=0.28,eRank=165.1,q75/q25=84.74 attn_vo:H=0.7751,top10E=0.16,eRank=245.3,q75/q25=inf mlp_w1:H=0.7493,top10E=0.31,eRank=170.8,q75/q25=11.87 mlp_w2:H=0.8325,top10E=0.16,eRank=258.5,q75/q25=19.98 vo_prod:H=0.6445,top10E=0.25,eRank=104.1,q75/q25=inf train_time:337063ms step_avg:73.27ms +[2025-09-02 09:52:36] [Rank 0] PRINT: step:4600/10000 val_loss:4.0967 svd_entropy: attn_qk:H=0.7436,top10E=0.28,eRank=165.1,q75/q25=84.74 attn_vo:H=0.7751,top10E=0.16,eRank=245.3,q75/q25=inf mlp_w1:H=0.7493,top10E=0.31,eRank=170.8,q75/q25=11.87 mlp_w2:H=0.8325,top10E=0.16,eRank=258.5,q75/q25=19.98 vo_prod:H=0.6445,top10E=0.25,eRank=104.1,q75/q25=inf train_time:337063ms step_avg:73.27ms +[2025-09-02 09:52:36] [Rank 0] step:4601/10000 train_time:337074ms step_avg:73.26ms +[2025-09-02 09:52:36] [Rank 0] step:4601/10000 train_time:337074ms step_avg:73.26ms +[2025-09-02 09:52:37] [Rank 0] step:4621/10000 train_time:338463ms step_avg:73.24ms +[2025-09-02 09:52:37] [Rank 0] step:4621/10000 train_time:338463ms step_avg:73.24ms +[2025-09-02 09:52:39] [Rank 0] step:4641/10000 train_time:339989ms step_avg:73.26ms +[2025-09-02 
09:52:39] [Rank 0] step:4641/10000 train_time:339989ms step_avg:73.26ms +[2025-09-02 09:52:40] [Rank 0] step:4661/10000 train_time:341517ms step_avg:73.27ms +[2025-09-02 09:52:40] [Rank 0] step:4661/10000 train_time:341517ms step_avg:73.27ms +[2025-09-02 09:52:42] [Rank 0] step:4681/10000 train_time:343044ms step_avg:73.28ms +[2025-09-02 09:52:42] [Rank 0] step:4681/10000 train_time:343044ms step_avg:73.28ms +[2025-09-02 09:52:43] [Rank 0] step:4701/10000 train_time:344571ms step_avg:73.30ms +[2025-09-02 09:52:43] [Rank 0] step:4701/10000 train_time:344571ms step_avg:73.30ms +[2025-09-02 09:52:45] [Rank 0] step:4721/10000 train_time:346098ms step_avg:73.31ms +[2025-09-02 09:52:45] [Rank 0] step:4721/10000 train_time:346098ms step_avg:73.31ms +[2025-09-02 09:52:46] [Rank 0] step:4741/10000 train_time:347629ms step_avg:73.32ms +[2025-09-02 09:52:46] [Rank 0] step:4741/10000 train_time:347629ms step_avg:73.32ms +[2025-09-02 09:52:48] [Rank 0] step:4761/10000 train_time:349156ms step_avg:73.34ms +[2025-09-02 09:52:48] [Rank 0] step:4761/10000 train_time:349156ms step_avg:73.34ms +[2025-09-02 09:52:50] [Rank 0] step:4781/10000 train_time:350685ms step_avg:73.35ms +[2025-09-02 09:52:50] [Rank 0] step:4781/10000 train_time:350685ms step_avg:73.35ms +[2025-09-02 09:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:53:03] [Rank 0] PRINT: step:4800/10000 val_loss:4.0827 svd_entropy: attn_qk:H=0.7463,top10E=0.27,eRank=167.3,q75/q25=85.72 attn_vo:H=0.7775,top10E=0.16,eRank=248.3,q75/q25=inf mlp_w1:H=0.7536,top10E=0.30,eRank=175.1,q75/q25=12.29 mlp_w2:H=0.8343,top10E=0.16,eRank=261.6,q75/q25=20.37 vo_prod:H=0.6477,top10E=0.24,eRank=106.5,q75/q25=inf train_time:352370ms step_avg:73.41ms +[2025-09-02 09:53:03] [Rank 0] PRINT: step:4800/10000 val_loss:4.0827 svd_entropy: attn_qk:H=0.7463,top10E=0.27,eRank=167.3,q75/q25=85.72 attn_vo:H=0.7775,top10E=0.16,eRank=248.3,q75/q25=inf mlp_w1:H=0.7536,top10E=0.30,eRank=175.1,q75/q25=12.29 mlp_w2:H=0.8343,top10E=0.16,eRank=261.6,q75/q25=20.37 vo_prod:H=0.6477,top10E=0.24,eRank=106.5,q75/q25=inf train_time:352370ms step_avg:73.41ms +[2025-09-02 09:53:03] [Rank 0] step:4801/10000 train_time:352382ms step_avg:73.40ms +[2025-09-02 09:53:03] [Rank 0] step:4801/10000 train_time:352382ms step_avg:73.40ms +[2025-09-02 09:53:04] [Rank 0] step:4821/10000 train_time:353766ms step_avg:73.38ms +[2025-09-02 09:53:04] [Rank 0] step:4821/10000 train_time:353766ms step_avg:73.38ms +[2025-09-02 09:53:06] [Rank 0] step:4841/10000 train_time:355292ms step_avg:73.39ms +[2025-09-02 09:53:06] [Rank 0] step:4841/10000 train_time:355292ms step_avg:73.39ms +[2025-09-02 09:53:07] [Rank 0] step:4861/10000 train_time:356823ms step_avg:73.41ms +[2025-09-02 09:53:07] [Rank 0] step:4861/10000 train_time:356823ms step_avg:73.41ms +[2025-09-02 09:53:09] [Rank 0] step:4881/10000 train_time:358350ms step_avg:73.42ms +[2025-09-02 09:53:09] [Rank 0] step:4881/10000 train_time:358350ms step_avg:73.42ms +[2025-09-02 09:53:10] [Rank 0] step:4901/10000 train_time:359876ms step_avg:73.43ms +[2025-09-02 09:53:10] [Rank 0] step:4901/10000 train_time:359876ms step_avg:73.43ms +[2025-09-02 09:53:12] [Rank 0] step:4921/10000 train_time:361406ms step_avg:73.44ms +[2025-09-02 09:53:12] [Rank 0] step:4921/10000 train_time:361406ms step_avg:73.44ms +[2025-09-02 09:53:13] [Rank 
0] step:4941/10000 train_time:362936ms step_avg:73.45ms +[2025-09-02 09:53:13] [Rank 0] step:4941/10000 train_time:362936ms step_avg:73.45ms +[2025-09-02 09:53:15] [Rank 0] step:4961/10000 train_time:364464ms step_avg:73.47ms +[2025-09-02 09:53:15] [Rank 0] step:4961/10000 train_time:364464ms step_avg:73.47ms +[2025-09-02 09:53:17] [Rank 0] step:4981/10000 train_time:365994ms step_avg:73.48ms +[2025-09-02 09:53:17] [Rank 0] step:4981/10000 train_time:365994ms step_avg:73.48ms +[2025-09-02 09:53:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:53:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:53:30] [Rank 0] PRINT: step:5000/10000 val_loss:4.0621 svd_entropy: attn_qk:H=0.7489,top10E=0.27,eRank=169.4,q75/q25=86.75 attn_vo:H=0.7797,top10E=0.16,eRank=251.0,q75/q25=inf mlp_w1:H=0.7575,top10E=0.30,eRank=179.1,q75/q25=12.80 mlp_w2:H=0.8359,top10E=0.15,eRank=264.5,q75/q25=20.82 vo_prod:H=0.6506,top10E=0.24,eRank=108.7,q75/q25=inf train_time:367677ms step_avg:73.54ms +[2025-09-02 09:53:30] [Rank 0] PRINT: step:5000/10000 val_loss:4.0621 svd_entropy: attn_qk:H=0.7489,top10E=0.27,eRank=169.4,q75/q25=86.75 attn_vo:H=0.7797,top10E=0.16,eRank=251.0,q75/q25=inf mlp_w1:H=0.7575,top10E=0.30,eRank=179.1,q75/q25=12.80 mlp_w2:H=0.8359,top10E=0.15,eRank=264.5,q75/q25=20.82 vo_prod:H=0.6506,top10E=0.24,eRank=108.7,q75/q25=inf train_time:367677ms step_avg:73.54ms +[2025-09-02 09:53:30] [Rank 0] step:5001/10000 train_time:367688ms step_avg:73.52ms +[2025-09-02 09:53:30] [Rank 0] step:5001/10000 train_time:367688ms step_avg:73.52ms +[2025-09-02 09:53:31] [Rank 0] step:5021/10000 train_time:369069ms step_avg:73.51ms +[2025-09-02 09:53:31] [Rank 0] step:5021/10000 train_time:369069ms step_avg:73.51ms +[2025-09-02 09:53:33] [Rank 0] step:5041/10000 train_time:370599ms step_avg:73.52ms +[2025-09-02 
09:53:33] [Rank 0] step:5041/10000 train_time:370599ms step_avg:73.52ms +[2025-09-02 09:53:34] [Rank 0] step:5061/10000 train_time:372127ms step_avg:73.53ms +[2025-09-02 09:53:34] [Rank 0] step:5061/10000 train_time:372127ms step_avg:73.53ms +[2025-09-02 09:53:36] [Rank 0] step:5081/10000 train_time:373655ms step_avg:73.54ms +[2025-09-02 09:53:36] [Rank 0] step:5081/10000 train_time:373655ms step_avg:73.54ms +[2025-09-02 09:53:37] [Rank 0] step:5101/10000 train_time:375184ms step_avg:73.55ms +[2025-09-02 09:53:37] [Rank 0] step:5101/10000 train_time:375184ms step_avg:73.55ms +[2025-09-02 09:53:39] [Rank 0] step:5121/10000 train_time:376714ms step_avg:73.56ms +[2025-09-02 09:53:39] [Rank 0] step:5121/10000 train_time:376714ms step_avg:73.56ms +[2025-09-02 09:53:41] [Rank 0] step:5141/10000 train_time:378255ms step_avg:73.58ms +[2025-09-02 09:53:41] [Rank 0] step:5141/10000 train_time:378255ms step_avg:73.58ms +[2025-09-02 09:53:42] [Rank 0] step:5161/10000 train_time:379783ms step_avg:73.59ms +[2025-09-02 09:53:42] [Rank 0] step:5161/10000 train_time:379783ms step_avg:73.59ms +[2025-09-02 09:53:44] [Rank 0] step:5181/10000 train_time:381316ms step_avg:73.60ms +[2025-09-02 09:53:44] [Rank 0] step:5181/10000 train_time:381316ms step_avg:73.60ms +[2025-09-02 09:53:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:53:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:53:57] [Rank 0] PRINT: step:5200/10000 val_loss:4.0404 svd_entropy: attn_qk:H=0.7513,top10E=0.27,eRank=171.4,q75/q25=86.80 attn_vo:H=0.7818,top10E=0.15,eRank=253.6,q75/q25=inf mlp_w1:H=0.7611,top10E=0.29,eRank=182.9,q75/q25=13.22 mlp_w2:H=0.8372,top10E=0.15,eRank=267.0,q75/q25=21.39 vo_prod:H=0.6533,top10E=0.23,eRank=110.8,q75/q25=inf train_time:383024ms step_avg:73.66ms +[2025-09-02 09:53:57] [Rank 0] PRINT: step:5200/10000 val_loss:4.0404 svd_entropy: attn_qk:H=0.7513,top10E=0.27,eRank=171.4,q75/q25=86.80 attn_vo:H=0.7818,top10E=0.15,eRank=253.6,q75/q25=inf mlp_w1:H=0.7611,top10E=0.29,eRank=182.9,q75/q25=13.22 mlp_w2:H=0.8372,top10E=0.15,eRank=267.0,q75/q25=21.39 vo_prod:H=0.6533,top10E=0.23,eRank=110.8,q75/q25=inf train_time:383024ms step_avg:73.66ms +[2025-09-02 09:53:57] [Rank 0] step:5201/10000 train_time:383035ms step_avg:73.65ms +[2025-09-02 09:53:57] [Rank 0] step:5201/10000 train_time:383035ms step_avg:73.65ms +[2025-09-02 09:53:58] [Rank 0] step:5221/10000 train_time:384448ms step_avg:73.63ms +[2025-09-02 09:53:58] [Rank 0] step:5221/10000 train_time:384448ms step_avg:73.63ms +[2025-09-02 09:54:00] [Rank 0] step:5241/10000 train_time:386006ms step_avg:73.65ms +[2025-09-02 09:54:00] [Rank 0] step:5241/10000 train_time:386006ms step_avg:73.65ms +[2025-09-02 09:54:02] [Rank 0] step:5261/10000 train_time:387568ms step_avg:73.67ms +[2025-09-02 09:54:02] [Rank 0] step:5261/10000 train_time:387568ms step_avg:73.67ms +[2025-09-02 09:54:03] [Rank 0] step:5281/10000 train_time:389131ms step_avg:73.69ms +[2025-09-02 09:54:03] [Rank 0] step:5281/10000 train_time:389131ms step_avg:73.69ms +[2025-09-02 09:54:05] [Rank 0] step:5301/10000 train_time:390701ms step_avg:73.70ms +[2025-09-02 09:54:05] [Rank 0] step:5301/10000 train_time:390701ms step_avg:73.70ms +[2025-09-02 09:54:06] [Rank 0] step:5321/10000 train_time:392261ms step_avg:73.72ms +[2025-09-02 09:54:06] [Rank 0] step:5321/10000 train_time:392261ms step_avg:73.72ms +[2025-09-02 09:54:08] [Rank 
0] step:5341/10000 train_time:393822ms step_avg:73.74ms +[2025-09-02 09:54:08] [Rank 0] step:5341/10000 train_time:393822ms step_avg:73.74ms +[2025-09-02 09:54:09] [Rank 0] step:5361/10000 train_time:395388ms step_avg:73.75ms +[2025-09-02 09:54:09] [Rank 0] step:5361/10000 train_time:395388ms step_avg:73.75ms +[2025-09-02 09:54:11] [Rank 0] step:5381/10000 train_time:396952ms step_avg:73.77ms +[2025-09-02 09:54:11] [Rank 0] step:5381/10000 train_time:396952ms step_avg:73.77ms +[2025-09-02 09:54:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:54:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:54:24] [Rank 0] PRINT: step:5400/10000 val_loss:4.0243 svd_entropy: attn_qk:H=0.7535,top10E=0.26,eRank=173.2,q75/q25=87.85 attn_vo:H=0.7837,top10E=0.15,eRank=256.0,q75/q25=inf mlp_w1:H=0.7646,top10E=0.29,eRank=186.6,q75/q25=13.66 mlp_w2:H=0.8386,top10E=0.15,eRank=269.5,q75/q25=21.77 vo_prod:H=0.6560,top10E=0.23,eRank=112.9,q75/q25=inf train_time:398668ms step_avg:73.83ms +[2025-09-02 09:54:24] [Rank 0] PRINT: step:5400/10000 val_loss:4.0243 svd_entropy: attn_qk:H=0.7535,top10E=0.26,eRank=173.2,q75/q25=87.85 attn_vo:H=0.7837,top10E=0.15,eRank=256.0,q75/q25=inf mlp_w1:H=0.7646,top10E=0.29,eRank=186.6,q75/q25=13.66 mlp_w2:H=0.8386,top10E=0.15,eRank=269.5,q75/q25=21.77 vo_prod:H=0.6560,top10E=0.23,eRank=112.9,q75/q25=inf train_time:398668ms step_avg:73.83ms +[2025-09-02 09:54:24] [Rank 0] step:5401/10000 train_time:398679ms step_avg:73.82ms +[2025-09-02 09:54:24] [Rank 0] step:5401/10000 train_time:398679ms step_avg:73.82ms +[2025-09-02 09:54:26] [Rank 0] step:5421/10000 train_time:400105ms step_avg:73.81ms +[2025-09-02 09:54:26] [Rank 0] step:5421/10000 train_time:400105ms step_avg:73.81ms +[2025-09-02 09:54:27] [Rank 0] step:5441/10000 train_time:401661ms step_avg:73.82ms +[2025-09-02 
09:54:27] [Rank 0] step:5441/10000 train_time:401661ms step_avg:73.82ms +[2025-09-02 09:54:29] [Rank 0] step:5461/10000 train_time:403222ms step_avg:73.84ms +[2025-09-02 09:54:29] [Rank 0] step:5461/10000 train_time:403222ms step_avg:73.84ms +[2025-09-02 09:54:30] [Rank 0] step:5481/10000 train_time:404785ms step_avg:73.85ms +[2025-09-02 09:54:30] [Rank 0] step:5481/10000 train_time:404785ms step_avg:73.85ms +[2025-09-02 09:54:32] [Rank 0] step:5501/10000 train_time:406351ms step_avg:73.87ms +[2025-09-02 09:54:32] [Rank 0] step:5501/10000 train_time:406351ms step_avg:73.87ms +[2025-09-02 09:54:34] [Rank 0] step:5521/10000 train_time:407916ms step_avg:73.88ms +[2025-09-02 09:54:34] [Rank 0] step:5521/10000 train_time:407916ms step_avg:73.88ms +[2025-09-02 09:54:35] [Rank 0] step:5541/10000 train_time:409476ms step_avg:73.90ms +[2025-09-02 09:54:35] [Rank 0] step:5541/10000 train_time:409476ms step_avg:73.90ms +[2025-09-02 09:54:37] [Rank 0] step:5561/10000 train_time:411037ms step_avg:73.91ms +[2025-09-02 09:54:37] [Rank 0] step:5561/10000 train_time:411037ms step_avg:73.91ms +[2025-09-02 09:54:38] [Rank 0] step:5581/10000 train_time:412599ms step_avg:73.93ms +[2025-09-02 09:54:38] [Rank 0] step:5581/10000 train_time:412599ms step_avg:73.93ms +[2025-09-02 09:54:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:54:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:54:51] [Rank 0] PRINT: step:5600/10000 val_loss:4.0078 svd_entropy: attn_qk:H=0.7556,top10E=0.26,eRank=175.0,q75/q25=87.85 attn_vo:H=0.7855,top10E=0.15,eRank=258.4,q75/q25=inf mlp_w1:H=0.7679,top10E=0.28,eRank=190.1,q75/q25=14.11 mlp_w2:H=0.8399,top10E=0.15,eRank=271.8,q75/q25=22.18 vo_prod:H=0.6582,top10E=0.23,eRank=114.8,q75/q25=inf train_time:414319ms step_avg:73.99ms +[2025-09-02 09:54:51] [Rank 0] PRINT: step:5600/10000 val_loss:4.0078 svd_entropy: attn_qk:H=0.7556,top10E=0.26,eRank=175.0,q75/q25=87.85 attn_vo:H=0.7855,top10E=0.15,eRank=258.4,q75/q25=inf mlp_w1:H=0.7679,top10E=0.28,eRank=190.1,q75/q25=14.11 mlp_w2:H=0.8399,top10E=0.15,eRank=271.8,q75/q25=22.18 vo_prod:H=0.6582,top10E=0.23,eRank=114.8,q75/q25=inf train_time:414319ms step_avg:73.99ms +[2025-09-02 09:54:52] [Rank 0] step:5601/10000 train_time:414330ms step_avg:73.97ms +[2025-09-02 09:54:52] [Rank 0] step:5601/10000 train_time:414330ms step_avg:73.97ms +[2025-09-02 09:54:53] [Rank 0] step:5621/10000 train_time:415737ms step_avg:73.96ms +[2025-09-02 09:54:53] [Rank 0] step:5621/10000 train_time:415737ms step_avg:73.96ms +[2025-09-02 09:54:55] [Rank 0] step:5641/10000 train_time:417298ms step_avg:73.98ms +[2025-09-02 09:54:55] [Rank 0] step:5641/10000 train_time:417298ms step_avg:73.98ms +[2025-09-02 09:54:56] [Rank 0] step:5661/10000 train_time:418855ms step_avg:73.99ms +[2025-09-02 09:54:56] [Rank 0] step:5661/10000 train_time:418855ms step_avg:73.99ms +[2025-09-02 09:54:58] [Rank 0] step:5681/10000 train_time:420420ms step_avg:74.00ms +[2025-09-02 09:54:58] [Rank 0] step:5681/10000 train_time:420420ms step_avg:74.00ms +[2025-09-02 09:54:59] [Rank 0] step:5701/10000 train_time:421979ms step_avg:74.02ms +[2025-09-02 09:54:59] [Rank 0] step:5701/10000 train_time:421979ms step_avg:74.02ms +[2025-09-02 09:55:01] [Rank 0] step:5721/10000 train_time:423543ms step_avg:74.03ms +[2025-09-02 09:55:01] [Rank 0] step:5721/10000 train_time:423543ms step_avg:74.03ms +[2025-09-02 09:55:02] [Rank 
0] step:5741/10000 train_time:425104ms step_avg:74.05ms +[2025-09-02 09:55:02] [Rank 0] step:5741/10000 train_time:425104ms step_avg:74.05ms +[2025-09-02 09:55:04] [Rank 0] step:5761/10000 train_time:426665ms step_avg:74.06ms +[2025-09-02 09:55:04] [Rank 0] step:5761/10000 train_time:426665ms step_avg:74.06ms +[2025-09-02 09:55:06] [Rank 0] step:5781/10000 train_time:428227ms step_avg:74.07ms +[2025-09-02 09:55:06] [Rank 0] step:5781/10000 train_time:428227ms step_avg:74.07ms +[2025-09-02 09:55:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:55:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:55:19] [Rank 0] PRINT: step:5800/10000 val_loss:3.9987 svd_entropy: attn_qk:H=0.7578,top10E=0.26,eRank=176.9,q75/q25=87.83 attn_vo:H=0.7873,top10E=0.15,eRank=260.7,q75/q25=inf mlp_w1:H=0.7709,top10E=0.28,eRank=193.4,q75/q25=14.54 mlp_w2:H=0.8411,top10E=0.15,eRank=274.1,q75/q25=22.61 vo_prod:H=0.6607,top10E=0.22,eRank=116.7,q75/q25=inf train_time:429949ms step_avg:74.13ms +[2025-09-02 09:55:19] [Rank 0] PRINT: step:5800/10000 val_loss:3.9987 svd_entropy: attn_qk:H=0.7578,top10E=0.26,eRank=176.9,q75/q25=87.83 attn_vo:H=0.7873,top10E=0.15,eRank=260.7,q75/q25=inf mlp_w1:H=0.7709,top10E=0.28,eRank=193.4,q75/q25=14.54 mlp_w2:H=0.8411,top10E=0.15,eRank=274.1,q75/q25=22.61 vo_prod:H=0.6607,top10E=0.22,eRank=116.7,q75/q25=inf train_time:429949ms step_avg:74.13ms +[2025-09-02 09:55:19] [Rank 0] step:5801/10000 train_time:429960ms step_avg:74.12ms +[2025-09-02 09:55:19] [Rank 0] step:5801/10000 train_time:429960ms step_avg:74.12ms +[2025-09-02 09:55:20] [Rank 0] step:5821/10000 train_time:431382ms step_avg:74.11ms +[2025-09-02 09:55:20] [Rank 0] step:5821/10000 train_time:431382ms step_avg:74.11ms +[2025-09-02 09:55:22] [Rank 0] step:5841/10000 train_time:432939ms step_avg:74.12ms +[2025-09-02 
09:55:22] [Rank 0] step:5841/10000 train_time:432939ms step_avg:74.12ms +[2025-09-02 09:55:24] [Rank 0] step:5861/10000 train_time:434503ms step_avg:74.13ms +[2025-09-02 09:55:24] [Rank 0] step:5861/10000 train_time:434503ms step_avg:74.13ms +[2025-09-02 09:55:25] [Rank 0] step:5881/10000 train_time:436068ms step_avg:74.15ms +[2025-09-02 09:55:25] [Rank 0] step:5881/10000 train_time:436068ms step_avg:74.15ms +[2025-09-02 09:55:27] [Rank 0] step:5901/10000 train_time:437629ms step_avg:74.16ms +[2025-09-02 09:55:27] [Rank 0] step:5901/10000 train_time:437629ms step_avg:74.16ms +[2025-09-02 09:55:28] [Rank 0] step:5921/10000 train_time:439190ms step_avg:74.17ms +[2025-09-02 09:55:28] [Rank 0] step:5921/10000 train_time:439190ms step_avg:74.17ms +[2025-09-02 09:55:30] [Rank 0] step:5941/10000 train_time:440757ms step_avg:74.19ms +[2025-09-02 09:55:30] [Rank 0] step:5941/10000 train_time:440757ms step_avg:74.19ms +[2025-09-02 09:55:31] [Rank 0] step:5961/10000 train_time:442326ms step_avg:74.20ms +[2025-09-02 09:55:31] [Rank 0] step:5961/10000 train_time:442326ms step_avg:74.20ms +[2025-09-02 09:55:33] [Rank 0] step:5981/10000 train_time:443891ms step_avg:74.22ms +[2025-09-02 09:55:33] [Rank 0] step:5981/10000 train_time:443891ms step_avg:74.22ms +[2025-09-02 09:55:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:55:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:55:46] [Rank 0] PRINT: step:6000/10000 val_loss:3.9775 svd_entropy: attn_qk:H=0.7598,top10E=0.26,eRank=178.6,q75/q25=88.45 attn_vo:H=0.7889,top10E=0.14,eRank=262.8,q75/q25=inf mlp_w1:H=0.7739,top10E=0.27,eRank=196.6,q75/q25=14.97 mlp_w2:H=0.8423,top10E=0.15,eRank=276.4,q75/q25=22.95 vo_prod:H=0.6629,top10E=0.22,eRank=118.6,q75/q25=inf train_time:445609ms step_avg:74.27ms +[2025-09-02 09:55:46] [Rank 0] PRINT: step:6000/10000 val_loss:3.9775 svd_entropy: attn_qk:H=0.7598,top10E=0.26,eRank=178.6,q75/q25=88.45 attn_vo:H=0.7889,top10E=0.14,eRank=262.8,q75/q25=inf mlp_w1:H=0.7739,top10E=0.27,eRank=196.6,q75/q25=14.97 mlp_w2:H=0.8423,top10E=0.15,eRank=276.4,q75/q25=22.95 vo_prod:H=0.6629,top10E=0.22,eRank=118.6,q75/q25=inf train_time:445609ms step_avg:74.27ms +[2025-09-02 09:55:46] [Rank 0] step:6001/10000 train_time:445620ms step_avg:74.26ms +[2025-09-02 09:55:46] [Rank 0] step:6001/10000 train_time:445620ms step_avg:74.26ms +[2025-09-02 09:55:48] [Rank 0] step:6021/10000 train_time:447056ms step_avg:74.25ms +[2025-09-02 09:55:48] [Rank 0] step:6021/10000 train_time:447056ms step_avg:74.25ms +[2025-09-02 09:55:49] [Rank 0] step:6041/10000 train_time:448620ms step_avg:74.26ms +[2025-09-02 09:55:49] [Rank 0] step:6041/10000 train_time:448620ms step_avg:74.26ms +[2025-09-02 09:55:51] [Rank 0] step:6061/10000 train_time:450193ms step_avg:74.28ms +[2025-09-02 09:55:51] [Rank 0] step:6061/10000 train_time:450193ms step_avg:74.28ms +[2025-09-02 09:55:53] [Rank 0] step:6081/10000 train_time:451761ms step_avg:74.29ms +[2025-09-02 09:55:53] [Rank 0] step:6081/10000 train_time:451761ms step_avg:74.29ms +[2025-09-02 09:55:54] [Rank 0] step:6101/10000 train_time:453332ms step_avg:74.30ms +[2025-09-02 09:55:54] [Rank 0] step:6101/10000 train_time:453332ms step_avg:74.30ms +[2025-09-02 09:55:56] [Rank 0] step:6121/10000 train_time:455166ms step_avg:74.36ms +[2025-09-02 09:55:56] [Rank 0] step:6121/10000 train_time:455166ms step_avg:74.36ms +[2025-09-02 09:55:58] [Rank 
0] step:6141/10000 train_time:456742ms step_avg:74.38ms +[2025-09-02 09:55:58] [Rank 0] step:6141/10000 train_time:456742ms step_avg:74.38ms +[2025-09-02 09:55:59] [Rank 0] step:6161/10000 train_time:458310ms step_avg:74.39ms +[2025-09-02 09:55:59] [Rank 0] step:6161/10000 train_time:458310ms step_avg:74.39ms +[2025-09-02 09:56:01] [Rank 0] step:6181/10000 train_time:459877ms step_avg:74.40ms +[2025-09-02 09:56:01] [Rank 0] step:6181/10000 train_time:459877ms step_avg:74.40ms +[2025-09-02 09:56:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:56:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:56:14] [Rank 0] PRINT: step:6200/10000 val_loss:3.9611 svd_entropy: attn_qk:H=0.7616,top10E=0.25,eRank=180.2,q75/q25=88.15 attn_vo:H=0.7905,top10E=0.14,eRank=264.9,q75/q25=inf mlp_w1:H=0.7766,top10E=0.27,eRank=199.6,q75/q25=15.43 mlp_w2:H=0.8433,top10E=0.15,eRank=278.4,q75/q25=23.30 vo_prod:H=0.6649,top10E=0.22,eRank=120.3,q75/q25=inf train_time:461602ms step_avg:74.45ms +[2025-09-02 09:56:14] [Rank 0] PRINT: step:6200/10000 val_loss:3.9611 svd_entropy: attn_qk:H=0.7616,top10E=0.25,eRank=180.2,q75/q25=88.15 attn_vo:H=0.7905,top10E=0.14,eRank=264.9,q75/q25=inf mlp_w1:H=0.7766,top10E=0.27,eRank=199.6,q75/q25=15.43 mlp_w2:H=0.8433,top10E=0.15,eRank=278.4,q75/q25=23.30 vo_prod:H=0.6649,top10E=0.22,eRank=120.3,q75/q25=inf train_time:461602ms step_avg:74.45ms +[2025-09-02 09:56:14] [Rank 0] step:6201/10000 train_time:461613ms step_avg:74.44ms +[2025-09-02 09:56:14] [Rank 0] step:6201/10000 train_time:461613ms step_avg:74.44ms +[2025-09-02 09:56:16] [Rank 0] step:6221/10000 train_time:463038ms step_avg:74.43ms +[2025-09-02 09:56:16] [Rank 0] step:6221/10000 train_time:463038ms step_avg:74.43ms +[2025-09-02 09:56:17] [Rank 0] step:6241/10000 train_time:464598ms step_avg:74.44ms +[2025-09-02 
09:56:17] [Rank 0] step:6241/10000 train_time:464598ms step_avg:74.44ms +[2025-09-02 09:56:19] [Rank 0] step:6261/10000 train_time:466164ms step_avg:74.46ms +[2025-09-02 09:56:19] [Rank 0] step:6261/10000 train_time:466164ms step_avg:74.46ms +[2025-09-02 09:56:20] [Rank 0] step:6281/10000 train_time:467732ms step_avg:74.47ms +[2025-09-02 09:56:20] [Rank 0] step:6281/10000 train_time:467732ms step_avg:74.47ms +[2025-09-02 09:56:22] [Rank 0] step:6301/10000 train_time:469303ms step_avg:74.48ms +[2025-09-02 09:56:22] [Rank 0] step:6301/10000 train_time:469303ms step_avg:74.48ms +[2025-09-02 09:56:23] [Rank 0] step:6321/10000 train_time:470866ms step_avg:74.49ms +[2025-09-02 09:56:23] [Rank 0] step:6321/10000 train_time:470866ms step_avg:74.49ms +[2025-09-02 09:56:25] [Rank 0] step:6341/10000 train_time:472437ms step_avg:74.51ms +[2025-09-02 09:56:25] [Rank 0] step:6341/10000 train_time:472437ms step_avg:74.51ms +[2025-09-02 09:56:27] [Rank 0] step:6361/10000 train_time:474009ms step_avg:74.52ms +[2025-09-02 09:56:27] [Rank 0] step:6361/10000 train_time:474009ms step_avg:74.52ms +[2025-09-02 09:56:28] [Rank 0] step:6381/10000 train_time:475586ms step_avg:74.53ms +[2025-09-02 09:56:28] [Rank 0] step:6381/10000 train_time:475586ms step_avg:74.53ms +[2025-09-02 09:56:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:56:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:56:42] [Rank 0] PRINT: step:6400/10000 val_loss:3.9445 svd_entropy: attn_qk:H=0.7633,top10E=0.25,eRank=181.8,q75/q25=88.31 attn_vo:H=0.7919,top10E=0.14,eRank=266.8,q75/q25=inf mlp_w1:H=0.7788,top10E=0.27,eRank=202.1,q75/q25=15.77 mlp_w2:H=0.8443,top10E=0.14,eRank=280.2,q75/q25=23.66 vo_prod:H=0.6669,top10E=0.22,eRank=122.0,q75/q25=inf train_time:477312ms step_avg:74.58ms +[2025-09-02 09:56:42] [Rank 0] PRINT: step:6400/10000 val_loss:3.9445 svd_entropy: attn_qk:H=0.7633,top10E=0.25,eRank=181.8,q75/q25=88.31 attn_vo:H=0.7919,top10E=0.14,eRank=266.8,q75/q25=inf mlp_w1:H=0.7788,top10E=0.27,eRank=202.1,q75/q25=15.77 mlp_w2:H=0.8443,top10E=0.14,eRank=280.2,q75/q25=23.66 vo_prod:H=0.6669,top10E=0.22,eRank=122.0,q75/q25=inf train_time:477312ms step_avg:74.58ms +[2025-09-02 09:56:42] [Rank 0] step:6401/10000 train_time:477323ms step_avg:74.57ms +[2025-09-02 09:56:42] [Rank 0] step:6401/10000 train_time:477323ms step_avg:74.57ms +[2025-09-02 09:56:43] [Rank 0] step:6421/10000 train_time:478749ms step_avg:74.56ms +[2025-09-02 09:56:43] [Rank 0] step:6421/10000 train_time:478749ms step_avg:74.56ms +[2025-09-02 09:56:45] [Rank 0] step:6441/10000 train_time:480314ms step_avg:74.57ms +[2025-09-02 09:56:45] [Rank 0] step:6441/10000 train_time:480314ms step_avg:74.57ms +[2025-09-02 09:56:46] [Rank 0] step:6461/10000 train_time:481882ms step_avg:74.58ms +[2025-09-02 09:56:46] [Rank 0] step:6461/10000 train_time:481882ms step_avg:74.58ms +[2025-09-02 09:56:48] [Rank 0] step:6481/10000 train_time:483458ms step_avg:74.60ms +[2025-09-02 09:56:48] [Rank 0] step:6481/10000 train_time:483458ms step_avg:74.60ms +[2025-09-02 09:56:50] [Rank 0] step:6501/10000 train_time:485021ms step_avg:74.61ms +[2025-09-02 09:56:50] [Rank 0] step:6501/10000 train_time:485021ms step_avg:74.61ms +[2025-09-02 09:56:51] [Rank 0] step:6521/10000 train_time:486585ms step_avg:74.62ms +[2025-09-02 09:56:51] [Rank 0] step:6521/10000 train_time:486585ms step_avg:74.62ms +[2025-09-02 09:56:53] [Rank 
0] step:6541/10000 train_time:488153ms step_avg:74.63ms +[2025-09-02 09:56:53] [Rank 0] step:6541/10000 train_time:488153ms step_avg:74.63ms +[2025-09-02 09:56:54] [Rank 0] step:6561/10000 train_time:489723ms step_avg:74.64ms +[2025-09-02 09:56:54] [Rank 0] step:6561/10000 train_time:489723ms step_avg:74.64ms +[2025-09-02 09:56:56] [Rank 0] step:6581/10000 train_time:491288ms step_avg:74.65ms +[2025-09-02 09:56:56] [Rank 0] step:6581/10000 train_time:491288ms step_avg:74.65ms +[2025-09-02 09:56:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:56:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:57:09] [Rank 0] PRINT: step:6600/10000 val_loss:3.9333 svd_entropy: attn_qk:H=0.7649,top10E=0.25,eRank=183.3,q75/q25=88.19 attn_vo:H=0.7932,top10E=0.14,eRank=268.7,q75/q25=inf mlp_w1:H=0.7809,top10E=0.26,eRank=204.6,q75/q25=16.16 mlp_w2:H=0.8452,top10E=0.14,eRank=282.1,q75/q25=24.10 vo_prod:H=0.6687,top10E=0.22,eRank=123.7,q75/q25=inf train_time:493013ms step_avg:74.70ms +[2025-09-02 09:57:09] [Rank 0] PRINT: step:6600/10000 val_loss:3.9333 svd_entropy: attn_qk:H=0.7649,top10E=0.25,eRank=183.3,q75/q25=88.19 attn_vo:H=0.7932,top10E=0.14,eRank=268.7,q75/q25=inf mlp_w1:H=0.7809,top10E=0.26,eRank=204.6,q75/q25=16.16 mlp_w2:H=0.8452,top10E=0.14,eRank=282.1,q75/q25=24.10 vo_prod:H=0.6687,top10E=0.22,eRank=123.7,q75/q25=inf train_time:493013ms step_avg:74.70ms +[2025-09-02 09:57:09] [Rank 0] step:6601/10000 train_time:493024ms step_avg:74.69ms +[2025-09-02 09:57:09] [Rank 0] step:6601/10000 train_time:493024ms step_avg:74.69ms +[2025-09-02 09:57:11] [Rank 0] step:6621/10000 train_time:494457ms step_avg:74.68ms +[2025-09-02 09:57:11] [Rank 0] step:6621/10000 train_time:494457ms step_avg:74.68ms +[2025-09-02 09:57:12] [Rank 0] step:6641/10000 train_time:496025ms step_avg:74.69ms +[2025-09-02 
09:57:12] [Rank 0] step:6641/10000 train_time:496025ms step_avg:74.69ms +[2025-09-02 09:57:14] [Rank 0] step:6661/10000 train_time:497591ms step_avg:74.70ms +[2025-09-02 09:57:14] [Rank 0] step:6661/10000 train_time:497591ms step_avg:74.70ms +[2025-09-02 09:57:16] [Rank 0] step:6681/10000 train_time:499174ms step_avg:74.72ms +[2025-09-02 09:57:16] [Rank 0] step:6681/10000 train_time:499174ms step_avg:74.72ms +[2025-09-02 09:57:17] [Rank 0] step:6701/10000 train_time:500772ms step_avg:74.73ms +[2025-09-02 09:57:17] [Rank 0] step:6701/10000 train_time:500772ms step_avg:74.73ms +[2025-09-02 09:57:19] [Rank 0] step:6721/10000 train_time:502367ms step_avg:74.75ms +[2025-09-02 09:57:19] [Rank 0] step:6721/10000 train_time:502367ms step_avg:74.75ms +[2025-09-02 09:57:20] [Rank 0] step:6741/10000 train_time:503961ms step_avg:74.76ms +[2025-09-02 09:57:20] [Rank 0] step:6741/10000 train_time:503961ms step_avg:74.76ms +[2025-09-02 09:57:22] [Rank 0] step:6761/10000 train_time:505555ms step_avg:74.78ms +[2025-09-02 09:57:22] [Rank 0] step:6761/10000 train_time:505555ms step_avg:74.78ms +[2025-09-02 09:57:24] [Rank 0] step:6781/10000 train_time:507154ms step_avg:74.79ms +[2025-09-02 09:57:24] [Rank 0] step:6781/10000 train_time:507154ms step_avg:74.79ms +[2025-09-02 09:57:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:57:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:57:37] [Rank 0] PRINT: step:6800/10000 val_loss:3.9175 svd_entropy: attn_qk:H=0.7663,top10E=0.25,eRank=184.6,q75/q25=88.19 attn_vo:H=0.7944,top10E=0.14,eRank=270.3,q75/q25=inf mlp_w1:H=0.7828,top10E=0.26,eRank=206.8,q75/q25=16.46 mlp_w2:H=0.8461,top10E=0.14,eRank=283.7,q75/q25=24.33 vo_prod:H=0.6703,top10E=0.21,eRank=125.1,q75/q25=inf train_time:508910ms step_avg:74.84ms +[2025-09-02 09:57:37] [Rank 0] PRINT: step:6800/10000 val_loss:3.9175 svd_entropy: attn_qk:H=0.7663,top10E=0.25,eRank=184.6,q75/q25=88.19 attn_vo:H=0.7944,top10E=0.14,eRank=270.3,q75/q25=inf mlp_w1:H=0.7828,top10E=0.26,eRank=206.8,q75/q25=16.46 mlp_w2:H=0.8461,top10E=0.14,eRank=283.7,q75/q25=24.33 vo_prod:H=0.6703,top10E=0.21,eRank=125.1,q75/q25=inf train_time:508910ms step_avg:74.84ms +[2025-09-02 09:57:37] [Rank 0] step:6801/10000 train_time:508921ms step_avg:74.83ms +[2025-09-02 09:57:37] [Rank 0] step:6801/10000 train_time:508921ms step_avg:74.83ms +[2025-09-02 09:57:39] [Rank 0] step:6821/10000 train_time:510376ms step_avg:74.82ms +[2025-09-02 09:57:39] [Rank 0] step:6821/10000 train_time:510376ms step_avg:74.82ms +[2025-09-02 09:57:40] [Rank 0] step:6841/10000 train_time:511965ms step_avg:74.84ms +[2025-09-02 09:57:40] [Rank 0] step:6841/10000 train_time:511965ms step_avg:74.84ms +[2025-09-02 09:57:42] [Rank 0] step:6861/10000 train_time:513561ms step_avg:74.85ms +[2025-09-02 09:57:42] [Rank 0] step:6861/10000 train_time:513561ms step_avg:74.85ms +[2025-09-02 09:57:44] [Rank 0] step:6881/10000 train_time:515152ms step_avg:74.87ms +[2025-09-02 09:57:44] [Rank 0] step:6881/10000 train_time:515152ms step_avg:74.87ms +[2025-09-02 09:57:45] [Rank 0] step:6901/10000 train_time:516747ms step_avg:74.88ms +[2025-09-02 09:57:45] [Rank 0] step:6901/10000 train_time:516747ms step_avg:74.88ms +[2025-09-02 09:57:47] [Rank 0] step:6921/10000 train_time:518339ms step_avg:74.89ms +[2025-09-02 09:57:47] [Rank 0] step:6921/10000 train_time:518339ms step_avg:74.89ms +[2025-09-02 09:57:48] [Rank 
0] step:6941/10000 train_time:519942ms step_avg:74.91ms +[2025-09-02 09:57:48] [Rank 0] step:6941/10000 train_time:519942ms step_avg:74.91ms +[2025-09-02 09:57:50] [Rank 0] step:6961/10000 train_time:521551ms step_avg:74.92ms +[2025-09-02 09:57:50] [Rank 0] step:6961/10000 train_time:521551ms step_avg:74.92ms +[2025-09-02 09:57:52] [Rank 0] step:6981/10000 train_time:523155ms step_avg:74.94ms +[2025-09-02 09:57:52] [Rank 0] step:6981/10000 train_time:523155ms step_avg:74.94ms +[2025-09-02 09:57:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:57:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:58:05] [Rank 0] PRINT: step:7000/10000 val_loss:3.9042 svd_entropy: attn_qk:H=0.7676,top10E=0.25,eRank=185.8,q75/q25=88.04 attn_vo:H=0.7955,top10E=0.14,eRank=271.8,q75/q25=inf mlp_w1:H=0.7844,top10E=0.26,eRank=208.7,q75/q25=16.75 mlp_w2:H=0.8468,top10E=0.14,eRank=285.3,q75/q25=24.63 vo_prod:H=0.6719,top10E=0.21,eRank=126.5,q75/q25=inf train_time:524918ms step_avg:74.99ms +[2025-09-02 09:58:05] [Rank 0] PRINT: step:7000/10000 val_loss:3.9042 svd_entropy: attn_qk:H=0.7676,top10E=0.25,eRank=185.8,q75/q25=88.04 attn_vo:H=0.7955,top10E=0.14,eRank=271.8,q75/q25=inf mlp_w1:H=0.7844,top10E=0.26,eRank=208.7,q75/q25=16.75 mlp_w2:H=0.8468,top10E=0.14,eRank=285.3,q75/q25=24.63 vo_prod:H=0.6719,top10E=0.21,eRank=126.5,q75/q25=inf train_time:524918ms step_avg:74.99ms +[2025-09-02 09:58:05] [Rank 0] step:7001/10000 train_time:524928ms step_avg:74.98ms +[2025-09-02 09:58:05] [Rank 0] step:7001/10000 train_time:524928ms step_avg:74.98ms +[2025-09-02 09:58:07] [Rank 0] step:7021/10000 train_time:526371ms step_avg:74.97ms +[2025-09-02 09:58:07] [Rank 0] step:7021/10000 train_time:526371ms step_avg:74.97ms +[2025-09-02 09:58:08] [Rank 0] step:7041/10000 train_time:527969ms step_avg:74.98ms +[2025-09-02 
09:58:08] [Rank 0] step:7041/10000 train_time:527969ms step_avg:74.98ms +[2025-09-02 09:58:10] [Rank 0] step:7061/10000 train_time:529563ms step_avg:75.00ms +[2025-09-02 09:58:10] [Rank 0] step:7061/10000 train_time:529563ms step_avg:75.00ms +[2025-09-02 09:58:11] [Rank 0] step:7081/10000 train_time:531160ms step_avg:75.01ms +[2025-09-02 09:58:11] [Rank 0] step:7081/10000 train_time:531160ms step_avg:75.01ms +[2025-09-02 09:58:13] [Rank 0] step:7101/10000 train_time:532755ms step_avg:75.03ms +[2025-09-02 09:58:13] [Rank 0] step:7101/10000 train_time:532755ms step_avg:75.03ms +[2025-09-02 09:58:14] [Rank 0] step:7121/10000 train_time:534355ms step_avg:75.04ms +[2025-09-02 09:58:14] [Rank 0] step:7121/10000 train_time:534355ms step_avg:75.04ms +[2025-09-02 09:58:16] [Rank 0] step:7141/10000 train_time:535955ms step_avg:75.05ms +[2025-09-02 09:58:16] [Rank 0] step:7141/10000 train_time:535955ms step_avg:75.05ms +[2025-09-02 09:58:18] [Rank 0] step:7161/10000 train_time:537556ms step_avg:75.07ms +[2025-09-02 09:58:18] [Rank 0] step:7161/10000 train_time:537556ms step_avg:75.07ms +[2025-09-02 09:58:19] [Rank 0] step:7181/10000 train_time:539153ms step_avg:75.08ms +[2025-09-02 09:58:19] [Rank 0] step:7181/10000 train_time:539153ms step_avg:75.08ms +[2025-09-02 09:58:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:58:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:58:33] [Rank 0] PRINT: step:7200/10000 val_loss:3.8932 svd_entropy: attn_qk:H=0.7689,top10E=0.24,eRank=187.0,q75/q25=87.67 attn_vo:H=0.7965,top10E=0.14,eRank=273.2,q75/q25=inf mlp_w1:H=0.7860,top10E=0.26,eRank=210.5,q75/q25=16.98 mlp_w2:H=0.8476,top10E=0.14,eRank=286.8,q75/q25=24.88 vo_prod:H=0.6734,top10E=0.21,eRank=127.9,q75/q25=inf train_time:540916ms step_avg:75.13ms +[2025-09-02 09:58:33] [Rank 0] PRINT: step:7200/10000 val_loss:3.8932 svd_entropy: attn_qk:H=0.7689,top10E=0.24,eRank=187.0,q75/q25=87.67 attn_vo:H=0.7965,top10E=0.14,eRank=273.2,q75/q25=inf mlp_w1:H=0.7860,top10E=0.26,eRank=210.5,q75/q25=16.98 mlp_w2:H=0.8476,top10E=0.14,eRank=286.8,q75/q25=24.88 vo_prod:H=0.6734,top10E=0.21,eRank=127.9,q75/q25=inf train_time:540916ms step_avg:75.13ms +[2025-09-02 09:58:33] [Rank 0] step:7201/10000 train_time:540927ms step_avg:75.12ms +[2025-09-02 09:58:33] [Rank 0] step:7201/10000 train_time:540927ms step_avg:75.12ms +[2025-09-02 09:58:34] [Rank 0] step:7221/10000 train_time:542390ms step_avg:75.11ms +[2025-09-02 09:58:34] [Rank 0] step:7221/10000 train_time:542390ms step_avg:75.11ms +[2025-09-02 09:58:36] [Rank 0] step:7241/10000 train_time:543982ms step_avg:75.13ms +[2025-09-02 09:58:36] [Rank 0] step:7241/10000 train_time:543982ms step_avg:75.13ms +[2025-09-02 09:58:38] [Rank 0] step:7261/10000 train_time:545577ms step_avg:75.14ms +[2025-09-02 09:58:38] [Rank 0] step:7261/10000 train_time:545577ms step_avg:75.14ms +[2025-09-02 09:58:39] [Rank 0] step:7281/10000 train_time:547181ms step_avg:75.15ms +[2025-09-02 09:58:39] [Rank 0] step:7281/10000 train_time:547181ms step_avg:75.15ms +[2025-09-02 09:58:41] [Rank 0] step:7301/10000 train_time:548779ms step_avg:75.16ms +[2025-09-02 09:58:41] [Rank 0] step:7301/10000 train_time:548779ms step_avg:75.16ms +[2025-09-02 09:58:42] [Rank 0] step:7321/10000 train_time:550385ms step_avg:75.18ms +[2025-09-02 09:58:42] [Rank 0] step:7321/10000 train_time:550385ms step_avg:75.18ms +[2025-09-02 09:58:44] [Rank 
0] step:7341/10000 train_time:551985ms step_avg:75.19ms +[2025-09-02 09:58:44] [Rank 0] step:7341/10000 train_time:551985ms step_avg:75.19ms +[2025-09-02 09:58:46] [Rank 0] step:7361/10000 train_time:553585ms step_avg:75.21ms +[2025-09-02 09:58:46] [Rank 0] step:7361/10000 train_time:553585ms step_avg:75.21ms +[2025-09-02 09:58:47] [Rank 0] step:7381/10000 train_time:555192ms step_avg:75.22ms +[2025-09-02 09:58:47] [Rank 0] step:7381/10000 train_time:555192ms step_avg:75.22ms +[2025-09-02 09:58:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:58:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:59:01] [Rank 0] PRINT: step:7400/10000 val_loss:3.8748 svd_entropy: attn_qk:H=0.7699,top10E=0.24,eRank=187.9,q75/q25=87.56 attn_vo:H=0.7973,top10E=0.13,eRank=274.4,q75/q25=inf mlp_w1:H=0.7872,top10E=0.25,eRank=212.0,q75/q25=17.23 mlp_w2:H=0.8483,top10E=0.14,eRank=288.2,q75/q25=25.00 vo_prod:H=0.6747,top10E=0.21,eRank=129.1,q75/q25=inf train_time:556936ms step_avg:75.26ms +[2025-09-02 09:59:01] [Rank 0] PRINT: step:7400/10000 val_loss:3.8748 svd_entropy: attn_qk:H=0.7699,top10E=0.24,eRank=187.9,q75/q25=87.56 attn_vo:H=0.7973,top10E=0.13,eRank=274.4,q75/q25=inf mlp_w1:H=0.7872,top10E=0.25,eRank=212.0,q75/q25=17.23 mlp_w2:H=0.8483,top10E=0.14,eRank=288.2,q75/q25=25.00 vo_prod:H=0.6747,top10E=0.21,eRank=129.1,q75/q25=inf train_time:556936ms step_avg:75.26ms +[2025-09-02 09:59:01] [Rank 0] step:7401/10000 train_time:556947ms step_avg:75.25ms +[2025-09-02 09:59:01] [Rank 0] step:7401/10000 train_time:556947ms step_avg:75.25ms +[2025-09-02 09:59:02] [Rank 0] step:7421/10000 train_time:558395ms step_avg:75.25ms +[2025-09-02 09:59:02] [Rank 0] step:7421/10000 train_time:558395ms step_avg:75.25ms +[2025-09-02 09:59:04] [Rank 0] step:7441/10000 train_time:559992ms step_avg:75.26ms +[2025-09-02 
09:59:04] [Rank 0] step:7441/10000 train_time:559992ms step_avg:75.26ms +[2025-09-02 09:59:05] [Rank 0] step:7461/10000 train_time:561589ms step_avg:75.27ms +[2025-09-02 09:59:05] [Rank 0] step:7461/10000 train_time:561589ms step_avg:75.27ms +[2025-09-02 09:59:07] [Rank 0] step:7481/10000 train_time:563193ms step_avg:75.28ms +[2025-09-02 09:59:07] [Rank 0] step:7481/10000 train_time:563193ms step_avg:75.28ms +[2025-09-02 09:59:09] [Rank 0] step:7501/10000 train_time:564796ms step_avg:75.30ms +[2025-09-02 09:59:09] [Rank 0] step:7501/10000 train_time:564796ms step_avg:75.30ms +[2025-09-02 09:59:10] [Rank 0] step:7521/10000 train_time:566397ms step_avg:75.31ms +[2025-09-02 09:59:10] [Rank 0] step:7521/10000 train_time:566397ms step_avg:75.31ms +[2025-09-02 09:59:12] [Rank 0] step:7541/10000 train_time:568008ms step_avg:75.32ms +[2025-09-02 09:59:12] [Rank 0] step:7541/10000 train_time:568008ms step_avg:75.32ms +[2025-09-02 09:59:13] [Rank 0] step:7561/10000 train_time:569597ms step_avg:75.33ms +[2025-09-02 09:59:13] [Rank 0] step:7561/10000 train_time:569597ms step_avg:75.33ms +[2025-09-02 09:59:15] [Rank 0] step:7581/10000 train_time:571207ms step_avg:75.35ms +[2025-09-02 09:59:15] [Rank 0] step:7581/10000 train_time:571207ms step_avg:75.35ms +[2025-09-02 09:59:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:59:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:59:28] [Rank 0] PRINT: step:7600/10000 val_loss:3.8710 svd_entropy: attn_qk:H=0.7709,top10E=0.24,eRank=188.9,q75/q25=87.31 attn_vo:H=0.7981,top10E=0.13,eRank=275.5,q75/q25=inf mlp_w1:H=0.7885,top10E=0.25,eRank=213.5,q75/q25=17.38 mlp_w2:H=0.8490,top10E=0.14,eRank=289.6,q75/q25=25.17 vo_prod:H=0.6759,top10E=0.21,eRank=130.2,q75/q25=inf train_time:572975ms step_avg:75.39ms +[2025-09-02 09:59:28] [Rank 0] PRINT: step:7600/10000 val_loss:3.8710 svd_entropy: attn_qk:H=0.7709,top10E=0.24,eRank=188.9,q75/q25=87.31 attn_vo:H=0.7981,top10E=0.13,eRank=275.5,q75/q25=inf mlp_w1:H=0.7885,top10E=0.25,eRank=213.5,q75/q25=17.38 mlp_w2:H=0.8490,top10E=0.14,eRank=289.6,q75/q25=25.17 vo_prod:H=0.6759,top10E=0.21,eRank=130.2,q75/q25=inf train_time:572975ms step_avg:75.39ms +[2025-09-02 09:59:28] [Rank 0] step:7601/10000 train_time:572985ms step_avg:75.38ms +[2025-09-02 09:59:28] [Rank 0] step:7601/10000 train_time:572985ms step_avg:75.38ms +[2025-09-02 09:59:30] [Rank 0] step:7621/10000 train_time:574447ms step_avg:75.38ms +[2025-09-02 09:59:30] [Rank 0] step:7621/10000 train_time:574447ms step_avg:75.38ms +[2025-09-02 09:59:32] [Rank 0] step:7641/10000 train_time:576046ms step_avg:75.39ms +[2025-09-02 09:59:32] [Rank 0] step:7641/10000 train_time:576046ms step_avg:75.39ms +[2025-09-02 09:59:33] [Rank 0] step:7661/10000 train_time:577646ms step_avg:75.40ms +[2025-09-02 09:59:33] [Rank 0] step:7661/10000 train_time:577646ms step_avg:75.40ms +[2025-09-02 09:59:35] [Rank 0] step:7681/10000 train_time:579240ms step_avg:75.41ms +[2025-09-02 09:59:35] [Rank 0] step:7681/10000 train_time:579240ms step_avg:75.41ms +[2025-09-02 09:59:36] [Rank 0] step:7701/10000 train_time:580832ms step_avg:75.42ms +[2025-09-02 09:59:36] [Rank 0] step:7701/10000 train_time:580832ms step_avg:75.42ms +[2025-09-02 09:59:38] [Rank 0] step:7721/10000 train_time:582443ms step_avg:75.44ms +[2025-09-02 09:59:38] [Rank 0] step:7721/10000 train_time:582443ms step_avg:75.44ms +[2025-09-02 09:59:40] [Rank 
0] step:7741/10000 train_time:584041ms step_avg:75.45ms +[2025-09-02 09:59:40] [Rank 0] step:7741/10000 train_time:584041ms step_avg:75.45ms +[2025-09-02 09:59:41] [Rank 0] step:7761/10000 train_time:585644ms step_avg:75.46ms +[2025-09-02 09:59:41] [Rank 0] step:7761/10000 train_time:585644ms step_avg:75.46ms +[2025-09-02 09:59:43] [Rank 0] step:7781/10000 train_time:587251ms step_avg:75.47ms +[2025-09-02 09:59:43] [Rank 0] step:7781/10000 train_time:587251ms step_avg:75.47ms +[2025-09-02 09:59:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:59:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:59:56] [Rank 0] PRINT: step:7800/10000 val_loss:3.8561 svd_entropy: attn_qk:H=0.7718,top10E=0.24,eRank=189.8,q75/q25=86.85 attn_vo:H=0.7989,top10E=0.13,eRank=276.6,q75/q25=inf mlp_w1:H=0.7896,top10E=0.25,eRank=214.9,q75/q25=17.59 mlp_w2:H=0.8496,top10E=0.14,eRank=290.8,q75/q25=25.31 vo_prod:H=0.6770,top10E=0.21,eRank=131.3,q75/q25=inf train_time:589022ms step_avg:75.52ms +[2025-09-02 09:59:56] [Rank 0] PRINT: step:7800/10000 val_loss:3.8561 svd_entropy: attn_qk:H=0.7718,top10E=0.24,eRank=189.8,q75/q25=86.85 attn_vo:H=0.7989,top10E=0.13,eRank=276.6,q75/q25=inf mlp_w1:H=0.7896,top10E=0.25,eRank=214.9,q75/q25=17.59 mlp_w2:H=0.8496,top10E=0.14,eRank=290.8,q75/q25=25.31 vo_prod:H=0.6770,top10E=0.21,eRank=131.3,q75/q25=inf train_time:589022ms step_avg:75.52ms +[2025-09-02 09:59:56] [Rank 0] step:7801/10000 train_time:589033ms step_avg:75.51ms +[2025-09-02 09:59:56] [Rank 0] step:7801/10000 train_time:589033ms step_avg:75.51ms +[2025-09-02 09:59:58] [Rank 0] step:7821/10000 train_time:590495ms step_avg:75.50ms +[2025-09-02 09:59:58] [Rank 0] step:7821/10000 train_time:590495ms step_avg:75.50ms +[2025-09-02 09:59:59] [Rank 0] step:7841/10000 train_time:592092ms step_avg:75.51ms +[2025-09-02 
09:59:59] [Rank 0] step:7841/10000 train_time:592092ms step_avg:75.51ms +[2025-09-02 10:00:01] [Rank 0] step:7861/10000 train_time:593696ms step_avg:75.52ms +[2025-09-02 10:00:01] [Rank 0] step:7861/10000 train_time:593696ms step_avg:75.52ms +[2025-09-02 10:00:02] [Rank 0] step:7881/10000 train_time:595308ms step_avg:75.54ms +[2025-09-02 10:00:02] [Rank 0] step:7881/10000 train_time:595308ms step_avg:75.54ms +[2025-09-02 10:00:04] [Rank 0] step:7901/10000 train_time:596906ms step_avg:75.55ms +[2025-09-02 10:00:04] [Rank 0] step:7901/10000 train_time:596906ms step_avg:75.55ms +[2025-09-02 10:00:06] [Rank 0] step:7921/10000 train_time:598509ms step_avg:75.56ms +[2025-09-02 10:00:06] [Rank 0] step:7921/10000 train_time:598509ms step_avg:75.56ms +[2025-09-02 10:00:07] [Rank 0] step:7941/10000 train_time:600122ms step_avg:75.57ms +[2025-09-02 10:00:07] [Rank 0] step:7941/10000 train_time:600122ms step_avg:75.57ms +[2025-09-02 10:00:09] [Rank 0] step:7961/10000 train_time:601728ms step_avg:75.58ms +[2025-09-02 10:00:09] [Rank 0] step:7961/10000 train_time:601728ms step_avg:75.58ms +[2025-09-02 10:00:10] [Rank 0] step:7981/10000 train_time:603331ms step_avg:75.60ms +[2025-09-02 10:00:10] [Rank 0] step:7981/10000 train_time:603331ms step_avg:75.60ms +[2025-09-02 10:00:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:00:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 10:00:24] [Rank 0] PRINT: step:8000/10000 val_loss:3.8417 svd_entropy: attn_qk:H=0.7726,top10E=0.24,eRank=190.6,q75/q25=86.53 attn_vo:H=0.7996,top10E=0.13,eRank=277.6,q75/q25=inf mlp_w1:H=0.7906,top10E=0.25,eRank=216.1,q75/q25=17.71 mlp_w2:H=0.8502,top10E=0.14,eRank=292.0,q75/q25=25.41 vo_prod:H=0.6781,top10E=0.20,eRank=132.3,q75/q25=inf train_time:605095ms step_avg:75.64ms +[2025-09-02 10:00:24] [Rank 0] PRINT: step:8000/10000 val_loss:3.8417 svd_entropy: attn_qk:H=0.7726,top10E=0.24,eRank=190.6,q75/q25=86.53 attn_vo:H=0.7996,top10E=0.13,eRank=277.6,q75/q25=inf mlp_w1:H=0.7906,top10E=0.25,eRank=216.1,q75/q25=17.71 mlp_w2:H=0.8502,top10E=0.14,eRank=292.0,q75/q25=25.41 vo_prod:H=0.6781,top10E=0.20,eRank=132.3,q75/q25=inf train_time:605095ms step_avg:75.64ms +[2025-09-02 10:00:24] [Rank 0] step:8001/10000 train_time:605106ms step_avg:75.63ms +[2025-09-02 10:00:24] [Rank 0] step:8001/10000 train_time:605106ms step_avg:75.63ms +[2025-09-02 10:00:25] [Rank 0] step:8021/10000 train_time:606563ms step_avg:75.62ms +[2025-09-02 10:00:25] [Rank 0] step:8021/10000 train_time:606563ms step_avg:75.62ms +[2025-09-02 10:00:27] [Rank 0] step:8041/10000 train_time:608174ms step_avg:75.63ms +[2025-09-02 10:00:27] [Rank 0] step:8041/10000 train_time:608174ms step_avg:75.63ms +[2025-09-02 10:00:28] [Rank 0] step:8061/10000 train_time:609774ms step_avg:75.64ms +[2025-09-02 10:00:28] [Rank 0] step:8061/10000 train_time:609774ms step_avg:75.64ms +[2025-09-02 10:00:30] [Rank 0] step:8081/10000 train_time:611369ms step_avg:75.66ms +[2025-09-02 10:00:30] [Rank 0] step:8081/10000 train_time:611369ms step_avg:75.66ms +[2025-09-02 10:00:32] [Rank 0] step:8101/10000 train_time:612980ms step_avg:75.67ms +[2025-09-02 10:00:32] [Rank 0] step:8101/10000 train_time:612980ms step_avg:75.67ms +[2025-09-02 10:00:33] [Rank 0] step:8121/10000 train_time:614580ms step_avg:75.68ms +[2025-09-02 10:00:33] [Rank 0] step:8121/10000 train_time:614580ms step_avg:75.68ms +[2025-09-02 10:00:35] [Rank 
0] step:8141/10000 train_time:616186ms step_avg:75.69ms +[2025-09-02 10:00:35] [Rank 0] step:8141/10000 train_time:616186ms step_avg:75.69ms +[2025-09-02 10:00:37] [Rank 0] step:8161/10000 train_time:617804ms step_avg:75.70ms +[2025-09-02 10:00:37] [Rank 0] step:8161/10000 train_time:617804ms step_avg:75.70ms +[2025-09-02 10:00:38] [Rank 0] step:8181/10000 train_time:619436ms step_avg:75.72ms +[2025-09-02 10:00:38] [Rank 0] step:8181/10000 train_time:619436ms step_avg:75.72ms +[2025-09-02 10:00:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:00:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:00:51] [Rank 0] PRINT: step:8200/10000 val_loss:3.8331 svd_entropy: attn_qk:H=0.7733,top10E=0.24,eRank=191.3,q75/q25=86.43 attn_vo:H=0.8002,top10E=0.13,eRank=278.5,q75/q25=inf mlp_w1:H=0.7915,top10E=0.25,eRank=217.1,q75/q25=17.78 mlp_w2:H=0.8508,top10E=0.14,eRank=293.0,q75/q25=25.50 vo_prod:H=0.6791,top10E=0.20,eRank=133.2,q75/q25=inf train_time:621253ms step_avg:75.76ms +[2025-09-02 10:00:51] [Rank 0] PRINT: step:8200/10000 val_loss:3.8331 svd_entropy: attn_qk:H=0.7733,top10E=0.24,eRank=191.3,q75/q25=86.43 attn_vo:H=0.8002,top10E=0.13,eRank=278.5,q75/q25=inf mlp_w1:H=0.7915,top10E=0.25,eRank=217.1,q75/q25=17.78 mlp_w2:H=0.8508,top10E=0.14,eRank=293.0,q75/q25=25.50 vo_prod:H=0.6791,top10E=0.20,eRank=133.2,q75/q25=inf train_time:621253ms step_avg:75.76ms +[2025-09-02 10:00:51] [Rank 0] step:8201/10000 train_time:621265ms step_avg:75.75ms +[2025-09-02 10:00:51] [Rank 0] step:8201/10000 train_time:621265ms step_avg:75.75ms +[2025-09-02 10:00:53] [Rank 0] step:8221/10000 train_time:622752ms step_avg:75.75ms +[2025-09-02 10:00:53] [Rank 0] step:8221/10000 train_time:622752ms step_avg:75.75ms +[2025-09-02 10:00:55] [Rank 0] step:8241/10000 train_time:624389ms step_avg:75.77ms +[2025-09-02 
10:00:55] [Rank 0] step:8241/10000 train_time:624389ms step_avg:75.77ms +[2025-09-02 10:00:56] [Rank 0] step:8261/10000 train_time:626021ms step_avg:75.78ms +[2025-09-02 10:00:56] [Rank 0] step:8261/10000 train_time:626021ms step_avg:75.78ms +[2025-09-02 10:00:58] [Rank 0] step:8281/10000 train_time:627656ms step_avg:75.79ms +[2025-09-02 10:00:58] [Rank 0] step:8281/10000 train_time:627656ms step_avg:75.79ms +[2025-09-02 10:01:00] [Rank 0] step:8301/10000 train_time:629287ms step_avg:75.81ms +[2025-09-02 10:01:00] [Rank 0] step:8301/10000 train_time:629287ms step_avg:75.81ms +[2025-09-02 10:01:01] [Rank 0] step:8321/10000 train_time:630908ms step_avg:75.82ms +[2025-09-02 10:01:01] [Rank 0] step:8321/10000 train_time:630908ms step_avg:75.82ms +[2025-09-02 10:01:03] [Rank 0] step:8341/10000 train_time:632542ms step_avg:75.84ms +[2025-09-02 10:01:03] [Rank 0] step:8341/10000 train_time:632542ms step_avg:75.84ms +[2025-09-02 10:01:05] [Rank 0] step:8361/10000 train_time:634179ms step_avg:75.85ms +[2025-09-02 10:01:05] [Rank 0] step:8361/10000 train_time:634179ms step_avg:75.85ms +[2025-09-02 10:01:06] [Rank 0] step:8381/10000 train_time:635816ms step_avg:75.86ms +[2025-09-02 10:01:06] [Rank 0] step:8381/10000 train_time:635816ms step_avg:75.86ms +[2025-09-02 10:01:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:01:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 10:01:19] [Rank 0] PRINT: step:8400/10000 val_loss:3.8237 svd_entropy: attn_qk:H=0.7739,top10E=0.24,eRank=191.9,q75/q25=86.22 attn_vo:H=0.8008,top10E=0.13,eRank=279.3,q75/q25=inf mlp_w1:H=0.7923,top10E=0.25,eRank=218.1,q75/q25=17.86 mlp_w2:H=0.8513,top10E=0.14,eRank=294.1,q75/q25=25.60 vo_prod:H=0.6800,top10E=0.20,eRank=134.1,q75/q25=inf train_time:637608ms step_avg:75.91ms +[2025-09-02 10:01:19] [Rank 0] PRINT: step:8400/10000 val_loss:3.8237 svd_entropy: attn_qk:H=0.7739,top10E=0.24,eRank=191.9,q75/q25=86.22 attn_vo:H=0.8008,top10E=0.13,eRank=279.3,q75/q25=inf mlp_w1:H=0.7923,top10E=0.25,eRank=218.1,q75/q25=17.86 mlp_w2:H=0.8513,top10E=0.14,eRank=294.1,q75/q25=25.60 vo_prod:H=0.6800,top10E=0.20,eRank=134.1,q75/q25=inf train_time:637608ms step_avg:75.91ms +[2025-09-02 10:01:20] [Rank 0] step:8401/10000 train_time:637618ms step_avg:75.90ms +[2025-09-02 10:01:20] [Rank 0] step:8401/10000 train_time:637618ms step_avg:75.90ms +[2025-09-02 10:01:21] [Rank 0] step:8421/10000 train_time:639101ms step_avg:75.89ms +[2025-09-02 10:01:21] [Rank 0] step:8421/10000 train_time:639101ms step_avg:75.89ms +[2025-09-02 10:01:23] [Rank 0] step:8441/10000 train_time:640730ms step_avg:75.91ms +[2025-09-02 10:01:23] [Rank 0] step:8441/10000 train_time:640730ms step_avg:75.91ms +[2025-09-02 10:01:24] [Rank 0] step:8461/10000 train_time:642355ms step_avg:75.92ms +[2025-09-02 10:01:24] [Rank 0] step:8461/10000 train_time:642355ms step_avg:75.92ms +[2025-09-02 10:01:26] [Rank 0] step:8481/10000 train_time:643988ms step_avg:75.93ms +[2025-09-02 10:01:26] [Rank 0] step:8481/10000 train_time:643988ms step_avg:75.93ms +[2025-09-02 10:01:28] [Rank 0] step:8501/10000 train_time:645638ms step_avg:75.95ms +[2025-09-02 10:01:28] [Rank 0] step:8501/10000 train_time:645638ms step_avg:75.95ms +[2025-09-02 10:01:29] [Rank 0] step:8521/10000 train_time:647276ms step_avg:75.96ms +[2025-09-02 10:01:29] [Rank 0] step:8521/10000 train_time:647276ms step_avg:75.96ms +[2025-09-02 10:01:31] [Rank 
0] step:8541/10000 train_time:648917ms step_avg:75.98ms +[2025-09-02 10:01:31] [Rank 0] step:8541/10000 train_time:648917ms step_avg:75.98ms +[2025-09-02 10:01:33] [Rank 0] step:8561/10000 train_time:650548ms step_avg:75.99ms +[2025-09-02 10:01:33] [Rank 0] step:8561/10000 train_time:650548ms step_avg:75.99ms +[2025-09-02 10:01:34] [Rank 0] step:8581/10000 train_time:652180ms step_avg:76.00ms +[2025-09-02 10:01:34] [Rank 0] step:8581/10000 train_time:652180ms step_avg:76.00ms +[2025-09-02 10:01:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:01:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:01:47] [Rank 0] PRINT: step:8600/10000 val_loss:3.8149 svd_entropy: attn_qk:H=0.7746,top10E=0.24,eRank=192.6,q75/q25=86.26 attn_vo:H=0.8013,top10E=0.13,eRank=280.0,q75/q25=inf mlp_w1:H=0.7929,top10E=0.25,eRank=219.0,q75/q25=17.92 mlp_w2:H=0.8517,top10E=0.14,eRank=295.0,q75/q25=25.67 vo_prod:H=0.6809,top10E=0.20,eRank=134.9,q75/q25=inf train_time:653963ms step_avg:76.04ms +[2025-09-02 10:01:47] [Rank 0] PRINT: step:8600/10000 val_loss:3.8149 svd_entropy: attn_qk:H=0.7746,top10E=0.24,eRank=192.6,q75/q25=86.26 attn_vo:H=0.8013,top10E=0.13,eRank=280.0,q75/q25=inf mlp_w1:H=0.7929,top10E=0.25,eRank=219.0,q75/q25=17.92 mlp_w2:H=0.8517,top10E=0.14,eRank=295.0,q75/q25=25.67 vo_prod:H=0.6809,top10E=0.20,eRank=134.9,q75/q25=inf train_time:653963ms step_avg:76.04ms +[2025-09-02 10:01:48] [Rank 0] step:8601/10000 train_time:653973ms step_avg:76.03ms +[2025-09-02 10:01:48] [Rank 0] step:8601/10000 train_time:653973ms step_avg:76.03ms +[2025-09-02 10:01:49] [Rank 0] step:8621/10000 train_time:655454ms step_avg:76.03ms +[2025-09-02 10:01:49] [Rank 0] step:8621/10000 train_time:655454ms step_avg:76.03ms +[2025-09-02 10:01:51] [Rank 0] step:8641/10000 train_time:657083ms step_avg:76.04ms +[2025-09-02 
10:01:51] [Rank 0] step:8641/10000 train_time:657083ms step_avg:76.04ms +[2025-09-02 10:01:52] [Rank 0] step:8661/10000 train_time:658714ms step_avg:76.06ms +[2025-09-02 10:01:52] [Rank 0] step:8661/10000 train_time:658714ms step_avg:76.06ms +[2025-09-02 10:01:54] [Rank 0] step:8681/10000 train_time:660341ms step_avg:76.07ms +[2025-09-02 10:01:54] [Rank 0] step:8681/10000 train_time:660341ms step_avg:76.07ms +[2025-09-02 10:01:56] [Rank 0] step:8701/10000 train_time:661962ms step_avg:76.08ms +[2025-09-02 10:01:56] [Rank 0] step:8701/10000 train_time:661962ms step_avg:76.08ms +[2025-09-02 10:01:57] [Rank 0] step:8721/10000 train_time:663596ms step_avg:76.09ms +[2025-09-02 10:01:57] [Rank 0] step:8721/10000 train_time:663596ms step_avg:76.09ms +[2025-09-02 10:01:59] [Rank 0] step:8741/10000 train_time:665216ms step_avg:76.10ms +[2025-09-02 10:01:59] [Rank 0] step:8741/10000 train_time:665216ms step_avg:76.10ms +[2025-09-02 10:02:01] [Rank 0] step:8761/10000 train_time:666842ms step_avg:76.11ms +[2025-09-02 10:02:01] [Rank 0] step:8761/10000 train_time:666842ms step_avg:76.11ms +[2025-09-02 10:02:02] [Rank 0] step:8781/10000 train_time:668482ms step_avg:76.13ms +[2025-09-02 10:02:02] [Rank 0] step:8781/10000 train_time:668482ms step_avg:76.13ms +[2025-09-02 10:02:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:02:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 10:02:15] [Rank 0] PRINT: step:8800/10000 val_loss:3.8049 svd_entropy: attn_qk:H=0.7751,top10E=0.24,eRank=193.1,q75/q25=86.03 attn_vo:H=0.8017,top10E=0.13,eRank=280.7,q75/q25=inf mlp_w1:H=0.7936,top10E=0.25,eRank=219.8,q75/q25=17.99 mlp_w2:H=0.8522,top10E=0.13,eRank=295.9,q75/q25=25.66 vo_prod:H=0.6816,top10E=0.20,eRank=135.6,q75/q25=inf train_time:670276ms step_avg:76.17ms +[2025-09-02 10:02:15] [Rank 0] PRINT: step:8800/10000 val_loss:3.8049 svd_entropy: attn_qk:H=0.7751,top10E=0.24,eRank=193.1,q75/q25=86.03 attn_vo:H=0.8017,top10E=0.13,eRank=280.7,q75/q25=inf mlp_w1:H=0.7936,top10E=0.25,eRank=219.8,q75/q25=17.99 mlp_w2:H=0.8522,top10E=0.13,eRank=295.9,q75/q25=25.66 vo_prod:H=0.6816,top10E=0.20,eRank=135.6,q75/q25=inf train_time:670276ms step_avg:76.17ms +[2025-09-02 10:02:15] [Rank 0] step:8801/10000 train_time:670287ms step_avg:76.16ms +[2025-09-02 10:02:15] [Rank 0] step:8801/10000 train_time:670287ms step_avg:76.16ms +[2025-09-02 10:02:17] [Rank 0] step:8821/10000 train_time:671767ms step_avg:76.16ms +[2025-09-02 10:02:17] [Rank 0] step:8821/10000 train_time:671767ms step_avg:76.16ms +[2025-09-02 10:02:19] [Rank 0] step:8841/10000 train_time:673413ms step_avg:76.17ms +[2025-09-02 10:02:19] [Rank 0] step:8841/10000 train_time:673413ms step_avg:76.17ms +[2025-09-02 10:02:20] [Rank 0] step:8861/10000 train_time:675040ms step_avg:76.18ms +[2025-09-02 10:02:20] [Rank 0] step:8861/10000 train_time:675040ms step_avg:76.18ms +[2025-09-02 10:02:22] [Rank 0] step:8881/10000 train_time:676674ms step_avg:76.19ms +[2025-09-02 10:02:22] [Rank 0] step:8881/10000 train_time:676674ms step_avg:76.19ms +[2025-09-02 10:02:24] [Rank 0] step:8901/10000 train_time:678310ms step_avg:76.21ms +[2025-09-02 10:02:24] [Rank 0] step:8901/10000 train_time:678310ms step_avg:76.21ms +[2025-09-02 10:02:25] [Rank 0] step:8921/10000 train_time:679945ms step_avg:76.22ms +[2025-09-02 10:02:25] [Rank 0] step:8921/10000 train_time:679945ms step_avg:76.22ms +[2025-09-02 10:02:27] [Rank 
0] step:8941/10000 train_time:681586ms step_avg:76.23ms +[2025-09-02 10:02:27] [Rank 0] step:8941/10000 train_time:681586ms step_avg:76.23ms +[2025-09-02 10:02:29] [Rank 0] step:8961/10000 train_time:683209ms step_avg:76.24ms +[2025-09-02 10:02:29] [Rank 0] step:8961/10000 train_time:683209ms step_avg:76.24ms +[2025-09-02 10:02:30] [Rank 0] step:8981/10000 train_time:684837ms step_avg:76.25ms +[2025-09-02 10:02:30] [Rank 0] step:8981/10000 train_time:684837ms step_avg:76.25ms +[2025-09-02 10:02:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:02:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:02:43] [Rank 0] PRINT: step:9000/10000 val_loss:3.7968 svd_entropy: attn_qk:H=0.7755,top10E=0.24,eRank=193.6,q75/q25=85.89 attn_vo:H=0.8021,top10E=0.13,eRank=281.2,q75/q25=inf mlp_w1:H=0.7941,top10E=0.25,eRank=220.4,q75/q25=18.06 mlp_w2:H=0.8526,top10E=0.13,eRank=296.7,q75/q25=25.71 vo_prod:H=0.6823,top10E=0.20,eRank=136.3,q75/q25=inf train_time:686629ms step_avg:76.29ms +[2025-09-02 10:02:43] [Rank 0] PRINT: step:9000/10000 val_loss:3.7968 svd_entropy: attn_qk:H=0.7755,top10E=0.24,eRank=193.6,q75/q25=85.89 attn_vo:H=0.8021,top10E=0.13,eRank=281.2,q75/q25=inf mlp_w1:H=0.7941,top10E=0.25,eRank=220.4,q75/q25=18.06 mlp_w2:H=0.8526,top10E=0.13,eRank=296.7,q75/q25=25.71 vo_prod:H=0.6823,top10E=0.20,eRank=136.3,q75/q25=inf train_time:686629ms step_avg:76.29ms +[2025-09-02 10:02:44] [Rank 0] step:9001/10000 train_time:686639ms step_avg:76.28ms +[2025-09-02 10:02:44] [Rank 0] step:9001/10000 train_time:686639ms step_avg:76.28ms +[2025-09-02 10:02:45] [Rank 0] step:9021/10000 train_time:688126ms step_avg:76.28ms +[2025-09-02 10:02:45] [Rank 0] step:9021/10000 train_time:688126ms step_avg:76.28ms +[2025-09-02 10:02:47] [Rank 0] step:9041/10000 train_time:689751ms step_avg:76.29ms +[2025-09-02 
10:02:47] [Rank 0] step:9041/10000 train_time:689751ms step_avg:76.29ms +[2025-09-02 10:02:48] [Rank 0] step:9061/10000 train_time:691393ms step_avg:76.30ms +[2025-09-02 10:02:48] [Rank 0] step:9061/10000 train_time:691393ms step_avg:76.30ms +[2025-09-02 10:02:50] [Rank 0] step:9081/10000 train_time:693035ms step_avg:76.32ms +[2025-09-02 10:02:50] [Rank 0] step:9081/10000 train_time:693035ms step_avg:76.32ms +[2025-09-02 10:02:52] [Rank 0] step:9101/10000 train_time:694686ms step_avg:76.33ms +[2025-09-02 10:02:52] [Rank 0] step:9101/10000 train_time:694686ms step_avg:76.33ms +[2025-09-02 10:02:53] [Rank 0] step:9121/10000 train_time:696323ms step_avg:76.34ms +[2025-09-02 10:02:53] [Rank 0] step:9121/10000 train_time:696323ms step_avg:76.34ms +[2025-09-02 10:02:55] [Rank 0] step:9141/10000 train_time:697944ms step_avg:76.35ms +[2025-09-02 10:02:55] [Rank 0] step:9141/10000 train_time:697944ms step_avg:76.35ms +[2025-09-02 10:02:57] [Rank 0] step:9161/10000 train_time:699569ms step_avg:76.36ms +[2025-09-02 10:02:57] [Rank 0] step:9161/10000 train_time:699569ms step_avg:76.36ms +[2025-09-02 10:02:58] [Rank 0] step:9181/10000 train_time:701233ms step_avg:76.38ms +[2025-09-02 10:02:58] [Rank 0] step:9181/10000 train_time:701233ms step_avg:76.38ms +[2025-09-02 10:03:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:03:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 10:03:12] [Rank 0] PRINT: step:9200/10000 val_loss:3.7902 svd_entropy: attn_qk:H=0.7759,top10E=0.24,eRank=193.9,q75/q25=85.84 attn_vo:H=0.8025,top10E=0.13,eRank=281.7,q75/q25=inf mlp_w1:H=0.7946,top10E=0.24,eRank=221.0,q75/q25=18.04 mlp_w2:H=0.8529,top10E=0.13,eRank=297.4,q75/q25=25.74 vo_prod:H=0.6828,top10E=0.20,eRank=136.8,q75/q25=inf train_time:703030ms step_avg:76.42ms +[2025-09-02 10:03:12] [Rank 0] PRINT: step:9200/10000 val_loss:3.7902 svd_entropy: attn_qk:H=0.7759,top10E=0.24,eRank=193.9,q75/q25=85.84 attn_vo:H=0.8025,top10E=0.13,eRank=281.7,q75/q25=inf mlp_w1:H=0.7946,top10E=0.24,eRank=221.0,q75/q25=18.04 mlp_w2:H=0.8529,top10E=0.13,eRank=297.4,q75/q25=25.74 vo_prod:H=0.6828,top10E=0.20,eRank=136.8,q75/q25=inf train_time:703030ms step_avg:76.42ms +[2025-09-02 10:03:12] [Rank 0] step:9201/10000 train_time:703042ms step_avg:76.41ms +[2025-09-02 10:03:12] [Rank 0] step:9201/10000 train_time:703042ms step_avg:76.41ms +[2025-09-02 10:03:13] [Rank 0] step:9221/10000 train_time:704539ms step_avg:76.41ms +[2025-09-02 10:03:13] [Rank 0] step:9221/10000 train_time:704539ms step_avg:76.41ms +[2025-09-02 10:03:15] [Rank 0] step:9241/10000 train_time:706181ms step_avg:76.42ms +[2025-09-02 10:03:15] [Rank 0] step:9241/10000 train_time:706181ms step_avg:76.42ms +[2025-09-02 10:03:17] [Rank 0] step:9261/10000 train_time:707823ms step_avg:76.43ms +[2025-09-02 10:03:17] [Rank 0] step:9261/10000 train_time:707823ms step_avg:76.43ms +[2025-09-02 10:03:18] [Rank 0] step:9281/10000 train_time:709448ms step_avg:76.44ms +[2025-09-02 10:03:18] [Rank 0] step:9281/10000 train_time:709448ms step_avg:76.44ms +[2025-09-02 10:03:20] [Rank 0] step:9301/10000 train_time:711084ms step_avg:76.45ms +[2025-09-02 10:03:20] [Rank 0] step:9301/10000 train_time:711084ms step_avg:76.45ms +[2025-09-02 10:03:21] [Rank 0] step:9321/10000 train_time:712724ms step_avg:76.46ms +[2025-09-02 10:03:21] [Rank 0] step:9321/10000 train_time:712724ms step_avg:76.46ms +[2025-09-02 10:03:23] [Rank 
0] step:9341/10000 train_time:714361ms step_avg:76.48ms +[2025-09-02 10:03:23] [Rank 0] step:9341/10000 train_time:714361ms step_avg:76.48ms +[2025-09-02 10:03:25] [Rank 0] step:9361/10000 train_time:716000ms step_avg:76.49ms +[2025-09-02 10:03:25] [Rank 0] step:9361/10000 train_time:716000ms step_avg:76.49ms +[2025-09-02 10:03:26] [Rank 0] step:9381/10000 train_time:717649ms step_avg:76.50ms +[2025-09-02 10:03:26] [Rank 0] step:9381/10000 train_time:717649ms step_avg:76.50ms +[2025-09-02 10:03:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:03:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:03:40] [Rank 0] PRINT: step:9400/10000 val_loss:3.7825 svd_entropy: attn_qk:H=0.7762,top10E=0.24,eRank=194.2,q75/q25=85.76 attn_vo:H=0.8028,top10E=0.13,eRank=282.2,q75/q25=inf mlp_w1:H=0.7950,top10E=0.24,eRank=221.5,q75/q25=18.05 mlp_w2:H=0.8532,top10E=0.13,eRank=297.9,q75/q25=25.74 vo_prod:H=0.6833,top10E=0.20,eRank=137.3,q75/q25=inf train_time:719449ms step_avg:76.54ms +[2025-09-02 10:03:40] [Rank 0] PRINT: step:9400/10000 val_loss:3.7825 svd_entropy: attn_qk:H=0.7762,top10E=0.24,eRank=194.2,q75/q25=85.76 attn_vo:H=0.8028,top10E=0.13,eRank=282.2,q75/q25=inf mlp_w1:H=0.7950,top10E=0.24,eRank=221.5,q75/q25=18.05 mlp_w2:H=0.8532,top10E=0.13,eRank=297.9,q75/q25=25.74 vo_prod:H=0.6833,top10E=0.20,eRank=137.3,q75/q25=inf train_time:719449ms step_avg:76.54ms +[2025-09-02 10:03:40] [Rank 0] step:9401/10000 train_time:719460ms step_avg:76.53ms +[2025-09-02 10:03:40] [Rank 0] step:9401/10000 train_time:719460ms step_avg:76.53ms +[2025-09-02 10:03:41] [Rank 0] step:9421/10000 train_time:720936ms step_avg:76.52ms +[2025-09-02 10:03:41] [Rank 0] step:9421/10000 train_time:720936ms step_avg:76.52ms +[2025-09-02 10:03:43] [Rank 0] step:9441/10000 train_time:722570ms step_avg:76.54ms +[2025-09-02 
10:03:43] [Rank 0] step:9441/10000 train_time:722570ms step_avg:76.54ms +[2025-09-02 10:03:45] [Rank 0] step:9461/10000 train_time:724214ms step_avg:76.55ms +[2025-09-02 10:03:45] [Rank 0] step:9461/10000 train_time:724214ms step_avg:76.55ms +[2025-09-02 10:03:46] [Rank 0] step:9481/10000 train_time:725853ms step_avg:76.56ms +[2025-09-02 10:03:46] [Rank 0] step:9481/10000 train_time:725853ms step_avg:76.56ms +[2025-09-02 10:03:48] [Rank 0] step:9501/10000 train_time:727502ms step_avg:76.57ms +[2025-09-02 10:03:48] [Rank 0] step:9501/10000 train_time:727502ms step_avg:76.57ms +[2025-09-02 10:03:50] [Rank 0] step:9521/10000 train_time:729127ms step_avg:76.58ms +[2025-09-02 10:03:50] [Rank 0] step:9521/10000 train_time:729127ms step_avg:76.58ms +[2025-09-02 10:03:51] [Rank 0] step:9541/10000 train_time:730763ms step_avg:76.59ms +[2025-09-02 10:03:51] [Rank 0] step:9541/10000 train_time:730763ms step_avg:76.59ms +[2025-09-02 10:03:53] [Rank 0] step:9561/10000 train_time:732396ms step_avg:76.60ms +[2025-09-02 10:03:53] [Rank 0] step:9561/10000 train_time:732396ms step_avg:76.60ms +[2025-09-02 10:03:55] [Rank 0] step:9581/10000 train_time:734034ms step_avg:76.61ms +[2025-09-02 10:03:55] [Rank 0] step:9581/10000 train_time:734034ms step_avg:76.61ms +[2025-09-02 10:03:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:03:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 10:04:08] [Rank 0] PRINT: step:9600/10000 val_loss:3.7771 svd_entropy: attn_qk:H=0.7765,top10E=0.23,eRank=194.5,q75/q25=85.45 attn_vo:H=0.8030,top10E=0.13,eRank=282.5,q75/q25=inf mlp_w1:H=0.7953,top10E=0.24,eRank=221.9,q75/q25=18.06 mlp_w2:H=0.8535,top10E=0.13,eRank=298.5,q75/q25=25.73 vo_prod:H=0.6838,top10E=0.20,eRank=137.7,q75/q25=inf train_time:735846ms step_avg:76.65ms +[2025-09-02 10:04:08] [Rank 0] PRINT: step:9600/10000 val_loss:3.7771 svd_entropy: attn_qk:H=0.7765,top10E=0.23,eRank=194.5,q75/q25=85.45 attn_vo:H=0.8030,top10E=0.13,eRank=282.5,q75/q25=inf mlp_w1:H=0.7953,top10E=0.24,eRank=221.9,q75/q25=18.06 mlp_w2:H=0.8535,top10E=0.13,eRank=298.5,q75/q25=25.73 vo_prod:H=0.6838,top10E=0.20,eRank=137.7,q75/q25=inf train_time:735846ms step_avg:76.65ms +[2025-09-02 10:04:08] [Rank 0] step:9601/10000 train_time:735857ms step_avg:76.64ms +[2025-09-02 10:04:08] [Rank 0] step:9601/10000 train_time:735857ms step_avg:76.64ms +[2025-09-02 10:04:10] [Rank 0] step:9621/10000 train_time:737354ms step_avg:76.64ms +[2025-09-02 10:04:10] [Rank 0] step:9621/10000 train_time:737354ms step_avg:76.64ms +[2025-09-02 10:04:11] [Rank 0] step:9641/10000 train_time:738990ms step_avg:76.65ms +[2025-09-02 10:04:11] [Rank 0] step:9641/10000 train_time:738990ms step_avg:76.65ms +[2025-09-02 10:04:13] [Rank 0] step:9661/10000 train_time:740653ms step_avg:76.66ms +[2025-09-02 10:04:13] [Rank 0] step:9661/10000 train_time:740653ms step_avg:76.66ms +[2025-09-02 10:04:15] [Rank 0] step:9681/10000 train_time:742309ms step_avg:76.68ms +[2025-09-02 10:04:15] [Rank 0] step:9681/10000 train_time:742309ms step_avg:76.68ms +[2025-09-02 10:04:16] [Rank 0] step:9701/10000 train_time:743981ms step_avg:76.69ms +[2025-09-02 10:04:16] [Rank 0] step:9701/10000 train_time:743981ms step_avg:76.69ms +[2025-09-02 10:04:18] [Rank 0] step:9721/10000 train_time:745631ms step_avg:76.70ms +[2025-09-02 10:04:18] [Rank 0] step:9721/10000 train_time:745631ms step_avg:76.70ms +[2025-09-02 10:04:20] [Rank 
0] step:9741/10000 train_time:747309ms step_avg:76.72ms +[2025-09-02 10:04:20] [Rank 0] step:9741/10000 train_time:747309ms step_avg:76.72ms +[2025-09-02 10:04:21] [Rank 0] step:9761/10000 train_time:748966ms step_avg:76.73ms +[2025-09-02 10:04:21] [Rank 0] step:9761/10000 train_time:748966ms step_avg:76.73ms +[2025-09-02 10:04:23] [Rank 0] step:9781/10000 train_time:750638ms step_avg:76.74ms +[2025-09-02 10:04:23] [Rank 0] step:9781/10000 train_time:750638ms step_avg:76.74ms +[2025-09-02 10:04:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:04:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:04:36] [Rank 0] PRINT: step:9800/10000 val_loss:3.7721 svd_entropy: attn_qk:H=0.7767,top10E=0.23,eRank=194.7,q75/q25=85.54 attn_vo:H=0.8032,top10E=0.13,eRank=282.8,q75/q25=inf mlp_w1:H=0.7955,top10E=0.24,eRank=222.2,q75/q25=18.06 mlp_w2:H=0.8537,top10E=0.13,eRank=298.9,q75/q25=25.68 vo_prod:H=0.6841,top10E=0.20,eRank=138.1,q75/q25=inf train_time:752477ms step_avg:76.78ms +[2025-09-02 10:04:36] [Rank 0] PRINT: step:9800/10000 val_loss:3.7721 svd_entropy: attn_qk:H=0.7767,top10E=0.23,eRank=194.7,q75/q25=85.54 attn_vo:H=0.8032,top10E=0.13,eRank=282.8,q75/q25=inf mlp_w1:H=0.7955,top10E=0.24,eRank=222.2,q75/q25=18.06 mlp_w2:H=0.8537,top10E=0.13,eRank=298.9,q75/q25=25.68 vo_prod:H=0.6841,top10E=0.20,eRank=138.1,q75/q25=inf train_time:752477ms step_avg:76.78ms +[2025-09-02 10:04:36] [Rank 0] step:9801/10000 train_time:752488ms step_avg:76.78ms +[2025-09-02 10:04:36] [Rank 0] step:9801/10000 train_time:752488ms step_avg:76.78ms +[2025-09-02 10:04:38] [Rank 0] step:9821/10000 train_time:753986ms step_avg:76.77ms +[2025-09-02 10:04:38] [Rank 0] step:9821/10000 train_time:753986ms step_avg:76.77ms +[2025-09-02 10:04:40] [Rank 0] step:9841/10000 train_time:755656ms step_avg:76.79ms +[2025-09-02 
10:04:40] [Rank 0] step:9841/10000 train_time:755656ms step_avg:76.79ms +[2025-09-02 10:04:41] [Rank 0] step:9861/10000 train_time:757303ms step_avg:76.80ms +[2025-09-02 10:04:41] [Rank 0] step:9861/10000 train_time:757303ms step_avg:76.80ms +[2025-09-02 10:04:43] [Rank 0] step:9881/10000 train_time:758954ms step_avg:76.81ms +[2025-09-02 10:04:43] [Rank 0] step:9881/10000 train_time:758954ms step_avg:76.81ms +[2025-09-02 10:04:45] [Rank 0] step:9901/10000 train_time:760618ms step_avg:76.82ms +[2025-09-02 10:04:45] [Rank 0] step:9901/10000 train_time:760618ms step_avg:76.82ms +[2025-09-02 10:04:46] [Rank 0] step:9921/10000 train_time:762271ms step_avg:76.83ms +[2025-09-02 10:04:46] [Rank 0] step:9921/10000 train_time:762271ms step_avg:76.83ms +[2025-09-02 10:04:48] [Rank 0] step:9941/10000 train_time:763935ms step_avg:76.85ms +[2025-09-02 10:04:48] [Rank 0] step:9941/10000 train_time:763935ms step_avg:76.85ms +[2025-09-02 10:04:50] [Rank 0] step:9961/10000 train_time:765593ms step_avg:76.86ms +[2025-09-02 10:04:50] [Rank 0] step:9961/10000 train_time:765593ms step_avg:76.86ms +[2025-09-02 10:04:51] [Rank 0] step:9981/10000 train_time:767248ms step_avg:76.87ms +[2025-09-02 10:04:51] [Rank 0] step:9981/10000 train_time:767248ms step_avg:76.87ms +[2025-09-02 10:04:53] [Rank 0] step:10000/10000 train_time:768833ms step_avg:76.88ms +[2025-09-02 10:04:53] [Rank 0] step:10000/10000 train_time:768833ms step_avg:76.88ms +[2025-09-02 10:04:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 10:04:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 10:05:04] [Rank 0] PRINT: step:10000/10000 val_loss:3.7663 svd_entropy: attn_qk:H=0.7768,top10E=0.23,eRank=194.9,q75/q25=85.53 attn_vo:H=0.8033,top10E=0.13,eRank=283.0,q75/q25=inf mlp_w1:H=0.7957,top10E=0.24,eRank=222.4,q75/q25=18.05 mlp_w2:H=0.8538,top10E=0.13,eRank=299.2,q75/q25=25.68 vo_prod:H=0.6844,top10E=0.20,eRank=138.3,q75/q25=inf train_time:769088ms step_avg:76.91ms +[2025-09-02 10:05:04] [Rank 0] PRINT: step:10000/10000 val_loss:3.7663 svd_entropy: attn_qk:H=0.7768,top10E=0.23,eRank=194.9,q75/q25=85.53 attn_vo:H=0.8033,top10E=0.13,eRank=283.0,q75/q25=inf mlp_w1:H=0.7957,top10E=0.24,eRank=222.4,q75/q25=18.05 mlp_w2:H=0.8538,top10E=0.13,eRank=299.2,q75/q25=25.68 vo_prod:H=0.6844,top10E=0.20,eRank=138.3,q75/q25=inf train_time:769088ms step_avg:76.91ms +[2025-09-02 10:05:04] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 10:05:04 2025 --- +[2025-09-02 10:05:04] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 10:05:04 2025 --- +[2025-09-02 10:05:04] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 10:05:04] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_46/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6d8de4df1c78611fa87d17a9bc67c145ed1e7164 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_46/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "c0d5a767-4025-4c8b-8007-7d02537a13ff", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_46/training_log_c0d5a767-4025-4c8b-8007-7d02537a13ff.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_46/training_log_c0d5a767-4025-4c8b-8007-7d02537a13ff.txt new file mode 100644 index 0000000000000000000000000000000000000000..766dfb85dfc73aad6d2613b3b12d331e0912dce1 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_46/training_log_c0d5a767-4025-4c8b-8007-7d02537a13ff.txt @@ -0,0 +1,2984 @@ +[2025-09-02 14:15:31] [Rank 0] PRINT: --- Script Start: Tue Sep 2 14:15:31 2025 --- +[2025-09-02 14:15:31] [Rank 0] PRINT: --- Script Start: Tue Sep 2 14:15:31 2025 --- +[2025-09-02 14:15:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 14:15:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 14:15:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 14:15:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 14:15:31] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-02 14:15:31] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-02 14:15:31] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_46 +[2025-09-02 14:15:31] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_46 +[2025-09-02 14:15:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 14:15:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 14:15:31] [Rank 0] PRINT: Constructing model... +[2025-09-02 14:15:31] [Rank 0] PRINT: Constructing model... +[2025-09-02 14:15:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 14:15:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 14:15:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 14:15:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 14:15:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 14:15:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 14:15:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 14:15:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 14:15:33] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 14:15:33] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 14:15:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 14:15:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 14:15:33] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 14:15:33] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 14:15:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 14:15:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 14:15:33] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 14:15:33] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 14:15:33] [Rank 0] PRINT: Starting warmup... +[2025-09-02 14:15:33] [Rank 0] PRINT: Starting warmup... +[2025-09-02 14:19:28] [Rank 0] PRINT: Warmup complete. +[2025-09-02 14:19:28] [Rank 0] PRINT: Warmup complete. +[2025-09-02 14:19:28] [Rank 0] PRINT: Starting training... +[2025-09-02 14:19:28] [Rank 0] PRINT: Starting training... 
+[2025-09-02 14:19:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:19:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:19:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 14:19:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 14:19:52] [Rank 0] step:21/10000 train_time:1290ms step_avg:61.43ms +[2025-09-02 14:19:52] [Rank 0] step:21/10000 train_time:1290ms step_avg:61.43ms +[2025-09-02 14:19:53] [Rank 0] step:41/10000 train_time:2684ms step_avg:65.46ms +[2025-09-02 14:19:53] [Rank 0] step:41/10000 train_time:2684ms step_avg:65.46ms +[2025-09-02 14:19:55] [Rank 0] step:61/10000 train_time:4083ms step_avg:66.93ms +[2025-09-02 14:19:55] [Rank 0] step:61/10000 train_time:4083ms step_avg:66.93ms +[2025-09-02 14:19:56] [Rank 0] step:81/10000 train_time:5486ms step_avg:67.72ms +[2025-09-02 14:19:56] [Rank 0] step:81/10000 train_time:5486ms step_avg:67.72ms +[2025-09-02 14:19:57] [Rank 0] step:101/10000 train_time:6890ms step_avg:68.21ms +[2025-09-02 14:19:57] [Rank 0] step:101/10000 train_time:6890ms step_avg:68.21ms +[2025-09-02 14:19:59] [Rank 0] step:121/10000 train_time:8294ms step_avg:68.54ms +[2025-09-02 14:19:59] [Rank 0] step:121/10000 
train_time:8294ms step_avg:68.54ms +[2025-09-02 14:20:00] [Rank 0] step:141/10000 train_time:9699ms step_avg:68.78ms +[2025-09-02 14:20:00] [Rank 0] step:141/10000 train_time:9699ms step_avg:68.78ms +[2025-09-02 14:20:02] [Rank 0] step:161/10000 train_time:11106ms step_avg:68.98ms +[2025-09-02 14:20:02] [Rank 0] step:161/10000 train_time:11106ms step_avg:68.98ms +[2025-09-02 14:20:03] [Rank 0] step:181/10000 train_time:12518ms step_avg:69.16ms +[2025-09-02 14:20:03] [Rank 0] step:181/10000 train_time:12518ms step_avg:69.16ms +[2025-09-02 14:20:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:20:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:20:16] [Rank 0] PRINT: step:200/10000 val_loss:6.5631 svd_entropy: attn_qk:H=0.4918,top10E=0.73,eRank=73.9,q75/q25=12.02 attn_vo:H=0.4456,top10E=0.66,eRank=62.4,q75/q25=inf mlp_w1:H=0.4068,top10E=0.78,eRank=16.2,q75/q25=2.64 mlp_w2:H=0.1576,top10E=0.96,eRank=3.8,q75/q25=91.43 vo_prod:H=0.1894,top10E=0.87,eRank=6.2,q75/q25=inf train_time:14067ms step_avg:70.33ms +[2025-09-02 14:20:16] [Rank 0] PRINT: step:200/10000 val_loss:6.5631 svd_entropy: attn_qk:H=0.4918,top10E=0.73,eRank=73.9,q75/q25=12.02 attn_vo:H=0.4456,top10E=0.66,eRank=62.4,q75/q25=inf mlp_w1:H=0.4068,top10E=0.78,eRank=16.2,q75/q25=2.64 mlp_w2:H=0.1576,top10E=0.96,eRank=3.8,q75/q25=91.43 vo_prod:H=0.1894,top10E=0.87,eRank=6.2,q75/q25=inf train_time:14067ms step_avg:70.33ms +[2025-09-02 14:20:16] [Rank 0] step:201/10000 train_time:14079ms step_avg:70.04ms +[2025-09-02 14:20:16] [Rank 0] step:201/10000 train_time:14079ms step_avg:70.04ms +[2025-09-02 14:20:18] [Rank 0] step:221/10000 train_time:15365ms step_avg:69.52ms +[2025-09-02 14:20:18] [Rank 0] step:221/10000 train_time:15365ms step_avg:69.52ms +[2025-09-02 14:20:19] [Rank 0] step:241/10000 train_time:16771ms 
step_avg:69.59ms +[2025-09-02 14:20:19] [Rank 0] step:241/10000 train_time:16771ms step_avg:69.59ms +[2025-09-02 14:20:21] [Rank 0] step:261/10000 train_time:18179ms step_avg:69.65ms +[2025-09-02 14:20:21] [Rank 0] step:261/10000 train_time:18179ms step_avg:69.65ms +[2025-09-02 14:20:22] [Rank 0] step:281/10000 train_time:19586ms step_avg:69.70ms +[2025-09-02 14:20:22] [Rank 0] step:281/10000 train_time:19586ms step_avg:69.70ms +[2025-09-02 14:20:23] [Rank 0] step:301/10000 train_time:20994ms step_avg:69.75ms +[2025-09-02 14:20:23] [Rank 0] step:301/10000 train_time:20994ms step_avg:69.75ms +[2025-09-02 14:20:25] [Rank 0] step:321/10000 train_time:22403ms step_avg:69.79ms +[2025-09-02 14:20:25] [Rank 0] step:321/10000 train_time:22403ms step_avg:69.79ms +[2025-09-02 14:20:26] [Rank 0] step:341/10000 train_time:23809ms step_avg:69.82ms +[2025-09-02 14:20:26] [Rank 0] step:341/10000 train_time:23809ms step_avg:69.82ms +[2025-09-02 14:20:28] [Rank 0] step:361/10000 train_time:25216ms step_avg:69.85ms +[2025-09-02 14:20:28] [Rank 0] step:361/10000 train_time:25216ms step_avg:69.85ms +[2025-09-02 14:20:29] [Rank 0] step:381/10000 train_time:26625ms step_avg:69.88ms +[2025-09-02 14:20:29] [Rank 0] step:381/10000 train_time:26625ms step_avg:69.88ms +[2025-09-02 14:20:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:20:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:20:42] [Rank 0] PRINT: step:400/10000 val_loss:6.0285 svd_entropy: attn_qk:H=0.5389,top10E=0.65,eRank=81.4,q75/q25=13.31 attn_vo:H=0.5130,top10E=0.55,eRank=74.5,q75/q25=inf mlp_w1:H=0.4352,top10E=0.72,eRank=24.4,q75/q25=3.27 mlp_w2:H=0.5240,top10E=0.62,eRank=33.1,q75/q25=15.42 vo_prod:H=0.3263,top10E=0.81,eRank=13.4,q75/q25=inf train_time:28277ms step_avg:70.69ms +[2025-09-02 14:20:42] [Rank 0] PRINT: step:400/10000 val_loss:6.0285 svd_entropy: attn_qk:H=0.5389,top10E=0.65,eRank=81.4,q75/q25=13.31 attn_vo:H=0.5130,top10E=0.55,eRank=74.5,q75/q25=inf mlp_w1:H=0.4352,top10E=0.72,eRank=24.4,q75/q25=3.27 mlp_w2:H=0.5240,top10E=0.62,eRank=33.1,q75/q25=15.42 vo_prod:H=0.3263,top10E=0.81,eRank=13.4,q75/q25=inf train_time:28277ms step_avg:70.69ms +[2025-09-02 14:20:43] [Rank 0] step:401/10000 train_time:28289ms step_avg:70.54ms +[2025-09-02 14:20:43] [Rank 0] step:401/10000 train_time:28289ms step_avg:70.54ms +[2025-09-02 14:20:44] [Rank 0] step:421/10000 train_time:29564ms step_avg:70.22ms +[2025-09-02 14:20:44] [Rank 0] step:421/10000 train_time:29564ms step_avg:70.22ms +[2025-09-02 14:20:45] [Rank 0] step:441/10000 train_time:30970ms step_avg:70.23ms +[2025-09-02 14:20:45] [Rank 0] step:441/10000 train_time:30970ms step_avg:70.23ms +[2025-09-02 14:20:47] [Rank 0] step:461/10000 train_time:32478ms step_avg:70.45ms +[2025-09-02 14:20:47] [Rank 0] step:461/10000 train_time:32478ms step_avg:70.45ms +[2025-09-02 14:20:48] [Rank 0] step:481/10000 train_time:33886ms step_avg:70.45ms +[2025-09-02 14:20:48] [Rank 0] step:481/10000 train_time:33886ms step_avg:70.45ms +[2025-09-02 14:20:50] [Rank 0] step:501/10000 train_time:35294ms step_avg:70.45ms +[2025-09-02 14:20:50] [Rank 0] step:501/10000 train_time:35294ms step_avg:70.45ms +[2025-09-02 14:20:51] [Rank 0] step:521/10000 train_time:36701ms step_avg:70.44ms +[2025-09-02 14:20:51] [Rank 0] step:521/10000 train_time:36701ms step_avg:70.44ms +[2025-09-02 14:20:52] [Rank 0] step:541/10000 train_time:38109ms 
step_avg:70.44ms +[2025-09-02 14:20:52] [Rank 0] step:541/10000 train_time:38109ms step_avg:70.44ms +[2025-09-02 14:20:54] [Rank 0] step:561/10000 train_time:39517ms step_avg:70.44ms +[2025-09-02 14:20:54] [Rank 0] step:561/10000 train_time:39517ms step_avg:70.44ms +[2025-09-02 14:20:55] [Rank 0] step:581/10000 train_time:40926ms step_avg:70.44ms +[2025-09-02 14:20:55] [Rank 0] step:581/10000 train_time:40926ms step_avg:70.44ms +[2025-09-02 14:20:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:20:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:21:08] [Rank 0] PRINT: step:600/10000 val_loss:5.7296 svd_entropy: attn_qk:H=0.5698,top10E=0.58,eRank=87.7,q75/q25=14.76 attn_vo:H=0.5519,top10E=0.48,eRank=85.9,q75/q25=inf mlp_w1:H=0.4849,top10E=0.65,eRank=35.7,q75/q25=3.85 mlp_w2:H=0.6248,top10E=0.47,eRank=64.0,q75/q25=9.52 vo_prod:H=0.3902,top10E=0.71,eRank=19.2,q75/q25=inf train_time:42476ms step_avg:70.79ms +[2025-09-02 14:21:08] [Rank 0] PRINT: step:600/10000 val_loss:5.7296 svd_entropy: attn_qk:H=0.5698,top10E=0.58,eRank=87.7,q75/q25=14.76 attn_vo:H=0.5519,top10E=0.48,eRank=85.9,q75/q25=inf mlp_w1:H=0.4849,top10E=0.65,eRank=35.7,q75/q25=3.85 mlp_w2:H=0.6248,top10E=0.47,eRank=64.0,q75/q25=9.52 vo_prod:H=0.3902,top10E=0.71,eRank=19.2,q75/q25=inf train_time:42476ms step_avg:70.79ms +[2025-09-02 14:21:09] [Rank 0] step:601/10000 train_time:42488ms step_avg:70.70ms +[2025-09-02 14:21:09] [Rank 0] step:601/10000 train_time:42488ms step_avg:70.70ms +[2025-09-02 14:21:10] [Rank 0] step:621/10000 train_time:43762ms step_avg:70.47ms +[2025-09-02 14:21:10] [Rank 0] step:621/10000 train_time:43762ms step_avg:70.47ms +[2025-09-02 14:21:11] [Rank 0] step:641/10000 train_time:45167ms step_avg:70.46ms +[2025-09-02 14:21:11] [Rank 0] step:641/10000 train_time:45167ms step_avg:70.46ms 
+[2025-09-02 14:21:13] [Rank 0] step:661/10000 train_time:46574ms step_avg:70.46ms +[2025-09-02 14:21:13] [Rank 0] step:661/10000 train_time:46574ms step_avg:70.46ms +[2025-09-02 14:21:14] [Rank 0] step:681/10000 train_time:48112ms step_avg:70.65ms +[2025-09-02 14:21:14] [Rank 0] step:681/10000 train_time:48112ms step_avg:70.65ms +[2025-09-02 14:21:16] [Rank 0] step:701/10000 train_time:49424ms step_avg:70.51ms +[2025-09-02 14:21:16] [Rank 0] step:701/10000 train_time:49424ms step_avg:70.51ms +[2025-09-02 14:21:17] [Rank 0] step:721/10000 train_time:50831ms step_avg:70.50ms +[2025-09-02 14:21:17] [Rank 0] step:721/10000 train_time:50831ms step_avg:70.50ms +[2025-09-02 14:21:18] [Rank 0] step:741/10000 train_time:52240ms step_avg:70.50ms +[2025-09-02 14:21:18] [Rank 0] step:741/10000 train_time:52240ms step_avg:70.50ms +[2025-09-02 14:21:20] [Rank 0] step:761/10000 train_time:53658ms step_avg:70.51ms +[2025-09-02 14:21:20] [Rank 0] step:761/10000 train_time:53658ms step_avg:70.51ms +[2025-09-02 14:21:21] [Rank 0] step:781/10000 train_time:55079ms step_avg:70.52ms +[2025-09-02 14:21:21] [Rank 0] step:781/10000 train_time:55079ms step_avg:70.52ms +[2025-09-02 14:21:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:21:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:21:34] [Rank 0] PRINT: step:800/10000 val_loss:5.4984 svd_entropy: attn_qk:H=0.5929,top10E=0.53,eRank=92.8,q75/q25=16.42 attn_vo:H=0.5794,top10E=0.44,eRank=96.4,q75/q25=inf mlp_w1:H=0.5213,top10E=0.60,eRank=45.4,q75/q25=4.29 mlp_w2:H=0.6720,top10E=0.39,eRank=87.3,q75/q25=9.10 vo_prod:H=0.4315,top10E=0.64,eRank=24.7,q75/q25=inf train_time:56643ms step_avg:70.80ms +[2025-09-02 14:21:34] [Rank 0] PRINT: step:800/10000 val_loss:5.4984 svd_entropy: attn_qk:H=0.5929,top10E=0.53,eRank=92.8,q75/q25=16.42 attn_vo:H=0.5794,top10E=0.44,eRank=96.4,q75/q25=inf mlp_w1:H=0.5213,top10E=0.60,eRank=45.4,q75/q25=4.29 mlp_w2:H=0.6720,top10E=0.39,eRank=87.3,q75/q25=9.10 vo_prod:H=0.4315,top10E=0.64,eRank=24.7,q75/q25=inf train_time:56643ms step_avg:70.80ms +[2025-09-02 14:21:34] [Rank 0] step:801/10000 train_time:56654ms step_avg:70.73ms +[2025-09-02 14:21:34] [Rank 0] step:801/10000 train_time:56654ms step_avg:70.73ms +[2025-09-02 14:21:36] [Rank 0] step:821/10000 train_time:57964ms step_avg:70.60ms +[2025-09-02 14:21:36] [Rank 0] step:821/10000 train_time:57964ms step_avg:70.60ms +[2025-09-02 14:21:37] [Rank 0] step:841/10000 train_time:59482ms step_avg:70.73ms +[2025-09-02 14:21:37] [Rank 0] step:841/10000 train_time:59482ms step_avg:70.73ms +[2025-09-02 14:21:39] [Rank 0] step:861/10000 train_time:60899ms step_avg:70.73ms +[2025-09-02 14:21:39] [Rank 0] step:861/10000 train_time:60899ms step_avg:70.73ms +[2025-09-02 14:21:40] [Rank 0] step:881/10000 train_time:62316ms step_avg:70.73ms +[2025-09-02 14:21:40] [Rank 0] step:881/10000 train_time:62316ms step_avg:70.73ms +[2025-09-02 14:21:42] [Rank 0] step:901/10000 train_time:63735ms step_avg:70.74ms +[2025-09-02 14:21:42] [Rank 0] step:901/10000 train_time:63735ms step_avg:70.74ms +[2025-09-02 14:21:43] [Rank 0] step:921/10000 train_time:65155ms step_avg:70.74ms +[2025-09-02 14:21:43] [Rank 0] step:921/10000 train_time:65155ms step_avg:70.74ms +[2025-09-02 14:21:44] [Rank 0] step:941/10000 train_time:66576ms 
step_avg:70.75ms +[2025-09-02 14:21:44] [Rank 0] step:941/10000 train_time:66576ms step_avg:70.75ms +[2025-09-02 14:21:46] [Rank 0] step:961/10000 train_time:67996ms step_avg:70.75ms +[2025-09-02 14:21:46] [Rank 0] step:961/10000 train_time:67996ms step_avg:70.75ms +[2025-09-02 14:21:47] [Rank 0] step:981/10000 train_time:69417ms step_avg:70.76ms +[2025-09-02 14:21:47] [Rank 0] step:981/10000 train_time:69417ms step_avg:70.76ms +[2025-09-02 14:21:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:21:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:22:00] [Rank 0] PRINT: step:1000/10000 val_loss:5.3383 svd_entropy: attn_qk:H=0.6106,top10E=0.50,eRank=97.6,q75/q25=18.43 attn_vo:H=0.6012,top10E=0.41,eRank=106.7,q75/q25=inf mlp_w1:H=0.5524,top10E=0.57,eRank=54.3,q75/q25=4.71 mlp_w2:H=0.7013,top10E=0.34,eRank=106.0,q75/q25=9.73 vo_prod:H=0.4580,top10E=0.58,eRank=29.1,q75/q25=inf train_time:70981ms step_avg:70.98ms +[2025-09-02 14:22:00] [Rank 0] PRINT: step:1000/10000 val_loss:5.3383 svd_entropy: attn_qk:H=0.6106,top10E=0.50,eRank=97.6,q75/q25=18.43 attn_vo:H=0.6012,top10E=0.41,eRank=106.7,q75/q25=inf mlp_w1:H=0.5524,top10E=0.57,eRank=54.3,q75/q25=4.71 mlp_w2:H=0.7013,top10E=0.34,eRank=106.0,q75/q25=9.73 vo_prod:H=0.4580,top10E=0.58,eRank=29.1,q75/q25=inf train_time:70981ms step_avg:70.98ms +[2025-09-02 14:22:00] [Rank 0] step:1001/10000 train_time:70991ms step_avg:70.92ms +[2025-09-02 14:22:00] [Rank 0] step:1001/10000 train_time:70991ms step_avg:70.92ms +[2025-09-02 14:22:02] [Rank 0] step:1021/10000 train_time:72274ms step_avg:70.79ms +[2025-09-02 14:22:02] [Rank 0] step:1021/10000 train_time:72274ms step_avg:70.79ms +[2025-09-02 14:22:03] [Rank 0] step:1041/10000 train_time:73694ms step_avg:70.79ms +[2025-09-02 14:22:03] [Rank 0] step:1041/10000 train_time:73694ms 
step_avg:70.79ms +[2025-09-02 14:22:05] [Rank 0] step:1061/10000 train_time:75114ms step_avg:70.80ms +[2025-09-02 14:22:05] [Rank 0] step:1061/10000 train_time:75114ms step_avg:70.80ms +[2025-09-02 14:22:06] [Rank 0] step:1081/10000 train_time:76533ms step_avg:70.80ms +[2025-09-02 14:22:06] [Rank 0] step:1081/10000 train_time:76533ms step_avg:70.80ms +[2025-09-02 14:22:08] [Rank 0] step:1101/10000 train_time:77953ms step_avg:70.80ms +[2025-09-02 14:22:08] [Rank 0] step:1101/10000 train_time:77953ms step_avg:70.80ms +[2025-09-02 14:22:09] [Rank 0] step:1121/10000 train_time:79374ms step_avg:70.81ms +[2025-09-02 14:22:09] [Rank 0] step:1121/10000 train_time:79374ms step_avg:70.81ms +[2025-09-02 14:22:10] [Rank 0] step:1141/10000 train_time:80897ms step_avg:70.90ms +[2025-09-02 14:22:10] [Rank 0] step:1141/10000 train_time:80897ms step_avg:70.90ms +[2025-09-02 14:22:12] [Rank 0] step:1161/10000 train_time:82318ms step_avg:70.90ms +[2025-09-02 14:22:12] [Rank 0] step:1161/10000 train_time:82318ms step_avg:70.90ms +[2025-09-02 14:22:13] [Rank 0] step:1181/10000 train_time:83739ms step_avg:70.91ms +[2025-09-02 14:22:13] [Rank 0] step:1181/10000 train_time:83739ms step_avg:70.91ms +[2025-09-02 14:22:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:22:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:22:27] [Rank 0] PRINT: step:1200/10000 val_loss:5.1804 svd_entropy: attn_qk:H=0.6255,top10E=0.47,eRank=102.2,q75/q25=20.95 attn_vo:H=0.6204,top10E=0.38,eRank=117.6,q75/q25=inf mlp_w1:H=0.5764,top10E=0.54,eRank=62.2,q75/q25=5.22 mlp_w2:H=0.7228,top10E=0.30,eRank=122.2,q75/q25=11.12 vo_prod:H=0.4793,top10E=0.54,eRank=33.6,q75/q25=inf train_time:85403ms step_avg:71.17ms +[2025-09-02 14:22:27] [Rank 0] PRINT: step:1200/10000 val_loss:5.1804 svd_entropy: attn_qk:H=0.6255,top10E=0.47,eRank=102.2,q75/q25=20.95 attn_vo:H=0.6204,top10E=0.38,eRank=117.6,q75/q25=inf mlp_w1:H=0.5764,top10E=0.54,eRank=62.2,q75/q25=5.22 mlp_w2:H=0.7228,top10E=0.30,eRank=122.2,q75/q25=11.12 vo_prod:H=0.4793,top10E=0.54,eRank=33.6,q75/q25=inf train_time:85403ms step_avg:71.17ms +[2025-09-02 14:22:27] [Rank 0] step:1201/10000 train_time:85414ms step_avg:71.12ms +[2025-09-02 14:22:27] [Rank 0] step:1201/10000 train_time:85414ms step_avg:71.12ms +[2025-09-02 14:22:28] [Rank 0] step:1221/10000 train_time:86709ms step_avg:71.01ms +[2025-09-02 14:22:28] [Rank 0] step:1221/10000 train_time:86709ms step_avg:71.01ms +[2025-09-02 14:22:30] [Rank 0] step:1241/10000 train_time:88128ms step_avg:71.01ms +[2025-09-02 14:22:30] [Rank 0] step:1241/10000 train_time:88128ms step_avg:71.01ms +[2025-09-02 14:22:31] [Rank 0] step:1261/10000 train_time:89546ms step_avg:71.01ms +[2025-09-02 14:22:31] [Rank 0] step:1261/10000 train_time:89546ms step_avg:71.01ms +[2025-09-02 14:22:32] [Rank 0] step:1281/10000 train_time:90964ms step_avg:71.01ms +[2025-09-02 14:22:32] [Rank 0] step:1281/10000 train_time:90964ms step_avg:71.01ms +[2025-09-02 14:22:34] [Rank 0] step:1301/10000 train_time:92386ms step_avg:71.01ms +[2025-09-02 14:22:34] [Rank 0] step:1301/10000 train_time:92386ms step_avg:71.01ms +[2025-09-02 14:22:35] [Rank 0] step:1321/10000 train_time:93807ms step_avg:71.01ms +[2025-09-02 14:22:35] [Rank 0] step:1321/10000 train_time:93807ms step_avg:71.01ms +[2025-09-02 14:22:37] [Rank 0] step:1341/10000 
train_time:95230ms step_avg:71.01ms +[2025-09-02 14:22:37] [Rank 0] step:1341/10000 train_time:95230ms step_avg:71.01ms +[2025-09-02 14:22:38] [Rank 0] step:1361/10000 train_time:96651ms step_avg:71.01ms +[2025-09-02 14:22:38] [Rank 0] step:1361/10000 train_time:96651ms step_avg:71.01ms +[2025-09-02 14:22:39] [Rank 0] step:1381/10000 train_time:98072ms step_avg:71.02ms +[2025-09-02 14:22:39] [Rank 0] step:1381/10000 train_time:98072ms step_avg:71.02ms +[2025-09-02 14:22:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:22:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:22:53] [Rank 0] PRINT: step:1400/10000 val_loss:5.0487 svd_entropy: attn_qk:H=0.6389,top10E=0.44,eRank=106.9,q75/q25=24.16 attn_vo:H=0.6372,top10E=0.36,eRank=128.4,q75/q25=inf mlp_w1:H=0.5973,top10E=0.51,eRank=69.8,q75/q25=5.76 mlp_w2:H=0.7401,top10E=0.27,eRank=137.3,q75/q25=12.90 vo_prod:H=0.4966,top10E=0.51,eRank=37.8,q75/q25=inf train_time:99638ms step_avg:71.17ms +[2025-09-02 14:22:53] [Rank 0] PRINT: step:1400/10000 val_loss:5.0487 svd_entropy: attn_qk:H=0.6389,top10E=0.44,eRank=106.9,q75/q25=24.16 attn_vo:H=0.6372,top10E=0.36,eRank=128.4,q75/q25=inf mlp_w1:H=0.5973,top10E=0.51,eRank=69.8,q75/q25=5.76 mlp_w2:H=0.7401,top10E=0.27,eRank=137.3,q75/q25=12.90 vo_prod:H=0.4966,top10E=0.51,eRank=37.8,q75/q25=inf train_time:99638ms step_avg:71.17ms +[2025-09-02 14:22:53] [Rank 0] step:1401/10000 train_time:99649ms step_avg:71.13ms +[2025-09-02 14:22:53] [Rank 0] step:1401/10000 train_time:99649ms step_avg:71.13ms +[2025-09-02 14:22:54] [Rank 0] step:1421/10000 train_time:100931ms step_avg:71.03ms +[2025-09-02 14:22:54] [Rank 0] step:1421/10000 train_time:100931ms step_avg:71.03ms +[2025-09-02 14:22:56] [Rank 0] step:1441/10000 train_time:102353ms step_avg:71.03ms +[2025-09-02 14:22:56] [Rank 0] step:1441/10000 
train_time:102353ms step_avg:71.03ms +[2025-09-02 14:22:57] [Rank 0] step:1461/10000 train_time:103774ms step_avg:71.03ms +[2025-09-02 14:22:57] [Rank 0] step:1461/10000 train_time:103774ms step_avg:71.03ms +[2025-09-02 14:22:58] [Rank 0] step:1481/10000 train_time:105196ms step_avg:71.03ms +[2025-09-02 14:22:58] [Rank 0] step:1481/10000 train_time:105196ms step_avg:71.03ms +[2025-09-02 14:23:00] [Rank 0] step:1501/10000 train_time:106627ms step_avg:71.04ms +[2025-09-02 14:23:00] [Rank 0] step:1501/10000 train_time:106627ms step_avg:71.04ms +[2025-09-02 14:23:01] [Rank 0] step:1521/10000 train_time:108061ms step_avg:71.05ms +[2025-09-02 14:23:01] [Rank 0] step:1521/10000 train_time:108061ms step_avg:71.05ms +[2025-09-02 14:23:03] [Rank 0] step:1541/10000 train_time:109495ms step_avg:71.05ms +[2025-09-02 14:23:03] [Rank 0] step:1541/10000 train_time:109495ms step_avg:71.05ms +[2025-09-02 14:23:04] [Rank 0] step:1561/10000 train_time:110930ms step_avg:71.06ms +[2025-09-02 14:23:04] [Rank 0] step:1561/10000 train_time:110930ms step_avg:71.06ms +[2025-09-02 14:23:06] [Rank 0] step:1581/10000 train_time:112365ms step_avg:71.07ms +[2025-09-02 14:23:06] [Rank 0] step:1581/10000 train_time:112365ms step_avg:71.07ms +[2025-09-02 14:23:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:23:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:23:19] [Rank 0] PRINT: step:1600/10000 val_loss:4.9049 svd_entropy: attn_qk:H=0.6499,top10E=0.42,eRank=110.9,q75/q25=28.19 attn_vo:H=0.6520,top10E=0.34,eRank=138.9,q75/q25=inf mlp_w1:H=0.6152,top10E=0.49,eRank=77.2,q75/q25=6.40 mlp_w2:H=0.7548,top10E=0.25,eRank=151.6,q75/q25=14.99 vo_prod:H=0.5128,top10E=0.48,eRank=42.2,q75/q25=inf train_time:113946ms step_avg:71.22ms +[2025-09-02 14:23:19] [Rank 0] PRINT: step:1600/10000 val_loss:4.9049 svd_entropy: attn_qk:H=0.6499,top10E=0.42,eRank=110.9,q75/q25=28.19 attn_vo:H=0.6520,top10E=0.34,eRank=138.9,q75/q25=inf mlp_w1:H=0.6152,top10E=0.49,eRank=77.2,q75/q25=6.40 mlp_w2:H=0.7548,top10E=0.25,eRank=151.6,q75/q25=14.99 vo_prod:H=0.5128,top10E=0.48,eRank=42.2,q75/q25=inf train_time:113946ms step_avg:71.22ms +[2025-09-02 14:23:19] [Rank 0] step:1601/10000 train_time:113957ms step_avg:71.18ms +[2025-09-02 14:23:19] [Rank 0] step:1601/10000 train_time:113957ms step_avg:71.18ms +[2025-09-02 14:23:20] [Rank 0] step:1621/10000 train_time:115267ms step_avg:71.11ms +[2025-09-02 14:23:20] [Rank 0] step:1621/10000 train_time:115267ms step_avg:71.11ms +[2025-09-02 14:23:22] [Rank 0] step:1641/10000 train_time:116751ms step_avg:71.15ms +[2025-09-02 14:23:22] [Rank 0] step:1641/10000 train_time:116751ms step_avg:71.15ms +[2025-09-02 14:23:23] [Rank 0] step:1661/10000 train_time:118182ms step_avg:71.15ms +[2025-09-02 14:23:23] [Rank 0] step:1661/10000 train_time:118182ms step_avg:71.15ms +[2025-09-02 14:23:25] [Rank 0] step:1681/10000 train_time:119619ms step_avg:71.16ms +[2025-09-02 14:23:25] [Rank 0] step:1681/10000 train_time:119619ms step_avg:71.16ms +[2025-09-02 14:23:26] [Rank 0] step:1701/10000 train_time:121153ms step_avg:71.22ms +[2025-09-02 14:23:26] [Rank 0] step:1701/10000 train_time:121153ms step_avg:71.22ms +[2025-09-02 14:23:28] [Rank 0] step:1721/10000 train_time:122585ms step_avg:71.23ms +[2025-09-02 14:23:28] [Rank 0] step:1721/10000 train_time:122585ms step_avg:71.23ms +[2025-09-02 14:23:29] [Rank 0] 
step:1741/10000 train_time:124017ms step_avg:71.23ms +[2025-09-02 14:23:29] [Rank 0] step:1741/10000 train_time:124017ms step_avg:71.23ms +[2025-09-02 14:23:30] [Rank 0] step:1761/10000 train_time:125449ms step_avg:71.24ms +[2025-09-02 14:23:30] [Rank 0] step:1761/10000 train_time:125449ms step_avg:71.24ms +[2025-09-02 14:23:32] [Rank 0] step:1781/10000 train_time:126881ms step_avg:71.24ms +[2025-09-02 14:23:32] [Rank 0] step:1781/10000 train_time:126881ms step_avg:71.24ms +[2025-09-02 14:23:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:23:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:23:45] [Rank 0] PRINT: step:1800/10000 val_loss:4.7975 svd_entropy: attn_qk:H=0.6596,top10E=0.41,eRank=114.9,q75/q25=32.56 attn_vo:H=0.6647,top10E=0.32,eRank=148.3,q75/q25=inf mlp_w1:H=0.6311,top10E=0.47,eRank=84.2,q75/q25=7.07 mlp_w2:H=0.7672,top10E=0.24,eRank=164.7,q75/q25=17.01 vo_prod:H=0.5263,top10E=0.45,eRank=46.4,q75/q25=inf train_time:128457ms step_avg:71.36ms +[2025-09-02 14:23:45] [Rank 0] PRINT: step:1800/10000 val_loss:4.7975 svd_entropy: attn_qk:H=0.6596,top10E=0.41,eRank=114.9,q75/q25=32.56 attn_vo:H=0.6647,top10E=0.32,eRank=148.3,q75/q25=inf mlp_w1:H=0.6311,top10E=0.47,eRank=84.2,q75/q25=7.07 mlp_w2:H=0.7672,top10E=0.24,eRank=164.7,q75/q25=17.01 vo_prod:H=0.5263,top10E=0.45,eRank=46.4,q75/q25=inf train_time:128457ms step_avg:71.36ms +[2025-09-02 14:23:45] [Rank 0] step:1801/10000 train_time:128468ms step_avg:71.33ms +[2025-09-02 14:23:45] [Rank 0] step:1801/10000 train_time:128468ms step_avg:71.33ms +[2025-09-02 14:23:47] [Rank 0] step:1821/10000 train_time:129758ms step_avg:71.26ms +[2025-09-02 14:23:47] [Rank 0] step:1821/10000 train_time:129758ms step_avg:71.26ms +[2025-09-02 14:23:48] [Rank 0] step:1841/10000 train_time:131187ms step_avg:71.26ms +[2025-09-02 14:23:48] 
[Rank 0] step:1841/10000 train_time:131187ms step_avg:71.26ms +[2025-09-02 14:23:49] [Rank 0] step:1861/10000 train_time:132618ms step_avg:71.26ms +[2025-09-02 14:23:49] [Rank 0] step:1861/10000 train_time:132618ms step_avg:71.26ms +[2025-09-02 14:23:51] [Rank 0] step:1881/10000 train_time:134049ms step_avg:71.26ms +[2025-09-02 14:23:51] [Rank 0] step:1881/10000 train_time:134049ms step_avg:71.26ms +[2025-09-02 14:23:52] [Rank 0] step:1901/10000 train_time:135480ms step_avg:71.27ms +[2025-09-02 14:23:52] [Rank 0] step:1901/10000 train_time:135480ms step_avg:71.27ms +[2025-09-02 14:23:54] [Rank 0] step:1921/10000 train_time:136913ms step_avg:71.27ms +[2025-09-02 14:23:54] [Rank 0] step:1921/10000 train_time:136913ms step_avg:71.27ms +[2025-09-02 14:23:55] [Rank 0] step:1941/10000 train_time:138345ms step_avg:71.28ms +[2025-09-02 14:23:55] [Rank 0] step:1941/10000 train_time:138345ms step_avg:71.28ms +[2025-09-02 14:23:57] [Rank 0] step:1961/10000 train_time:139777ms step_avg:71.28ms +[2025-09-02 14:23:57] [Rank 0] step:1961/10000 train_time:139777ms step_avg:71.28ms +[2025-09-02 14:23:58] [Rank 0] step:1981/10000 train_time:141213ms step_avg:71.28ms +[2025-09-02 14:23:58] [Rank 0] step:1981/10000 train_time:141213ms step_avg:71.28ms +[2025-09-02 14:23:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:23:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:24:11] [Rank 0] PRINT: step:2000/10000 val_loss:4.7211 svd_entropy: attn_qk:H=0.6679,top10E=0.39,eRank=118.5,q75/q25=37.34 attn_vo:H=0.6758,top10E=0.31,eRank=156.7,q75/q25=inf mlp_w1:H=0.6449,top10E=0.45,eRank=90.9,q75/q25=7.79 mlp_w2:H=0.7772,top10E=0.22,eRank=176.2,q75/q25=18.99 vo_prod:H=0.5385,top10E=0.43,eRank=50.5,q75/q25=inf train_time:142788ms step_avg:71.39ms +[2025-09-02 14:24:11] [Rank 0] PRINT: step:2000/10000 val_loss:4.7211 svd_entropy: attn_qk:H=0.6679,top10E=0.39,eRank=118.5,q75/q25=37.34 attn_vo:H=0.6758,top10E=0.31,eRank=156.7,q75/q25=inf mlp_w1:H=0.6449,top10E=0.45,eRank=90.9,q75/q25=7.79 mlp_w2:H=0.7772,top10E=0.22,eRank=176.2,q75/q25=18.99 vo_prod:H=0.5385,top10E=0.43,eRank=50.5,q75/q25=inf train_time:142788ms step_avg:71.39ms +[2025-09-02 14:24:11] [Rank 0] step:2001/10000 train_time:142800ms step_avg:71.36ms +[2025-09-02 14:24:11] [Rank 0] step:2001/10000 train_time:142800ms step_avg:71.36ms +[2025-09-02 14:24:13] [Rank 0] step:2021/10000 train_time:144105ms step_avg:71.30ms +[2025-09-02 14:24:13] [Rank 0] step:2021/10000 train_time:144105ms step_avg:71.30ms +[2025-09-02 14:24:14] [Rank 0] step:2041/10000 train_time:145656ms step_avg:71.37ms +[2025-09-02 14:24:14] [Rank 0] step:2041/10000 train_time:145656ms step_avg:71.37ms +[2025-09-02 14:24:16] [Rank 0] step:2061/10000 train_time:147088ms step_avg:71.37ms +[2025-09-02 14:24:16] [Rank 0] step:2061/10000 train_time:147088ms step_avg:71.37ms +[2025-09-02 14:24:17] [Rank 0] step:2081/10000 train_time:148519ms step_avg:71.37ms +[2025-09-02 14:24:17] [Rank 0] step:2081/10000 train_time:148519ms step_avg:71.37ms +[2025-09-02 14:24:19] [Rank 0] step:2101/10000 train_time:149951ms step_avg:71.37ms +[2025-09-02 14:24:19] [Rank 0] step:2101/10000 train_time:149951ms step_avg:71.37ms +[2025-09-02 14:24:20] [Rank 0] step:2121/10000 train_time:151383ms step_avg:71.37ms +[2025-09-02 14:24:20] [Rank 0] step:2121/10000 train_time:151383ms step_avg:71.37ms +[2025-09-02 14:24:21] [Rank 0] 
step:2141/10000 train_time:152816ms step_avg:71.38ms +[2025-09-02 14:24:21] [Rank 0] step:2141/10000 train_time:152816ms step_avg:71.38ms +[2025-09-02 14:24:23] [Rank 0] step:2161/10000 train_time:154249ms step_avg:71.38ms +[2025-09-02 14:24:23] [Rank 0] step:2161/10000 train_time:154249ms step_avg:71.38ms +[2025-09-02 14:24:24] [Rank 0] step:2181/10000 train_time:155682ms step_avg:71.38ms +[2025-09-02 14:24:24] [Rank 0] step:2181/10000 train_time:155682ms step_avg:71.38ms +[2025-09-02 14:24:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:24:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:24:38] [Rank 0] PRINT: step:2200/10000 val_loss:4.6361 svd_entropy: attn_qk:H=0.6755,top10E=0.38,eRank=122.0,q75/q25=42.09 attn_vo:H=0.6851,top10E=0.29,eRank=163.9,q75/q25=inf mlp_w1:H=0.6569,top10E=0.43,eRank=97.2,q75/q25=8.53 mlp_w2:H=0.7856,top10E=0.21,eRank=186.5,q75/q25=20.63 vo_prod:H=0.5484,top10E=0.41,eRank=54.2,q75/q25=inf train_time:157277ms step_avg:71.49ms +[2025-09-02 14:24:38] [Rank 0] PRINT: step:2200/10000 val_loss:4.6361 svd_entropy: attn_qk:H=0.6755,top10E=0.38,eRank=122.0,q75/q25=42.09 attn_vo:H=0.6851,top10E=0.29,eRank=163.9,q75/q25=inf mlp_w1:H=0.6569,top10E=0.43,eRank=97.2,q75/q25=8.53 mlp_w2:H=0.7856,top10E=0.21,eRank=186.5,q75/q25=20.63 vo_prod:H=0.5484,top10E=0.41,eRank=54.2,q75/q25=inf train_time:157277ms step_avg:71.49ms +[2025-09-02 14:24:38] [Rank 0] step:2201/10000 train_time:157288ms step_avg:71.46ms +[2025-09-02 14:24:38] [Rank 0] step:2201/10000 train_time:157288ms step_avg:71.46ms +[2025-09-02 14:24:39] [Rank 0] step:2221/10000 train_time:158608ms step_avg:71.41ms +[2025-09-02 14:24:39] [Rank 0] step:2221/10000 train_time:158608ms step_avg:71.41ms +[2025-09-02 14:24:41] [Rank 0] step:2241/10000 train_time:160072ms step_avg:71.43ms +[2025-09-02 14:24:41] 
[Rank 0] step:2241/10000 train_time:160072ms step_avg:71.43ms +[2025-09-02 14:24:42] [Rank 0] step:2261/10000 train_time:161549ms step_avg:71.45ms +[2025-09-02 14:24:42] [Rank 0] step:2261/10000 train_time:161549ms step_avg:71.45ms +[2025-09-02 14:24:44] [Rank 0] step:2281/10000 train_time:163025ms step_avg:71.47ms +[2025-09-02 14:24:44] [Rank 0] step:2281/10000 train_time:163025ms step_avg:71.47ms +[2025-09-02 14:24:45] [Rank 0] step:2301/10000 train_time:164501ms step_avg:71.49ms +[2025-09-02 14:24:45] [Rank 0] step:2301/10000 train_time:164501ms step_avg:71.49ms +[2025-09-02 14:24:47] [Rank 0] step:2321/10000 train_time:165978ms step_avg:71.51ms +[2025-09-02 14:24:47] [Rank 0] step:2321/10000 train_time:165978ms step_avg:71.51ms +[2025-09-02 14:24:48] [Rank 0] step:2341/10000 train_time:167455ms step_avg:71.53ms +[2025-09-02 14:24:48] [Rank 0] step:2341/10000 train_time:167455ms step_avg:71.53ms +[2025-09-02 14:24:49] [Rank 0] step:2361/10000 train_time:168933ms step_avg:71.55ms +[2025-09-02 14:24:49] [Rank 0] step:2361/10000 train_time:168933ms step_avg:71.55ms +[2025-09-02 14:24:51] [Rank 0] step:2381/10000 train_time:170411ms step_avg:71.57ms +[2025-09-02 14:24:51] [Rank 0] step:2381/10000 train_time:170411ms step_avg:71.57ms +[2025-09-02 14:24:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:24:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:25:04] [Rank 0] PRINT: step:2400/10000 val_loss:4.5597 svd_entropy: attn_qk:H=0.6815,top10E=0.37,eRank=124.9,q75/q25=46.77 attn_vo:H=0.6935,top10E=0.28,eRank=170.4,q75/q25=inf mlp_w1:H=0.6678,top10E=0.42,eRank=103.3,q75/q25=9.27 mlp_w2:H=0.7930,top10E=0.20,eRank=196.1,q75/q25=22.02 vo_prod:H=0.5570,top10E=0.39,eRank=57.5,q75/q25=inf train_time:172038ms step_avg:71.68ms +[2025-09-02 14:25:04] [Rank 0] PRINT: step:2400/10000 val_loss:4.5597 svd_entropy: attn_qk:H=0.6815,top10E=0.37,eRank=124.9,q75/q25=46.77 attn_vo:H=0.6935,top10E=0.28,eRank=170.4,q75/q25=inf mlp_w1:H=0.6678,top10E=0.42,eRank=103.3,q75/q25=9.27 mlp_w2:H=0.7930,top10E=0.20,eRank=196.1,q75/q25=22.02 vo_prod:H=0.5570,top10E=0.39,eRank=57.5,q75/q25=inf train_time:172038ms step_avg:71.68ms +[2025-09-02 14:25:04] [Rank 0] step:2401/10000 train_time:172049ms step_avg:71.66ms +[2025-09-02 14:25:04] [Rank 0] step:2401/10000 train_time:172049ms step_avg:71.66ms +[2025-09-02 14:25:06] [Rank 0] step:2421/10000 train_time:173394ms step_avg:71.62ms +[2025-09-02 14:25:06] [Rank 0] step:2421/10000 train_time:173394ms step_avg:71.62ms +[2025-09-02 14:25:07] [Rank 0] step:2441/10000 train_time:174868ms step_avg:71.64ms +[2025-09-02 14:25:07] [Rank 0] step:2441/10000 train_time:174868ms step_avg:71.64ms +[2025-09-02 14:25:09] [Rank 0] step:2461/10000 train_time:176344ms step_avg:71.66ms +[2025-09-02 14:25:09] [Rank 0] step:2461/10000 train_time:176344ms step_avg:71.66ms +[2025-09-02 14:25:10] [Rank 0] step:2481/10000 train_time:177820ms step_avg:71.67ms +[2025-09-02 14:25:10] [Rank 0] step:2481/10000 train_time:177820ms step_avg:71.67ms +[2025-09-02 14:25:12] [Rank 0] step:2501/10000 train_time:179297ms step_avg:71.69ms +[2025-09-02 14:25:12] [Rank 0] step:2501/10000 train_time:179297ms step_avg:71.69ms +[2025-09-02 14:25:13] [Rank 0] step:2521/10000 train_time:180773ms step_avg:71.71ms +[2025-09-02 14:25:13] [Rank 0] step:2521/10000 train_time:180773ms step_avg:71.71ms +[2025-09-02 14:25:15] [Rank 0] 
step:2541/10000 train_time:182249ms step_avg:71.72ms +[2025-09-02 14:25:15] [Rank 0] step:2541/10000 train_time:182249ms step_avg:71.72ms +[2025-09-02 14:25:16] [Rank 0] step:2561/10000 train_time:183725ms step_avg:71.74ms +[2025-09-02 14:25:16] [Rank 0] step:2561/10000 train_time:183725ms step_avg:71.74ms +[2025-09-02 14:25:18] [Rank 0] step:2581/10000 train_time:185203ms step_avg:71.76ms +[2025-09-02 14:25:18] [Rank 0] step:2581/10000 train_time:185203ms step_avg:71.76ms +[2025-09-02 14:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:25:31] [Rank 0] PRINT: step:2600/10000 val_loss:4.4961 svd_entropy: attn_qk:H=0.6877,top10E=0.36,eRank=127.9,q75/q25=50.80 attn_vo:H=0.7009,top10E=0.27,eRank=176.3,q75/q25=inf mlp_w1:H=0.6778,top10E=0.41,eRank=109.3,q75/q25=9.97 mlp_w2:H=0.7995,top10E=0.19,eRank=205.0,q75/q25=23.41 vo_prod:H=0.5655,top10E=0.38,eRank=60.9,q75/q25=inf train_time:186827ms step_avg:71.86ms +[2025-09-02 14:25:31] [Rank 0] PRINT: step:2600/10000 val_loss:4.4961 svd_entropy: attn_qk:H=0.6877,top10E=0.36,eRank=127.9,q75/q25=50.80 attn_vo:H=0.7009,top10E=0.27,eRank=176.3,q75/q25=inf mlp_w1:H=0.6778,top10E=0.41,eRank=109.3,q75/q25=9.97 mlp_w2:H=0.7995,top10E=0.19,eRank=205.0,q75/q25=23.41 vo_prod:H=0.5655,top10E=0.38,eRank=60.9,q75/q25=inf train_time:186827ms step_avg:71.86ms +[2025-09-02 14:25:31] [Rank 0] step:2601/10000 train_time:186838ms step_avg:71.83ms +[2025-09-02 14:25:31] [Rank 0] step:2601/10000 train_time:186838ms step_avg:71.83ms +[2025-09-02 14:25:32] [Rank 0] step:2621/10000 train_time:188168ms step_avg:71.79ms +[2025-09-02 14:25:32] [Rank 0] step:2621/10000 train_time:188168ms step_avg:71.79ms +[2025-09-02 14:25:34] [Rank 0] step:2641/10000 train_time:189642ms step_avg:71.81ms +[2025-09-02 
14:25:34] [Rank 0] step:2641/10000 train_time:189642ms step_avg:71.81ms +[2025-09-02 14:25:35] [Rank 0] step:2661/10000 train_time:191116ms step_avg:71.82ms +[2025-09-02 14:25:35] [Rank 0] step:2661/10000 train_time:191116ms step_avg:71.82ms +[2025-09-02 14:25:37] [Rank 0] step:2681/10000 train_time:192592ms step_avg:71.84ms +[2025-09-02 14:25:37] [Rank 0] step:2681/10000 train_time:192592ms step_avg:71.84ms +[2025-09-02 14:25:38] [Rank 0] step:2701/10000 train_time:194067ms step_avg:71.85ms +[2025-09-02 14:25:38] [Rank 0] step:2701/10000 train_time:194067ms step_avg:71.85ms +[2025-09-02 14:25:40] [Rank 0] step:2721/10000 train_time:195543ms step_avg:71.86ms +[2025-09-02 14:25:40] [Rank 0] step:2721/10000 train_time:195543ms step_avg:71.86ms +[2025-09-02 14:25:41] [Rank 0] step:2741/10000 train_time:197020ms step_avg:71.88ms +[2025-09-02 14:25:41] [Rank 0] step:2741/10000 train_time:197020ms step_avg:71.88ms +[2025-09-02 14:25:43] [Rank 0] step:2761/10000 train_time:198497ms step_avg:71.89ms +[2025-09-02 14:25:43] [Rank 0] step:2761/10000 train_time:198497ms step_avg:71.89ms +[2025-09-02 14:25:44] [Rank 0] step:2781/10000 train_time:199973ms step_avg:71.91ms +[2025-09-02 14:25:44] [Rank 0] step:2781/10000 train_time:199973ms step_avg:71.91ms +[2025-09-02 14:25:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:25:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:25:57] [Rank 0] PRINT: step:2800/10000 val_loss:4.4524 svd_entropy: attn_qk:H=0.6935,top10E=0.35,eRank=131.0,q75/q25=55.34 attn_vo:H=0.7077,top10E=0.26,eRank=181.8,q75/q25=inf mlp_w1:H=0.6873,top10E=0.39,eRank=115.3,q75/q25=10.58 mlp_w2:H=0.8057,top10E=0.19,eRank=213.7,q75/q25=23.89 vo_prod:H=0.5733,top10E=0.36,eRank=64.3,q75/q25=inf train_time:201598ms step_avg:72.00ms +[2025-09-02 14:25:57] [Rank 0] PRINT: step:2800/10000 val_loss:4.4524 svd_entropy: attn_qk:H=0.6935,top10E=0.35,eRank=131.0,q75/q25=55.34 attn_vo:H=0.7077,top10E=0.26,eRank=181.8,q75/q25=inf mlp_w1:H=0.6873,top10E=0.39,eRank=115.3,q75/q25=10.58 mlp_w2:H=0.8057,top10E=0.19,eRank=213.7,q75/q25=23.89 vo_prod:H=0.5733,top10E=0.36,eRank=64.3,q75/q25=inf train_time:201598ms step_avg:72.00ms +[2025-09-02 14:25:58] [Rank 0] step:2801/10000 train_time:201609ms step_avg:71.98ms +[2025-09-02 14:25:58] [Rank 0] step:2801/10000 train_time:201609ms step_avg:71.98ms +[2025-09-02 14:25:59] [Rank 0] step:2821/10000 train_time:202944ms step_avg:71.94ms +[2025-09-02 14:25:59] [Rank 0] step:2821/10000 train_time:202944ms step_avg:71.94ms +[2025-09-02 14:26:01] [Rank 0] step:2841/10000 train_time:204420ms step_avg:71.95ms +[2025-09-02 14:26:01] [Rank 0] step:2841/10000 train_time:204420ms step_avg:71.95ms +[2025-09-02 14:26:02] [Rank 0] step:2861/10000 train_time:205895ms step_avg:71.97ms +[2025-09-02 14:26:02] [Rank 0] step:2861/10000 train_time:205895ms step_avg:71.97ms +[2025-09-02 14:26:03] [Rank 0] step:2881/10000 train_time:207372ms step_avg:71.98ms +[2025-09-02 14:26:03] [Rank 0] step:2881/10000 train_time:207372ms step_avg:71.98ms +[2025-09-02 14:26:05] [Rank 0] step:2901/10000 train_time:208848ms step_avg:71.99ms +[2025-09-02 14:26:05] [Rank 0] step:2901/10000 train_time:208848ms step_avg:71.99ms +[2025-09-02 14:26:06] [Rank 0] step:2921/10000 train_time:210324ms step_avg:72.00ms +[2025-09-02 14:26:06] [Rank 0] step:2921/10000 train_time:210324ms step_avg:72.00ms +[2025-09-02 14:26:08] [Rank 0] 
step:2941/10000 train_time:211801ms step_avg:72.02ms +[2025-09-02 14:26:08] [Rank 0] step:2941/10000 train_time:211801ms step_avg:72.02ms +[2025-09-02 14:26:09] [Rank 0] step:2961/10000 train_time:213279ms step_avg:72.03ms +[2025-09-02 14:26:09] [Rank 0] step:2961/10000 train_time:213279ms step_avg:72.03ms +[2025-09-02 14:26:11] [Rank 0] step:2981/10000 train_time:214763ms step_avg:72.04ms +[2025-09-02 14:26:11] [Rank 0] step:2981/10000 train_time:214763ms step_avg:72.04ms +[2025-09-02 14:26:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:26:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:26:24] [Rank 0] PRINT: step:3000/10000 val_loss:4.4061 svd_entropy: attn_qk:H=0.6986,top10E=0.34,eRank=133.7,q75/q25=59.31 attn_vo:H=0.7137,top10E=0.25,eRank=186.7,q75/q25=inf mlp_w1:H=0.6954,top10E=0.38,eRank=120.8,q75/q25=11.24 mlp_w2:H=0.8107,top10E=0.18,eRank=221.1,q75/q25=24.47 vo_prod:H=0.5800,top10E=0.35,eRank=67.3,q75/q25=inf train_time:216397ms step_avg:72.13ms +[2025-09-02 14:26:24] [Rank 0] PRINT: step:3000/10000 val_loss:4.4061 svd_entropy: attn_qk:H=0.6986,top10E=0.34,eRank=133.7,q75/q25=59.31 attn_vo:H=0.7137,top10E=0.25,eRank=186.7,q75/q25=inf mlp_w1:H=0.6954,top10E=0.38,eRank=120.8,q75/q25=11.24 mlp_w2:H=0.8107,top10E=0.18,eRank=221.1,q75/q25=24.47 vo_prod:H=0.5800,top10E=0.35,eRank=67.3,q75/q25=inf train_time:216397ms step_avg:72.13ms +[2025-09-02 14:26:24] [Rank 0] step:3001/10000 train_time:216408ms step_avg:72.11ms +[2025-09-02 14:26:24] [Rank 0] step:3001/10000 train_time:216408ms step_avg:72.11ms +[2025-09-02 14:26:26] [Rank 0] step:3021/10000 train_time:217863ms step_avg:72.12ms +[2025-09-02 14:26:26] [Rank 0] step:3021/10000 train_time:217863ms step_avg:72.12ms +[2025-09-02 14:26:27] [Rank 0] step:3041/10000 train_time:219345ms step_avg:72.13ms +[2025-09-02 
14:26:27] [Rank 0] step:3041/10000 train_time:219345ms step_avg:72.13ms +[2025-09-02 14:26:29] [Rank 0] step:3061/10000 train_time:220830ms step_avg:72.14ms +[2025-09-02 14:26:29] [Rank 0] step:3061/10000 train_time:220830ms step_avg:72.14ms +[2025-09-02 14:26:30] [Rank 0] step:3081/10000 train_time:222417ms step_avg:72.19ms +[2025-09-02 14:26:30] [Rank 0] step:3081/10000 train_time:222417ms step_avg:72.19ms +[2025-09-02 14:26:32] [Rank 0] step:3101/10000 train_time:223903ms step_avg:72.20ms +[2025-09-02 14:26:32] [Rank 0] step:3101/10000 train_time:223903ms step_avg:72.20ms +[2025-09-02 14:26:33] [Rank 0] step:3121/10000 train_time:225390ms step_avg:72.22ms +[2025-09-02 14:26:33] [Rank 0] step:3121/10000 train_time:225390ms step_avg:72.22ms +[2025-09-02 14:26:35] [Rank 0] step:3141/10000 train_time:226875ms step_avg:72.23ms +[2025-09-02 14:26:35] [Rank 0] step:3141/10000 train_time:226875ms step_avg:72.23ms +[2025-09-02 14:26:36] [Rank 0] step:3161/10000 train_time:228361ms step_avg:72.24ms +[2025-09-02 14:26:36] [Rank 0] step:3161/10000 train_time:228361ms step_avg:72.24ms +[2025-09-02 14:26:38] [Rank 0] step:3181/10000 train_time:229847ms step_avg:72.26ms +[2025-09-02 14:26:38] [Rank 0] step:3181/10000 train_time:229847ms step_avg:72.26ms +[2025-09-02 14:26:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:26:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:26:51] [Rank 0] PRINT: step:3200/10000 val_loss:4.3670 svd_entropy: attn_qk:H=0.7034,top10E=0.34,eRank=136.4,q75/q25=63.36 attn_vo:H=0.7193,top10E=0.24,eRank=191.4,q75/q25=inf mlp_w1:H=0.7033,top10E=0.37,eRank=126.4,q75/q25=11.83 mlp_w2:H=0.8155,top10E=0.17,eRank=228.3,q75/q25=25.06 vo_prod:H=0.5860,top10E=0.34,eRank=70.2,q75/q25=inf train_time:231484ms step_avg:72.34ms +[2025-09-02 14:26:51] [Rank 0] PRINT: step:3200/10000 val_loss:4.3670 svd_entropy: attn_qk:H=0.7034,top10E=0.34,eRank=136.4,q75/q25=63.36 attn_vo:H=0.7193,top10E=0.24,eRank=191.4,q75/q25=inf mlp_w1:H=0.7033,top10E=0.37,eRank=126.4,q75/q25=11.83 mlp_w2:H=0.8155,top10E=0.17,eRank=228.3,q75/q25=25.06 vo_prod:H=0.5860,top10E=0.34,eRank=70.2,q75/q25=inf train_time:231484ms step_avg:72.34ms +[2025-09-02 14:26:51] [Rank 0] step:3201/10000 train_time:231495ms step_avg:72.32ms +[2025-09-02 14:26:51] [Rank 0] step:3201/10000 train_time:231495ms step_avg:72.32ms +[2025-09-02 14:26:53] [Rank 0] step:3221/10000 train_time:232851ms step_avg:72.29ms +[2025-09-02 14:26:53] [Rank 0] step:3221/10000 train_time:232851ms step_avg:72.29ms +[2025-09-02 14:26:54] [Rank 0] step:3241/10000 train_time:234335ms step_avg:72.30ms +[2025-09-02 14:26:54] [Rank 0] step:3241/10000 train_time:234335ms step_avg:72.30ms +[2025-09-02 14:26:56] [Rank 0] step:3261/10000 train_time:235816ms step_avg:72.31ms +[2025-09-02 14:26:56] [Rank 0] step:3261/10000 train_time:235816ms step_avg:72.31ms +[2025-09-02 14:26:57] [Rank 0] step:3281/10000 train_time:237300ms step_avg:72.33ms +[2025-09-02 14:26:57] [Rank 0] step:3281/10000 train_time:237300ms step_avg:72.33ms +[2025-09-02 14:26:58] [Rank 0] step:3301/10000 train_time:238783ms step_avg:72.34ms +[2025-09-02 14:26:58] [Rank 0] step:3301/10000 train_time:238783ms step_avg:72.34ms +[2025-09-02 14:27:00] [Rank 0] step:3321/10000 train_time:240267ms step_avg:72.35ms +[2025-09-02 14:27:00] [Rank 0] step:3321/10000 train_time:240267ms step_avg:72.35ms +[2025-09-02 14:27:01] [Rank 0] 
step:3341/10000 train_time:241751ms step_avg:72.36ms +[2025-09-02 14:27:01] [Rank 0] step:3341/10000 train_time:241751ms step_avg:72.36ms +[2025-09-02 14:27:03] [Rank 0] step:3361/10000 train_time:243237ms step_avg:72.37ms +[2025-09-02 14:27:03] [Rank 0] step:3361/10000 train_time:243237ms step_avg:72.37ms +[2025-09-02 14:27:04] [Rank 0] step:3381/10000 train_time:244721ms step_avg:72.38ms +[2025-09-02 14:27:04] [Rank 0] step:3381/10000 train_time:244721ms step_avg:72.38ms +[2025-09-02 14:27:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:27:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:27:18] [Rank 0] PRINT: step:3400/10000 val_loss:4.3201 svd_entropy: attn_qk:H=0.7078,top10E=0.33,eRank=139.0,q75/q25=67.18 attn_vo:H=0.7244,top10E=0.24,eRank=196.0,q75/q25=inf mlp_w1:H=0.7109,top10E=0.36,eRank=132.1,q75/q25=12.42 mlp_w2:H=0.8199,top10E=0.17,eRank=235.2,q75/q25=25.25 vo_prod:H=0.5916,top10E=0.33,eRank=73.0,q75/q25=inf train_time:246355ms step_avg:72.46ms +[2025-09-02 14:27:18] [Rank 0] PRINT: step:3400/10000 val_loss:4.3201 svd_entropy: attn_qk:H=0.7078,top10E=0.33,eRank=139.0,q75/q25=67.18 attn_vo:H=0.7244,top10E=0.24,eRank=196.0,q75/q25=inf mlp_w1:H=0.7109,top10E=0.36,eRank=132.1,q75/q25=12.42 mlp_w2:H=0.8199,top10E=0.17,eRank=235.2,q75/q25=25.25 vo_prod:H=0.5916,top10E=0.33,eRank=73.0,q75/q25=inf train_time:246355ms step_avg:72.46ms +[2025-09-02 14:27:18] [Rank 0] step:3401/10000 train_time:246366ms step_avg:72.44ms +[2025-09-02 14:27:18] [Rank 0] step:3401/10000 train_time:246366ms step_avg:72.44ms +[2025-09-02 14:27:19] [Rank 0] step:3421/10000 train_time:247708ms step_avg:72.41ms +[2025-09-02 14:27:19] [Rank 0] step:3421/10000 train_time:247708ms step_avg:72.41ms +[2025-09-02 14:27:21] [Rank 0] step:3441/10000 train_time:249191ms step_avg:72.42ms +[2025-09-02 
14:27:21] [Rank 0] step:3441/10000 train_time:249191ms step_avg:72.42ms +[2025-09-02 14:27:22] [Rank 0] step:3461/10000 train_time:250673ms step_avg:72.43ms +[2025-09-02 14:27:22] [Rank 0] step:3461/10000 train_time:250673ms step_avg:72.43ms +[2025-09-02 14:27:24] [Rank 0] step:3481/10000 train_time:252156ms step_avg:72.44ms +[2025-09-02 14:27:24] [Rank 0] step:3481/10000 train_time:252156ms step_avg:72.44ms +[2025-09-02 14:27:25] [Rank 0] step:3501/10000 train_time:253639ms step_avg:72.45ms +[2025-09-02 14:27:25] [Rank 0] step:3501/10000 train_time:253639ms step_avg:72.45ms +[2025-09-02 14:27:27] [Rank 0] step:3521/10000 train_time:255123ms step_avg:72.46ms +[2025-09-02 14:27:27] [Rank 0] step:3521/10000 train_time:255123ms step_avg:72.46ms +[2025-09-02 14:27:28] [Rank 0] step:3541/10000 train_time:256608ms step_avg:72.47ms +[2025-09-02 14:27:28] [Rank 0] step:3541/10000 train_time:256608ms step_avg:72.47ms +[2025-09-02 14:27:30] [Rank 0] step:3561/10000 train_time:258092ms step_avg:72.48ms +[2025-09-02 14:27:30] [Rank 0] step:3561/10000 train_time:258092ms step_avg:72.48ms +[2025-09-02 14:27:31] [Rank 0] step:3581/10000 train_time:259578ms step_avg:72.49ms +[2025-09-02 14:27:31] [Rank 0] step:3581/10000 train_time:259578ms step_avg:72.49ms +[2025-09-02 14:27:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:27:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:27:44] [Rank 0] PRINT: step:3600/10000 val_loss:4.3095 svd_entropy: attn_qk:H=0.7119,top10E=0.32,eRank=141.5,q75/q25=70.16 attn_vo:H=0.7289,top10E=0.23,eRank=200.0,q75/q25=inf mlp_w1:H=0.7175,top10E=0.35,eRank=137.3,q75/q25=12.96 mlp_w2:H=0.8238,top10E=0.17,eRank=241.4,q75/q25=25.35 vo_prod:H=0.5965,top10E=0.33,eRank=75.6,q75/q25=inf train_time:261214ms step_avg:72.56ms +[2025-09-02 14:27:44] [Rank 0] PRINT: step:3600/10000 val_loss:4.3095 svd_entropy: attn_qk:H=0.7119,top10E=0.32,eRank=141.5,q75/q25=70.16 attn_vo:H=0.7289,top10E=0.23,eRank=200.0,q75/q25=inf mlp_w1:H=0.7175,top10E=0.35,eRank=137.3,q75/q25=12.96 mlp_w2:H=0.8238,top10E=0.17,eRank=241.4,q75/q25=25.35 vo_prod:H=0.5965,top10E=0.33,eRank=75.6,q75/q25=inf train_time:261214ms step_avg:72.56ms +[2025-09-02 14:27:44] [Rank 0] step:3601/10000 train_time:261226ms step_avg:72.54ms +[2025-09-02 14:27:44] [Rank 0] step:3601/10000 train_time:261226ms step_avg:72.54ms +[2025-09-02 14:27:46] [Rank 0] step:3621/10000 train_time:262570ms step_avg:72.51ms +[2025-09-02 14:27:46] [Rank 0] step:3621/10000 train_time:262570ms step_avg:72.51ms +[2025-09-02 14:27:47] [Rank 0] step:3641/10000 train_time:264054ms step_avg:72.52ms +[2025-09-02 14:27:47] [Rank 0] step:3641/10000 train_time:264054ms step_avg:72.52ms +[2025-09-02 14:27:49] [Rank 0] step:3661/10000 train_time:265537ms step_avg:72.53ms +[2025-09-02 14:27:49] [Rank 0] step:3661/10000 train_time:265537ms step_avg:72.53ms +[2025-09-02 14:27:50] [Rank 0] step:3681/10000 train_time:267022ms step_avg:72.54ms +[2025-09-02 14:27:50] [Rank 0] step:3681/10000 train_time:267022ms step_avg:72.54ms +[2025-09-02 14:27:52] [Rank 0] step:3701/10000 train_time:268507ms step_avg:72.55ms +[2025-09-02 14:27:52] [Rank 0] step:3701/10000 train_time:268507ms step_avg:72.55ms +[2025-09-02 14:27:53] [Rank 0] step:3721/10000 train_time:270017ms step_avg:72.57ms +[2025-09-02 14:27:53] [Rank 0] step:3721/10000 train_time:270017ms step_avg:72.57ms +[2025-09-02 14:27:55] [Rank 0] 
step:3741/10000 train_time:271539ms step_avg:72.58ms +[2025-09-02 14:27:55] [Rank 0] step:3741/10000 train_time:271539ms step_avg:72.58ms +[2025-09-02 14:27:56] [Rank 0] step:3761/10000 train_time:273060ms step_avg:72.60ms +[2025-09-02 14:27:56] [Rank 0] step:3761/10000 train_time:273060ms step_avg:72.60ms +[2025-09-02 14:27:58] [Rank 0] step:3781/10000 train_time:274583ms step_avg:72.62ms +[2025-09-02 14:27:58] [Rank 0] step:3781/10000 train_time:274583ms step_avg:72.62ms +[2025-09-02 14:27:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:27:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:28:11] [Rank 0] PRINT: step:3800/10000 val_loss:4.2455 svd_entropy: attn_qk:H=0.7155,top10E=0.32,eRank=143.7,q75/q25=72.69 attn_vo:H=0.7332,top10E=0.22,eRank=204.0,q75/q25=inf mlp_w1:H=0.7238,top10E=0.34,eRank=142.5,q75/q25=13.50 mlp_w2:H=0.8272,top10E=0.16,eRank=247.1,q75/q25=25.63 vo_prod:H=0.6012,top10E=0.32,eRank=78.2,q75/q25=inf train_time:276259ms step_avg:72.70ms +[2025-09-02 14:28:11] [Rank 0] PRINT: step:3800/10000 val_loss:4.2455 svd_entropy: attn_qk:H=0.7155,top10E=0.32,eRank=143.7,q75/q25=72.69 attn_vo:H=0.7332,top10E=0.22,eRank=204.0,q75/q25=inf mlp_w1:H=0.7238,top10E=0.34,eRank=142.5,q75/q25=13.50 mlp_w2:H=0.8272,top10E=0.16,eRank=247.1,q75/q25=25.63 vo_prod:H=0.6012,top10E=0.32,eRank=78.2,q75/q25=inf train_time:276259ms step_avg:72.70ms +[2025-09-02 14:28:11] [Rank 0] step:3801/10000 train_time:276271ms step_avg:72.68ms +[2025-09-02 14:28:11] [Rank 0] step:3801/10000 train_time:276271ms step_avg:72.68ms +[2025-09-02 14:28:13] [Rank 0] step:3821/10000 train_time:277653ms step_avg:72.66ms +[2025-09-02 14:28:13] [Rank 0] step:3821/10000 train_time:277653ms step_avg:72.66ms +[2025-09-02 14:28:14] [Rank 0] step:3841/10000 train_time:279179ms step_avg:72.68ms +[2025-09-02 
14:28:14] [Rank 0] step:3841/10000 train_time:279179ms step_avg:72.68ms +[2025-09-02 14:28:16] [Rank 0] step:3861/10000 train_time:280701ms step_avg:72.70ms +[2025-09-02 14:28:16] [Rank 0] step:3861/10000 train_time:280701ms step_avg:72.70ms +[2025-09-02 14:28:17] [Rank 0] step:3881/10000 train_time:282222ms step_avg:72.72ms +[2025-09-02 14:28:17] [Rank 0] step:3881/10000 train_time:282222ms step_avg:72.72ms +[2025-09-02 14:28:19] [Rank 0] step:3901/10000 train_time:283743ms step_avg:72.74ms +[2025-09-02 14:28:19] [Rank 0] step:3901/10000 train_time:283743ms step_avg:72.74ms +[2025-09-02 14:28:20] [Rank 0] step:3921/10000 train_time:285266ms step_avg:72.75ms +[2025-09-02 14:28:20] [Rank 0] step:3921/10000 train_time:285266ms step_avg:72.75ms +[2025-09-02 14:28:22] [Rank 0] step:3941/10000 train_time:286788ms step_avg:72.77ms +[2025-09-02 14:28:22] [Rank 0] step:3941/10000 train_time:286788ms step_avg:72.77ms +[2025-09-02 14:28:23] [Rank 0] step:3961/10000 train_time:288309ms step_avg:72.79ms +[2025-09-02 14:28:23] [Rank 0] step:3961/10000 train_time:288309ms step_avg:72.79ms +[2025-09-02 14:28:25] [Rank 0] step:3981/10000 train_time:289831ms step_avg:72.80ms +[2025-09-02 14:28:25] [Rank 0] step:3981/10000 train_time:289831ms step_avg:72.80ms +[2025-09-02 14:28:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:28:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:28:38] [Rank 0] PRINT: step:4000/10000 val_loss:4.2157 svd_entropy: attn_qk:H=0.7192,top10E=0.31,eRank=146.1,q75/q25=75.05 attn_vo:H=0.7371,top10E=0.22,eRank=207.6,q75/q25=inf mlp_w1:H=0.7298,top10E=0.34,eRank=147.7,q75/q25=14.03 mlp_w2:H=0.8305,top10E=0.16,eRank=252.5,q75/q25=25.82 vo_prod:H=0.6057,top10E=0.31,eRank=80.7,q75/q25=inf train_time:291507ms step_avg:72.88ms +[2025-09-02 14:28:38] [Rank 0] PRINT: step:4000/10000 val_loss:4.2157 svd_entropy: attn_qk:H=0.7192,top10E=0.31,eRank=146.1,q75/q25=75.05 attn_vo:H=0.7371,top10E=0.22,eRank=207.6,q75/q25=inf mlp_w1:H=0.7298,top10E=0.34,eRank=147.7,q75/q25=14.03 mlp_w2:H=0.8305,top10E=0.16,eRank=252.5,q75/q25=25.82 vo_prod:H=0.6057,top10E=0.31,eRank=80.7,q75/q25=inf train_time:291507ms step_avg:72.88ms +[2025-09-02 14:28:38] [Rank 0] step:4001/10000 train_time:291519ms step_avg:72.86ms +[2025-09-02 14:28:38] [Rank 0] step:4001/10000 train_time:291519ms step_avg:72.86ms +[2025-09-02 14:28:40] [Rank 0] step:4021/10000 train_time:292895ms step_avg:72.84ms +[2025-09-02 14:28:40] [Rank 0] step:4021/10000 train_time:292895ms step_avg:72.84ms +[2025-09-02 14:28:41] [Rank 0] step:4041/10000 train_time:294417ms step_avg:72.86ms +[2025-09-02 14:28:41] [Rank 0] step:4041/10000 train_time:294417ms step_avg:72.86ms +[2025-09-02 14:28:43] [Rank 0] step:4061/10000 train_time:295945ms step_avg:72.87ms +[2025-09-02 14:28:43] [Rank 0] step:4061/10000 train_time:295945ms step_avg:72.87ms +[2025-09-02 14:28:44] [Rank 0] step:4081/10000 train_time:297576ms step_avg:72.92ms +[2025-09-02 14:28:44] [Rank 0] step:4081/10000 train_time:297576ms step_avg:72.92ms +[2025-09-02 14:28:46] [Rank 0] step:4101/10000 train_time:299100ms step_avg:72.93ms +[2025-09-02 14:28:46] [Rank 0] step:4101/10000 train_time:299100ms step_avg:72.93ms +[2025-09-02 14:28:47] [Rank 0] step:4121/10000 train_time:300621ms step_avg:72.95ms +[2025-09-02 14:28:47] [Rank 0] step:4121/10000 train_time:300621ms step_avg:72.95ms +[2025-09-02 14:28:49] [Rank 0] 
step:4141/10000 train_time:302144ms step_avg:72.96ms +[2025-09-02 14:28:49] [Rank 0] step:4141/10000 train_time:302144ms step_avg:72.96ms +[2025-09-02 14:28:50] [Rank 0] step:4161/10000 train_time:303666ms step_avg:72.98ms +[2025-09-02 14:28:50] [Rank 0] step:4161/10000 train_time:303666ms step_avg:72.98ms +[2025-09-02 14:28:52] [Rank 0] step:4181/10000 train_time:305189ms step_avg:72.99ms +[2025-09-02 14:28:52] [Rank 0] step:4181/10000 train_time:305189ms step_avg:72.99ms +[2025-09-02 14:28:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:28:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:29:05] [Rank 0] PRINT: step:4200/10000 val_loss:4.1949 svd_entropy: attn_qk:H=0.7225,top10E=0.31,eRank=148.3,q75/q25=76.95 attn_vo:H=0.7408,top10E=0.21,eRank=211.1,q75/q25=inf mlp_w1:H=0.7352,top10E=0.33,eRank=152.5,q75/q25=14.56 mlp_w2:H=0.8332,top10E=0.15,eRank=257.2,q75/q25=26.18 vo_prod:H=0.6100,top10E=0.30,eRank=83.1,q75/q25=inf train_time:306863ms step_avg:73.06ms +[2025-09-02 14:29:05] [Rank 0] PRINT: step:4200/10000 val_loss:4.1949 svd_entropy: attn_qk:H=0.7225,top10E=0.31,eRank=148.3,q75/q25=76.95 attn_vo:H=0.7408,top10E=0.21,eRank=211.1,q75/q25=inf mlp_w1:H=0.7352,top10E=0.33,eRank=152.5,q75/q25=14.56 mlp_w2:H=0.8332,top10E=0.15,eRank=257.2,q75/q25=26.18 vo_prod:H=0.6100,top10E=0.30,eRank=83.1,q75/q25=inf train_time:306863ms step_avg:73.06ms +[2025-09-02 14:29:05] [Rank 0] step:4201/10000 train_time:306875ms step_avg:73.05ms +[2025-09-02 14:29:05] [Rank 0] step:4201/10000 train_time:306875ms step_avg:73.05ms +[2025-09-02 14:29:07] [Rank 0] step:4221/10000 train_time:308247ms step_avg:73.03ms +[2025-09-02 14:29:07] [Rank 0] step:4221/10000 train_time:308247ms step_avg:73.03ms +[2025-09-02 14:29:08] [Rank 0] step:4241/10000 train_time:309767ms step_avg:73.04ms +[2025-09-02 
14:29:08] [Rank 0] step:4241/10000 train_time:309767ms step_avg:73.04ms +[2025-09-02 14:29:10] [Rank 0] step:4261/10000 train_time:311289ms step_avg:73.06ms +[2025-09-02 14:29:10] [Rank 0] step:4261/10000 train_time:311289ms step_avg:73.06ms +[2025-09-02 14:29:11] [Rank 0] step:4281/10000 train_time:312809ms step_avg:73.07ms +[2025-09-02 14:29:11] [Rank 0] step:4281/10000 train_time:312809ms step_avg:73.07ms +[2025-09-02 14:29:13] [Rank 0] step:4301/10000 train_time:314330ms step_avg:73.08ms +[2025-09-02 14:29:13] [Rank 0] step:4301/10000 train_time:314330ms step_avg:73.08ms +[2025-09-02 14:29:14] [Rank 0] step:4321/10000 train_time:315850ms step_avg:73.10ms +[2025-09-02 14:29:14] [Rank 0] step:4321/10000 train_time:315850ms step_avg:73.10ms +[2025-09-02 14:29:16] [Rank 0] step:4341/10000 train_time:317368ms step_avg:73.11ms +[2025-09-02 14:29:16] [Rank 0] step:4341/10000 train_time:317368ms step_avg:73.11ms +[2025-09-02 14:29:17] [Rank 0] step:4361/10000 train_time:318887ms step_avg:73.12ms +[2025-09-02 14:29:17] [Rank 0] step:4361/10000 train_time:318887ms step_avg:73.12ms +[2025-09-02 14:29:19] [Rank 0] step:4381/10000 train_time:320404ms step_avg:73.13ms +[2025-09-02 14:29:19] [Rank 0] step:4381/10000 train_time:320404ms step_avg:73.13ms +[2025-09-02 14:29:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:29:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:29:32] [Rank 0] PRINT: step:4400/10000 val_loss:4.1724 svd_entropy: attn_qk:H=0.7255,top10E=0.30,eRank=150.4,q75/q25=78.98 attn_vo:H=0.7441,top10E=0.21,eRank=214.4,q75/q25=inf mlp_w1:H=0.7402,top10E=0.32,eRank=157.2,q75/q25=15.13 mlp_w2:H=0.8357,top10E=0.15,eRank=261.6,q75/q25=26.38 vo_prod:H=0.6139,top10E=0.30,eRank=85.4,q75/q25=inf train_time:322077ms step_avg:73.20ms +[2025-09-02 14:29:32] [Rank 0] PRINT: step:4400/10000 val_loss:4.1724 svd_entropy: attn_qk:H=0.7255,top10E=0.30,eRank=150.4,q75/q25=78.98 attn_vo:H=0.7441,top10E=0.21,eRank=214.4,q75/q25=inf mlp_w1:H=0.7402,top10E=0.32,eRank=157.2,q75/q25=15.13 mlp_w2:H=0.8357,top10E=0.15,eRank=261.6,q75/q25=26.38 vo_prod:H=0.6139,top10E=0.30,eRank=85.4,q75/q25=inf train_time:322077ms step_avg:73.20ms +[2025-09-02 14:29:32] [Rank 0] step:4401/10000 train_time:322089ms step_avg:73.19ms +[2025-09-02 14:29:32] [Rank 0] step:4401/10000 train_time:322089ms step_avg:73.19ms +[2025-09-02 14:29:34] [Rank 0] step:4421/10000 train_time:323489ms step_avg:73.17ms +[2025-09-02 14:29:34] [Rank 0] step:4421/10000 train_time:323489ms step_avg:73.17ms +[2025-09-02 14:29:35] [Rank 0] step:4441/10000 train_time:325004ms step_avg:73.18ms +[2025-09-02 14:29:35] [Rank 0] step:4441/10000 train_time:325004ms step_avg:73.18ms +[2025-09-02 14:29:37] [Rank 0] step:4461/10000 train_time:326525ms step_avg:73.20ms +[2025-09-02 14:29:37] [Rank 0] step:4461/10000 train_time:326525ms step_avg:73.20ms +[2025-09-02 14:29:38] [Rank 0] step:4481/10000 train_time:328048ms step_avg:73.21ms +[2025-09-02 14:29:38] [Rank 0] step:4481/10000 train_time:328048ms step_avg:73.21ms +[2025-09-02 14:29:40] [Rank 0] step:4501/10000 train_time:329573ms step_avg:73.22ms +[2025-09-02 14:29:40] [Rank 0] step:4501/10000 train_time:329573ms step_avg:73.22ms +[2025-09-02 14:29:41] [Rank 0] step:4521/10000 train_time:331096ms step_avg:73.24ms +[2025-09-02 14:29:41] [Rank 0] step:4521/10000 train_time:331096ms step_avg:73.24ms +[2025-09-02 14:29:43] [Rank 0] 
step:4541/10000 train_time:332620ms step_avg:73.25ms +[2025-09-02 14:29:43] [Rank 0] step:4541/10000 train_time:332620ms step_avg:73.25ms +[2025-09-02 14:29:44] [Rank 0] step:4561/10000 train_time:334146ms step_avg:73.26ms +[2025-09-02 14:29:44] [Rank 0] step:4561/10000 train_time:334146ms step_avg:73.26ms +[2025-09-02 14:29:46] [Rank 0] step:4581/10000 train_time:335673ms step_avg:73.28ms +[2025-09-02 14:29:46] [Rank 0] step:4581/10000 train_time:335673ms step_avg:73.28ms +[2025-09-02 14:29:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:29:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:29:59] [Rank 0] PRINT: step:4600/10000 val_loss:4.1347 svd_entropy: attn_qk:H=0.7286,top10E=0.30,eRank=152.5,q75/q25=81.40 attn_vo:H=0.7475,top10E=0.20,eRank=217.8,q75/q25=inf mlp_w1:H=0.7451,top10E=0.32,eRank=161.9,q75/q25=15.67 mlp_w2:H=0.8381,top10E=0.15,eRank=265.9,q75/q25=26.70 vo_prod:H=0.6180,top10E=0.29,eRank=87.9,q75/q25=inf train_time:337353ms step_avg:73.34ms +[2025-09-02 14:29:59] [Rank 0] PRINT: step:4600/10000 val_loss:4.1347 svd_entropy: attn_qk:H=0.7286,top10E=0.30,eRank=152.5,q75/q25=81.40 attn_vo:H=0.7475,top10E=0.20,eRank=217.8,q75/q25=inf mlp_w1:H=0.7451,top10E=0.32,eRank=161.9,q75/q25=15.67 mlp_w2:H=0.8381,top10E=0.15,eRank=265.9,q75/q25=26.70 vo_prod:H=0.6180,top10E=0.29,eRank=87.9,q75/q25=inf train_time:337353ms step_avg:73.34ms +[2025-09-02 14:29:59] [Rank 0] step:4601/10000 train_time:337365ms step_avg:73.32ms +[2025-09-02 14:29:59] [Rank 0] step:4601/10000 train_time:337365ms step_avg:73.32ms +[2025-09-02 14:30:01] [Rank 0] step:4621/10000 train_time:338740ms step_avg:73.30ms +[2025-09-02 14:30:01] [Rank 0] step:4621/10000 train_time:338740ms step_avg:73.30ms +[2025-09-02 14:30:02] [Rank 0] step:4641/10000 train_time:340266ms step_avg:73.32ms +[2025-09-02 
14:30:02] [Rank 0] step:4641/10000 train_time:340266ms step_avg:73.32ms +[2025-09-02 14:30:04] [Rank 0] step:4661/10000 train_time:341790ms step_avg:73.33ms +[2025-09-02 14:30:04] [Rank 0] step:4661/10000 train_time:341790ms step_avg:73.33ms +[2025-09-02 14:30:05] [Rank 0] step:4681/10000 train_time:343317ms step_avg:73.34ms +[2025-09-02 14:30:05] [Rank 0] step:4681/10000 train_time:343317ms step_avg:73.34ms +[2025-09-02 14:30:07] [Rank 0] step:4701/10000 train_time:344844ms step_avg:73.36ms +[2025-09-02 14:30:07] [Rank 0] step:4701/10000 train_time:344844ms step_avg:73.36ms +[2025-09-02 14:30:08] [Rank 0] step:4721/10000 train_time:346369ms step_avg:73.37ms +[2025-09-02 14:30:08] [Rank 0] step:4721/10000 train_time:346369ms step_avg:73.37ms +[2025-09-02 14:30:10] [Rank 0] step:4741/10000 train_time:347897ms step_avg:73.38ms +[2025-09-02 14:30:10] [Rank 0] step:4741/10000 train_time:347897ms step_avg:73.38ms +[2025-09-02 14:30:11] [Rank 0] step:4761/10000 train_time:349424ms step_avg:73.39ms +[2025-09-02 14:30:11] [Rank 0] step:4761/10000 train_time:349424ms step_avg:73.39ms +[2025-09-02 14:30:13] [Rank 0] step:4781/10000 train_time:350952ms step_avg:73.41ms +[2025-09-02 14:30:13] [Rank 0] step:4781/10000 train_time:350952ms step_avg:73.41ms +[2025-09-02 14:30:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:30:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:30:26] [Rank 0] PRINT: step:4800/10000 val_loss:4.1191 svd_entropy: attn_qk:H=0.7315,top10E=0.30,eRank=154.6,q75/q25=82.66 attn_vo:H=0.7505,top10E=0.20,eRank=220.8,q75/q25=inf mlp_w1:H=0.7497,top10E=0.31,eRank=166.4,q75/q25=16.16 mlp_w2:H=0.8405,top10E=0.15,eRank=270.2,q75/q25=27.01 vo_prod:H=0.6215,top10E=0.29,eRank=90.0,q75/q25=inf train_time:352634ms step_avg:73.47ms +[2025-09-02 14:30:26] [Rank 0] PRINT: step:4800/10000 val_loss:4.1191 svd_entropy: attn_qk:H=0.7315,top10E=0.30,eRank=154.6,q75/q25=82.66 attn_vo:H=0.7505,top10E=0.20,eRank=220.8,q75/q25=inf mlp_w1:H=0.7497,top10E=0.31,eRank=166.4,q75/q25=16.16 mlp_w2:H=0.8405,top10E=0.15,eRank=270.2,q75/q25=27.01 vo_prod:H=0.6215,top10E=0.29,eRank=90.0,q75/q25=inf train_time:352634ms step_avg:73.47ms +[2025-09-02 14:30:26] [Rank 0] step:4801/10000 train_time:352645ms step_avg:73.45ms +[2025-09-02 14:30:26] [Rank 0] step:4801/10000 train_time:352645ms step_avg:73.45ms +[2025-09-02 14:30:28] [Rank 0] step:4821/10000 train_time:354027ms step_avg:73.43ms +[2025-09-02 14:30:28] [Rank 0] step:4821/10000 train_time:354027ms step_avg:73.43ms +[2025-09-02 14:30:29] [Rank 0] step:4841/10000 train_time:355550ms step_avg:73.45ms +[2025-09-02 14:30:29] [Rank 0] step:4841/10000 train_time:355550ms step_avg:73.45ms +[2025-09-02 14:30:31] [Rank 0] step:4861/10000 train_time:357077ms step_avg:73.46ms +[2025-09-02 14:30:31] [Rank 0] step:4861/10000 train_time:357077ms step_avg:73.46ms +[2025-09-02 14:30:32] [Rank 0] step:4881/10000 train_time:358601ms step_avg:73.47ms +[2025-09-02 14:30:32] [Rank 0] step:4881/10000 train_time:358601ms step_avg:73.47ms +[2025-09-02 14:30:34] [Rank 0] step:4901/10000 train_time:360126ms step_avg:73.48ms +[2025-09-02 14:30:34] [Rank 0] step:4901/10000 train_time:360126ms step_avg:73.48ms +[2025-09-02 14:30:35] [Rank 0] step:4921/10000 train_time:361655ms step_avg:73.49ms +[2025-09-02 14:30:35] [Rank 0] step:4921/10000 train_time:361655ms step_avg:73.49ms +[2025-09-02 14:30:37] [Rank 0] 
step:4941/10000 train_time:363184ms step_avg:73.50ms +[2025-09-02 14:30:37] [Rank 0] step:4941/10000 train_time:363184ms step_avg:73.50ms +[2025-09-02 14:30:38] [Rank 0] step:4961/10000 train_time:364709ms step_avg:73.52ms +[2025-09-02 14:30:38] [Rank 0] step:4961/10000 train_time:364709ms step_avg:73.52ms +[2025-09-02 14:30:40] [Rank 0] step:4981/10000 train_time:366239ms step_avg:73.53ms +[2025-09-02 14:30:40] [Rank 0] step:4981/10000 train_time:366239ms step_avg:73.53ms +[2025-09-02 14:30:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:30:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:30:53] [Rank 0] PRINT: step:5000/10000 val_loss:4.0982 svd_entropy: attn_qk:H=0.7341,top10E=0.29,eRank=156.5,q75/q25=84.36 attn_vo:H=0.7533,top10E=0.20,eRank=223.7,q75/q25=inf mlp_w1:H=0.7539,top10E=0.30,eRank=170.6,q75/q25=16.69 mlp_w2:H=0.8426,top10E=0.15,eRank=274.0,q75/q25=27.02 vo_prod:H=0.6249,top10E=0.28,eRank=92.2,q75/q25=inf train_time:367920ms step_avg:73.58ms +[2025-09-02 14:30:53] [Rank 0] PRINT: step:5000/10000 val_loss:4.0982 svd_entropy: attn_qk:H=0.7341,top10E=0.29,eRank=156.5,q75/q25=84.36 attn_vo:H=0.7533,top10E=0.20,eRank=223.7,q75/q25=inf mlp_w1:H=0.7539,top10E=0.30,eRank=170.6,q75/q25=16.69 mlp_w2:H=0.8426,top10E=0.15,eRank=274.0,q75/q25=27.02 vo_prod:H=0.6249,top10E=0.28,eRank=92.2,q75/q25=inf train_time:367920ms step_avg:73.58ms +[2025-09-02 14:30:53] [Rank 0] step:5001/10000 train_time:367931ms step_avg:73.57ms +[2025-09-02 14:30:53] [Rank 0] step:5001/10000 train_time:367931ms step_avg:73.57ms +[2025-09-02 14:30:55] [Rank 0] step:5021/10000 train_time:369313ms step_avg:73.55ms +[2025-09-02 14:30:55] [Rank 0] step:5021/10000 train_time:369313ms step_avg:73.55ms +[2025-09-02 14:30:56] [Rank 0] step:5041/10000 train_time:370840ms step_avg:73.56ms +[2025-09-02 
14:30:56] [Rank 0] step:5041/10000 train_time:370840ms step_avg:73.56ms +[2025-09-02 14:30:58] [Rank 0] step:5061/10000 train_time:372366ms step_avg:73.58ms +[2025-09-02 14:30:58] [Rank 0] step:5061/10000 train_time:372366ms step_avg:73.58ms +[2025-09-02 14:30:59] [Rank 0] step:5081/10000 train_time:373894ms step_avg:73.59ms +[2025-09-02 14:30:59] [Rank 0] step:5081/10000 train_time:373894ms step_avg:73.59ms +[2025-09-02 14:31:01] [Rank 0] step:5101/10000 train_time:375423ms step_avg:73.60ms +[2025-09-02 14:31:01] [Rank 0] step:5101/10000 train_time:375423ms step_avg:73.60ms +[2025-09-02 14:31:02] [Rank 0] step:5121/10000 train_time:376951ms step_avg:73.61ms +[2025-09-02 14:31:02] [Rank 0] step:5121/10000 train_time:376951ms step_avg:73.61ms +[2025-09-02 14:31:04] [Rank 0] step:5141/10000 train_time:378484ms step_avg:73.62ms +[2025-09-02 14:31:04] [Rank 0] step:5141/10000 train_time:378484ms step_avg:73.62ms +[2025-09-02 14:31:05] [Rank 0] step:5161/10000 train_time:380015ms step_avg:73.63ms +[2025-09-02 14:31:05] [Rank 0] step:5161/10000 train_time:380015ms step_avg:73.63ms +[2025-09-02 14:31:07] [Rank 0] step:5181/10000 train_time:381548ms step_avg:73.64ms +[2025-09-02 14:31:07] [Rank 0] step:5181/10000 train_time:381548ms step_avg:73.64ms +[2025-09-02 14:31:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:31:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:31:20] [Rank 0] PRINT: step:5200/10000 val_loss:4.0759 svd_entropy: attn_qk:H=0.7367,top10E=0.29,eRank=158.4,q75/q25=85.66 attn_vo:H=0.7560,top10E=0.19,eRank=226.5,q75/q25=inf mlp_w1:H=0.7579,top10E=0.30,eRank=174.6,q75/q25=17.20 mlp_w2:H=0.8445,top10E=0.14,eRank=277.5,q75/q25=27.20 vo_prod:H=0.6280,top10E=0.28,eRank=94.2,q75/q25=inf train_time:383259ms step_avg:73.70ms +[2025-09-02 14:31:20] [Rank 0] PRINT: step:5200/10000 val_loss:4.0759 svd_entropy: attn_qk:H=0.7367,top10E=0.29,eRank=158.4,q75/q25=85.66 attn_vo:H=0.7560,top10E=0.19,eRank=226.5,q75/q25=inf mlp_w1:H=0.7579,top10E=0.30,eRank=174.6,q75/q25=17.20 mlp_w2:H=0.8445,top10E=0.14,eRank=277.5,q75/q25=27.20 vo_prod:H=0.6280,top10E=0.28,eRank=94.2,q75/q25=inf train_time:383259ms step_avg:73.70ms +[2025-09-02 14:31:20] [Rank 0] step:5201/10000 train_time:383270ms step_avg:73.69ms +[2025-09-02 14:31:20] [Rank 0] step:5201/10000 train_time:383270ms step_avg:73.69ms +[2025-09-02 14:31:22] [Rank 0] step:5221/10000 train_time:384672ms step_avg:73.68ms +[2025-09-02 14:31:22] [Rank 0] step:5221/10000 train_time:384672ms step_avg:73.68ms +[2025-09-02 14:31:23] [Rank 0] step:5241/10000 train_time:386228ms step_avg:73.69ms +[2025-09-02 14:31:23] [Rank 0] step:5241/10000 train_time:386228ms step_avg:73.69ms +[2025-09-02 14:31:25] [Rank 0] step:5261/10000 train_time:387784ms step_avg:73.71ms +[2025-09-02 14:31:25] [Rank 0] step:5261/10000 train_time:387784ms step_avg:73.71ms +[2025-09-02 14:31:26] [Rank 0] step:5281/10000 train_time:389342ms step_avg:73.72ms +[2025-09-02 14:31:26] [Rank 0] step:5281/10000 train_time:389342ms step_avg:73.72ms +[2025-09-02 14:31:28] [Rank 0] step:5301/10000 train_time:390908ms step_avg:73.74ms +[2025-09-02 14:31:28] [Rank 0] step:5301/10000 train_time:390908ms step_avg:73.74ms +[2025-09-02 14:31:30] [Rank 0] step:5321/10000 train_time:392467ms step_avg:73.76ms +[2025-09-02 14:31:30] [Rank 0] step:5321/10000 train_time:392467ms step_avg:73.76ms +[2025-09-02 14:31:31] [Rank 0] 
step:5341/10000 train_time:394022ms step_avg:73.77ms +[2025-09-02 14:31:31] [Rank 0] step:5341/10000 train_time:394022ms step_avg:73.77ms +[2025-09-02 14:31:33] [Rank 0] step:5361/10000 train_time:395584ms step_avg:73.79ms +[2025-09-02 14:31:33] [Rank 0] step:5361/10000 train_time:395584ms step_avg:73.79ms +[2025-09-02 14:31:34] [Rank 0] step:5381/10000 train_time:397143ms step_avg:73.80ms +[2025-09-02 14:31:34] [Rank 0] step:5381/10000 train_time:397143ms step_avg:73.80ms +[2025-09-02 14:31:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:31:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:31:47] [Rank 0] PRINT: step:5400/10000 val_loss:4.0569 svd_entropy: attn_qk:H=0.7390,top10E=0.28,eRank=160.2,q75/q25=86.61 attn_vo:H=0.7585,top10E=0.19,eRank=229.2,q75/q25=inf mlp_w1:H=0.7617,top10E=0.29,eRank=178.6,q75/q25=17.73 mlp_w2:H=0.8462,top10E=0.14,eRank=280.7,q75/q25=27.50 vo_prod:H=0.6310,top10E=0.27,eRank=96.2,q75/q25=inf train_time:398859ms step_avg:73.86ms +[2025-09-02 14:31:47] [Rank 0] PRINT: step:5400/10000 val_loss:4.0569 svd_entropy: attn_qk:H=0.7390,top10E=0.28,eRank=160.2,q75/q25=86.61 attn_vo:H=0.7585,top10E=0.19,eRank=229.2,q75/q25=inf mlp_w1:H=0.7617,top10E=0.29,eRank=178.6,q75/q25=17.73 mlp_w2:H=0.8462,top10E=0.14,eRank=280.7,q75/q25=27.50 vo_prod:H=0.6310,top10E=0.27,eRank=96.2,q75/q25=inf train_time:398859ms step_avg:73.86ms +[2025-09-02 14:31:47] [Rank 0] step:5401/10000 train_time:398871ms step_avg:73.85ms +[2025-09-02 14:31:47] [Rank 0] step:5401/10000 train_time:398871ms step_avg:73.85ms +[2025-09-02 14:31:49] [Rank 0] step:5421/10000 train_time:400301ms step_avg:73.84ms +[2025-09-02 14:31:49] [Rank 0] step:5421/10000 train_time:400301ms step_avg:73.84ms +[2025-09-02 14:31:51] [Rank 0] step:5441/10000 train_time:401853ms step_avg:73.86ms +[2025-09-02 
14:31:51] [Rank 0] step:5441/10000 train_time:401853ms step_avg:73.86ms +[2025-09-02 14:31:52] [Rank 0] step:5461/10000 train_time:403414ms step_avg:73.87ms +[2025-09-02 14:31:52] [Rank 0] step:5461/10000 train_time:403414ms step_avg:73.87ms +[2025-09-02 14:31:54] [Rank 0] step:5481/10000 train_time:404973ms step_avg:73.89ms +[2025-09-02 14:31:54] [Rank 0] step:5481/10000 train_time:404973ms step_avg:73.89ms +[2025-09-02 14:31:55] [Rank 0] step:5501/10000 train_time:406537ms step_avg:73.90ms +[2025-09-02 14:31:55] [Rank 0] step:5501/10000 train_time:406537ms step_avg:73.90ms +[2025-09-02 14:31:57] [Rank 0] step:5521/10000 train_time:408100ms step_avg:73.92ms +[2025-09-02 14:31:57] [Rank 0] step:5521/10000 train_time:408100ms step_avg:73.92ms +[2025-09-02 14:31:58] [Rank 0] step:5541/10000 train_time:409658ms step_avg:73.93ms +[2025-09-02 14:31:58] [Rank 0] step:5541/10000 train_time:409658ms step_avg:73.93ms +[2025-09-02 14:32:00] [Rank 0] step:5561/10000 train_time:411217ms step_avg:73.95ms +[2025-09-02 14:32:00] [Rank 0] step:5561/10000 train_time:411217ms step_avg:73.95ms +[2025-09-02 14:32:02] [Rank 0] step:5581/10000 train_time:412776ms step_avg:73.96ms +[2025-09-02 14:32:02] [Rank 0] step:5581/10000 train_time:412776ms step_avg:73.96ms +[2025-09-02 14:32:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:32:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:32:15] [Rank 0] PRINT: step:5600/10000 val_loss:4.0420 svd_entropy: attn_qk:H=0.7414,top10E=0.28,eRank=162.1,q75/q25=87.65 attn_vo:H=0.7608,top10E=0.19,eRank=231.7,q75/q25=inf mlp_w1:H=0.7651,top10E=0.29,eRank=182.3,q75/q25=18.21 mlp_w2:H=0.8478,top10E=0.14,eRank=283.8,q75/q25=27.75 vo_prod:H=0.6337,top10E=0.27,eRank=98.0,q75/q25=inf train_time:414493ms step_avg:74.02ms +[2025-09-02 14:32:15] [Rank 0] PRINT: step:5600/10000 val_loss:4.0420 svd_entropy: attn_qk:H=0.7414,top10E=0.28,eRank=162.1,q75/q25=87.65 attn_vo:H=0.7608,top10E=0.19,eRank=231.7,q75/q25=inf mlp_w1:H=0.7651,top10E=0.29,eRank=182.3,q75/q25=18.21 mlp_w2:H=0.8478,top10E=0.14,eRank=283.8,q75/q25=27.75 vo_prod:H=0.6337,top10E=0.27,eRank=98.0,q75/q25=inf train_time:414493ms step_avg:74.02ms +[2025-09-02 14:32:15] [Rank 0] step:5601/10000 train_time:414504ms step_avg:74.01ms +[2025-09-02 14:32:15] [Rank 0] step:5601/10000 train_time:414504ms step_avg:74.01ms +[2025-09-02 14:32:17] [Rank 0] step:5621/10000 train_time:415930ms step_avg:74.00ms +[2025-09-02 14:32:17] [Rank 0] step:5621/10000 train_time:415930ms step_avg:74.00ms +[2025-09-02 14:32:18] [Rank 0] step:5641/10000 train_time:417490ms step_avg:74.01ms +[2025-09-02 14:32:18] [Rank 0] step:5641/10000 train_time:417490ms step_avg:74.01ms +[2025-09-02 14:32:20] [Rank 0] step:5661/10000 train_time:419045ms step_avg:74.02ms +[2025-09-02 14:32:20] [Rank 0] step:5661/10000 train_time:419045ms step_avg:74.02ms +[2025-09-02 14:32:21] [Rank 0] step:5681/10000 train_time:420606ms step_avg:74.04ms +[2025-09-02 14:32:21] [Rank 0] step:5681/10000 train_time:420606ms step_avg:74.04ms +[2025-09-02 14:32:23] [Rank 0] step:5701/10000 train_time:422163ms step_avg:74.05ms +[2025-09-02 14:32:23] [Rank 0] step:5701/10000 train_time:422163ms step_avg:74.05ms +[2025-09-02 14:32:24] [Rank 0] step:5721/10000 train_time:423723ms step_avg:74.06ms +[2025-09-02 14:32:24] [Rank 0] step:5721/10000 train_time:423723ms step_avg:74.06ms +[2025-09-02 14:32:26] [Rank 0] 
step:5741/10000 train_time:425282ms step_avg:74.08ms +[2025-09-02 14:32:26] [Rank 0] step:5741/10000 train_time:425282ms step_avg:74.08ms +[2025-09-02 14:32:28] [Rank 0] step:5761/10000 train_time:426841ms step_avg:74.09ms +[2025-09-02 14:32:28] [Rank 0] step:5761/10000 train_time:426841ms step_avg:74.09ms +[2025-09-02 14:32:29] [Rank 0] step:5781/10000 train_time:428401ms step_avg:74.10ms +[2025-09-02 14:32:29] [Rank 0] step:5781/10000 train_time:428401ms step_avg:74.10ms +[2025-09-02 14:32:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:32:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:32:42] [Rank 0] PRINT: step:5800/10000 val_loss:4.0315 svd_entropy: attn_qk:H=0.7436,top10E=0.28,eRank=163.8,q75/q25=88.55 attn_vo:H=0.7630,top10E=0.18,eRank=234.1,q75/q25=inf mlp_w1:H=0.7684,top10E=0.28,eRank=185.9,q75/q25=18.73 mlp_w2:H=0.8494,top10E=0.14,eRank=286.8,q75/q25=27.73 vo_prod:H=0.6362,top10E=0.27,eRank=99.7,q75/q25=inf train_time:430121ms step_avg:74.16ms +[2025-09-02 14:32:42] [Rank 0] PRINT: step:5800/10000 val_loss:4.0315 svd_entropy: attn_qk:H=0.7436,top10E=0.28,eRank=163.8,q75/q25=88.55 attn_vo:H=0.7630,top10E=0.18,eRank=234.1,q75/q25=inf mlp_w1:H=0.7684,top10E=0.28,eRank=185.9,q75/q25=18.73 mlp_w2:H=0.8494,top10E=0.14,eRank=286.8,q75/q25=27.73 vo_prod:H=0.6362,top10E=0.27,eRank=99.7,q75/q25=inf train_time:430121ms step_avg:74.16ms +[2025-09-02 14:32:43] [Rank 0] step:5801/10000 train_time:430132ms step_avg:74.15ms +[2025-09-02 14:32:43] [Rank 0] step:5801/10000 train_time:430132ms step_avg:74.15ms +[2025-09-02 14:32:44] [Rank 0] step:5821/10000 train_time:431554ms step_avg:74.14ms +[2025-09-02 14:32:44] [Rank 0] step:5821/10000 train_time:431554ms step_avg:74.14ms +[2025-09-02 14:32:46] [Rank 0] step:5841/10000 train_time:433111ms step_avg:74.15ms +[2025-09-02 
14:32:46] [Rank 0] step:5841/10000 train_time:433111ms step_avg:74.15ms +[2025-09-02 14:32:47] [Rank 0] step:5861/10000 train_time:434672ms step_avg:74.16ms +[2025-09-02 14:32:47] [Rank 0] step:5861/10000 train_time:434672ms step_avg:74.16ms +[2025-09-02 14:32:49] [Rank 0] step:5881/10000 train_time:436236ms step_avg:74.18ms +[2025-09-02 14:32:49] [Rank 0] step:5881/10000 train_time:436236ms step_avg:74.18ms +[2025-09-02 14:32:50] [Rank 0] step:5901/10000 train_time:437797ms step_avg:74.19ms +[2025-09-02 14:32:50] [Rank 0] step:5901/10000 train_time:437797ms step_avg:74.19ms +[2025-09-02 14:32:52] [Rank 0] step:5921/10000 train_time:439358ms step_avg:74.20ms +[2025-09-02 14:32:52] [Rank 0] step:5921/10000 train_time:439358ms step_avg:74.20ms +[2025-09-02 14:32:54] [Rank 0] step:5941/10000 train_time:440943ms step_avg:74.22ms +[2025-09-02 14:32:54] [Rank 0] step:5941/10000 train_time:440943ms step_avg:74.22ms +[2025-09-02 14:32:55] [Rank 0] step:5961/10000 train_time:442512ms step_avg:74.23ms +[2025-09-02 14:32:55] [Rank 0] step:5961/10000 train_time:442512ms step_avg:74.23ms +[2025-09-02 14:32:57] [Rank 0] step:5981/10000 train_time:444074ms step_avg:74.25ms +[2025-09-02 14:32:57] [Rank 0] step:5981/10000 train_time:444074ms step_avg:74.25ms +[2025-09-02 14:32:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:32:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:33:10] [Rank 0] PRINT: step:6000/10000 val_loss:4.0062 svd_entropy: attn_qk:H=0.7457,top10E=0.28,eRank=165.5,q75/q25=89.42 attn_vo:H=0.7651,top10E=0.18,eRank=236.5,q75/q25=inf mlp_w1:H=0.7716,top10E=0.28,eRank=189.5,q75/q25=19.01 mlp_w2:H=0.8510,top10E=0.14,eRank=289.9,q75/q25=27.69 vo_prod:H=0.6386,top10E=0.26,eRank=101.4,q75/q25=inf train_time:445790ms step_avg:74.30ms +[2025-09-02 14:33:10] [Rank 0] PRINT: step:6000/10000 val_loss:4.0062 svd_entropy: attn_qk:H=0.7457,top10E=0.28,eRank=165.5,q75/q25=89.42 attn_vo:H=0.7651,top10E=0.18,eRank=236.5,q75/q25=inf mlp_w1:H=0.7716,top10E=0.28,eRank=189.5,q75/q25=19.01 mlp_w2:H=0.8510,top10E=0.14,eRank=289.9,q75/q25=27.69 vo_prod:H=0.6386,top10E=0.26,eRank=101.4,q75/q25=inf train_time:445790ms step_avg:74.30ms +[2025-09-02 14:33:10] [Rank 0] step:6001/10000 train_time:445801ms step_avg:74.29ms +[2025-09-02 14:33:10] [Rank 0] step:6001/10000 train_time:445801ms step_avg:74.29ms +[2025-09-02 14:33:12] [Rank 0] step:6021/10000 train_time:447220ms step_avg:74.28ms +[2025-09-02 14:33:12] [Rank 0] step:6021/10000 train_time:447220ms step_avg:74.28ms +[2025-09-02 14:33:13] [Rank 0] step:6041/10000 train_time:448779ms step_avg:74.29ms +[2025-09-02 14:33:13] [Rank 0] step:6041/10000 train_time:448779ms step_avg:74.29ms +[2025-09-02 14:33:15] [Rank 0] step:6061/10000 train_time:450348ms step_avg:74.30ms +[2025-09-02 14:33:15] [Rank 0] step:6061/10000 train_time:450348ms step_avg:74.30ms +[2025-09-02 14:33:16] [Rank 0] step:6081/10000 train_time:451911ms step_avg:74.32ms +[2025-09-02 14:33:16] [Rank 0] step:6081/10000 train_time:451911ms step_avg:74.32ms +[2025-09-02 14:33:18] [Rank 0] step:6101/10000 train_time:453477ms step_avg:74.33ms +[2025-09-02 14:33:18] [Rank 0] step:6101/10000 train_time:453477ms step_avg:74.33ms +[2025-09-02 14:33:20] [Rank 0] step:6121/10000 train_time:455106ms step_avg:74.35ms +[2025-09-02 14:33:20] [Rank 0] step:6121/10000 train_time:455106ms step_avg:74.35ms +[2025-09-02 14:33:21] [Rank 
0] step:6141/10000 train_time:456678ms step_avg:74.37ms +[2025-09-02 14:33:21] [Rank 0] step:6141/10000 train_time:456678ms step_avg:74.37ms +[2025-09-02 14:33:23] [Rank 0] step:6161/10000 train_time:458245ms step_avg:74.38ms +[2025-09-02 14:33:23] [Rank 0] step:6161/10000 train_time:458245ms step_avg:74.38ms +[2025-09-02 14:33:24] [Rank 0] step:6181/10000 train_time:459809ms step_avg:74.39ms +[2025-09-02 14:33:24] [Rank 0] step:6181/10000 train_time:459809ms step_avg:74.39ms +[2025-09-02 14:33:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:33:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:33:37] [Rank 0] PRINT: step:6200/10000 val_loss:3.9906 svd_entropy: attn_qk:H=0.7477,top10E=0.27,eRank=167.1,q75/q25=90.41 attn_vo:H=0.7671,top10E=0.18,eRank=238.7,q75/q25=inf mlp_w1:H=0.7746,top10E=0.27,eRank=192.8,q75/q25=19.50 mlp_w2:H=0.8524,top10E=0.14,eRank=292.7,q75/q25=27.76 vo_prod:H=0.6408,top10E=0.26,eRank=103.0,q75/q25=inf train_time:461532ms step_avg:74.44ms +[2025-09-02 14:33:37] [Rank 0] PRINT: step:6200/10000 val_loss:3.9906 svd_entropy: attn_qk:H=0.7477,top10E=0.27,eRank=167.1,q75/q25=90.41 attn_vo:H=0.7671,top10E=0.18,eRank=238.7,q75/q25=inf mlp_w1:H=0.7746,top10E=0.27,eRank=192.8,q75/q25=19.50 mlp_w2:H=0.8524,top10E=0.14,eRank=292.7,q75/q25=27.76 vo_prod:H=0.6408,top10E=0.26,eRank=103.0,q75/q25=inf train_time:461532ms step_avg:74.44ms +[2025-09-02 14:33:38] [Rank 0] step:6201/10000 train_time:461543ms step_avg:74.43ms +[2025-09-02 14:33:38] [Rank 0] step:6201/10000 train_time:461543ms step_avg:74.43ms +[2025-09-02 14:33:39] [Rank 0] step:6221/10000 train_time:462954ms step_avg:74.42ms +[2025-09-02 14:33:39] [Rank 0] step:6221/10000 train_time:462954ms step_avg:74.42ms +[2025-09-02 14:33:41] [Rank 0] step:6241/10000 train_time:464512ms step_avg:74.43ms +[2025-09-02 
14:33:41] [Rank 0] step:6241/10000 train_time:464512ms step_avg:74.43ms +[2025-09-02 14:33:42] [Rank 0] step:6261/10000 train_time:466072ms step_avg:74.44ms +[2025-09-02 14:33:42] [Rank 0] step:6261/10000 train_time:466072ms step_avg:74.44ms +[2025-09-02 14:33:44] [Rank 0] step:6281/10000 train_time:467641ms step_avg:74.45ms +[2025-09-02 14:33:44] [Rank 0] step:6281/10000 train_time:467641ms step_avg:74.45ms +[2025-09-02 14:33:45] [Rank 0] step:6301/10000 train_time:469206ms step_avg:74.47ms +[2025-09-02 14:33:45] [Rank 0] step:6301/10000 train_time:469206ms step_avg:74.47ms +[2025-09-02 14:33:47] [Rank 0] step:6321/10000 train_time:470766ms step_avg:74.48ms +[2025-09-02 14:33:47] [Rank 0] step:6321/10000 train_time:470766ms step_avg:74.48ms +[2025-09-02 14:33:49] [Rank 0] step:6341/10000 train_time:472336ms step_avg:74.49ms +[2025-09-02 14:33:49] [Rank 0] step:6341/10000 train_time:472336ms step_avg:74.49ms +[2025-09-02 14:33:50] [Rank 0] step:6361/10000 train_time:473901ms step_avg:74.50ms +[2025-09-02 14:33:50] [Rank 0] step:6361/10000 train_time:473901ms step_avg:74.50ms +[2025-09-02 14:33:52] [Rank 0] step:6381/10000 train_time:475473ms step_avg:74.51ms +[2025-09-02 14:33:52] [Rank 0] step:6381/10000 train_time:475473ms step_avg:74.51ms +[2025-09-02 14:33:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:33:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:34:05] [Rank 0] PRINT: step:6400/10000 val_loss:3.9710 svd_entropy: attn_qk:H=0.7495,top10E=0.27,eRank=168.6,q75/q25=90.21 attn_vo:H=0.7690,top10E=0.18,eRank=240.8,q75/q25=inf mlp_w1:H=0.7772,top10E=0.27,eRank=195.7,q75/q25=19.93 mlp_w2:H=0.8536,top10E=0.13,eRank=295.2,q75/q25=27.82 vo_prod:H=0.6432,top10E=0.26,eRank=104.7,q75/q25=inf train_time:477192ms step_avg:74.56ms +[2025-09-02 14:34:05] [Rank 0] PRINT: step:6400/10000 val_loss:3.9710 svd_entropy: attn_qk:H=0.7495,top10E=0.27,eRank=168.6,q75/q25=90.21 attn_vo:H=0.7690,top10E=0.18,eRank=240.8,q75/q25=inf mlp_w1:H=0.7772,top10E=0.27,eRank=195.7,q75/q25=19.93 mlp_w2:H=0.8536,top10E=0.13,eRank=295.2,q75/q25=27.82 vo_prod:H=0.6432,top10E=0.26,eRank=104.7,q75/q25=inf train_time:477192ms step_avg:74.56ms +[2025-09-02 14:34:05] [Rank 0] step:6401/10000 train_time:477204ms step_avg:74.55ms +[2025-09-02 14:34:05] [Rank 0] step:6401/10000 train_time:477204ms step_avg:74.55ms +[2025-09-02 14:34:07] [Rank 0] step:6421/10000 train_time:478615ms step_avg:74.54ms +[2025-09-02 14:34:07] [Rank 0] step:6421/10000 train_time:478615ms step_avg:74.54ms +[2025-09-02 14:34:08] [Rank 0] step:6441/10000 train_time:480178ms step_avg:74.55ms +[2025-09-02 14:34:08] [Rank 0] step:6441/10000 train_time:480178ms step_avg:74.55ms +[2025-09-02 14:34:10] [Rank 0] step:6461/10000 train_time:481745ms step_avg:74.56ms +[2025-09-02 14:34:10] [Rank 0] step:6461/10000 train_time:481745ms step_avg:74.56ms +[2025-09-02 14:34:11] [Rank 0] step:6481/10000 train_time:483317ms step_avg:74.57ms +[2025-09-02 14:34:11] [Rank 0] step:6481/10000 train_time:483317ms step_avg:74.57ms +[2025-09-02 14:34:13] [Rank 0] step:6501/10000 train_time:484877ms step_avg:74.58ms +[2025-09-02 14:34:13] [Rank 0] step:6501/10000 train_time:484877ms step_avg:74.58ms +[2025-09-02 14:34:14] [Rank 0] step:6521/10000 train_time:486438ms step_avg:74.60ms +[2025-09-02 14:34:14] [Rank 0] step:6521/10000 train_time:486438ms step_avg:74.60ms +[2025-09-02 14:34:16] [Rank 
0] step:6541/10000 train_time:488002ms step_avg:74.61ms +[2025-09-02 14:34:16] [Rank 0] step:6541/10000 train_time:488002ms step_avg:74.61ms +[2025-09-02 14:34:18] [Rank 0] step:6561/10000 train_time:489571ms step_avg:74.62ms +[2025-09-02 14:34:18] [Rank 0] step:6561/10000 train_time:489571ms step_avg:74.62ms +[2025-09-02 14:34:19] [Rank 0] step:6581/10000 train_time:491135ms step_avg:74.63ms +[2025-09-02 14:34:19] [Rank 0] step:6581/10000 train_time:491135ms step_avg:74.63ms +[2025-09-02 14:34:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:34:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:34:33] [Rank 0] PRINT: step:6600/10000 val_loss:3.9605 svd_entropy: attn_qk:H=0.7511,top10E=0.27,eRank=170.0,q75/q25=91.13 attn_vo:H=0.7707,top10E=0.17,eRank=242.8,q75/q25=inf mlp_w1:H=0.7795,top10E=0.27,eRank=198.4,q75/q25=20.31 mlp_w2:H=0.8548,top10E=0.13,eRank=297.5,q75/q25=28.05 vo_prod:H=0.6454,top10E=0.25,eRank=106.3,q75/q25=inf train_time:492858ms step_avg:74.68ms +[2025-09-02 14:34:33] [Rank 0] PRINT: step:6600/10000 val_loss:3.9605 svd_entropy: attn_qk:H=0.7511,top10E=0.27,eRank=170.0,q75/q25=91.13 attn_vo:H=0.7707,top10E=0.17,eRank=242.8,q75/q25=inf mlp_w1:H=0.7795,top10E=0.27,eRank=198.4,q75/q25=20.31 mlp_w2:H=0.8548,top10E=0.13,eRank=297.5,q75/q25=28.05 vo_prod:H=0.6454,top10E=0.25,eRank=106.3,q75/q25=inf train_time:492858ms step_avg:74.68ms +[2025-09-02 14:34:33] [Rank 0] step:6601/10000 train_time:492870ms step_avg:74.67ms +[2025-09-02 14:34:33] [Rank 0] step:6601/10000 train_time:492870ms step_avg:74.67ms +[2025-09-02 14:34:34] [Rank 0] step:6621/10000 train_time:494280ms step_avg:74.65ms +[2025-09-02 14:34:34] [Rank 0] step:6621/10000 train_time:494280ms step_avg:74.65ms +[2025-09-02 14:34:36] [Rank 0] step:6641/10000 train_time:495849ms step_avg:74.66ms +[2025-09-02 
14:34:36] [Rank 0] step:6641/10000 train_time:495849ms step_avg:74.66ms +[2025-09-02 14:34:37] [Rank 0] step:6661/10000 train_time:497415ms step_avg:74.68ms +[2025-09-02 14:34:37] [Rank 0] step:6661/10000 train_time:497415ms step_avg:74.68ms +[2025-09-02 14:34:39] [Rank 0] step:6681/10000 train_time:498995ms step_avg:74.69ms +[2025-09-02 14:34:39] [Rank 0] step:6681/10000 train_time:498995ms step_avg:74.69ms +[2025-09-02 14:34:41] [Rank 0] step:6701/10000 train_time:500596ms step_avg:74.70ms +[2025-09-02 14:34:41] [Rank 0] step:6701/10000 train_time:500596ms step_avg:74.70ms +[2025-09-02 14:34:42] [Rank 0] step:6721/10000 train_time:502190ms step_avg:74.72ms +[2025-09-02 14:34:42] [Rank 0] step:6721/10000 train_time:502190ms step_avg:74.72ms +[2025-09-02 14:34:44] [Rank 0] step:6741/10000 train_time:503778ms step_avg:74.73ms +[2025-09-02 14:34:44] [Rank 0] step:6741/10000 train_time:503778ms step_avg:74.73ms +[2025-09-02 14:34:45] [Rank 0] step:6761/10000 train_time:505372ms step_avg:74.75ms +[2025-09-02 14:34:45] [Rank 0] step:6761/10000 train_time:505372ms step_avg:74.75ms +[2025-09-02 14:34:47] [Rank 0] step:6781/10000 train_time:506970ms step_avg:74.76ms +[2025-09-02 14:34:47] [Rank 0] step:6781/10000 train_time:506970ms step_avg:74.76ms +[2025-09-02 14:34:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:34:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:35:00] [Rank 0] PRINT: step:6800/10000 val_loss:3.9440 svd_entropy: attn_qk:H=0.7526,top10E=0.27,eRank=171.3,q75/q25=91.21 attn_vo:H=0.7722,top10E=0.17,eRank=244.5,q75/q25=inf mlp_w1:H=0.7816,top10E=0.26,eRank=200.8,q75/q25=20.74 mlp_w2:H=0.8559,top10E=0.13,eRank=299.6,q75/q25=28.08 vo_prod:H=0.6473,top10E=0.25,eRank=107.7,q75/q25=inf train_time:508726ms step_avg:74.81ms +[2025-09-02 14:35:00] [Rank 0] PRINT: step:6800/10000 val_loss:3.9440 svd_entropy: attn_qk:H=0.7526,top10E=0.27,eRank=171.3,q75/q25=91.21 attn_vo:H=0.7722,top10E=0.17,eRank=244.5,q75/q25=inf mlp_w1:H=0.7816,top10E=0.26,eRank=200.8,q75/q25=20.74 mlp_w2:H=0.8559,top10E=0.13,eRank=299.6,q75/q25=28.08 vo_prod:H=0.6473,top10E=0.25,eRank=107.7,q75/q25=inf train_time:508726ms step_avg:74.81ms +[2025-09-02 14:35:01] [Rank 0] step:6801/10000 train_time:508738ms step_avg:74.80ms +[2025-09-02 14:35:01] [Rank 0] step:6801/10000 train_time:508738ms step_avg:74.80ms +[2025-09-02 14:35:02] [Rank 0] step:6821/10000 train_time:510190ms step_avg:74.80ms +[2025-09-02 14:35:02] [Rank 0] step:6821/10000 train_time:510190ms step_avg:74.80ms +[2025-09-02 14:35:04] [Rank 0] step:6841/10000 train_time:511776ms step_avg:74.81ms +[2025-09-02 14:35:04] [Rank 0] step:6841/10000 train_time:511776ms step_avg:74.81ms +[2025-09-02 14:35:05] [Rank 0] step:6861/10000 train_time:513369ms step_avg:74.82ms +[2025-09-02 14:35:05] [Rank 0] step:6861/10000 train_time:513369ms step_avg:74.82ms +[2025-09-02 14:35:07] [Rank 0] step:6881/10000 train_time:514962ms step_avg:74.84ms +[2025-09-02 14:35:07] [Rank 0] step:6881/10000 train_time:514962ms step_avg:74.84ms +[2025-09-02 14:35:08] [Rank 0] step:6901/10000 train_time:516556ms step_avg:74.85ms +[2025-09-02 14:35:08] [Rank 0] step:6901/10000 train_time:516556ms step_avg:74.85ms +[2025-09-02 14:35:10] [Rank 0] step:6921/10000 train_time:518146ms step_avg:74.87ms +[2025-09-02 14:35:10] [Rank 0] step:6921/10000 train_time:518146ms step_avg:74.87ms +[2025-09-02 14:35:12] [Rank 
0] step:6941/10000 train_time:519744ms step_avg:74.88ms +[2025-09-02 14:35:12] [Rank 0] step:6941/10000 train_time:519744ms step_avg:74.88ms +[2025-09-02 14:35:13] [Rank 0] step:6961/10000 train_time:521352ms step_avg:74.90ms +[2025-09-02 14:35:13] [Rank 0] step:6961/10000 train_time:521352ms step_avg:74.90ms +[2025-09-02 14:35:15] [Rank 0] step:6981/10000 train_time:522950ms step_avg:74.91ms +[2025-09-02 14:35:15] [Rank 0] step:6981/10000 train_time:522950ms step_avg:74.91ms +[2025-09-02 14:35:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:35:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:35:28] [Rank 0] PRINT: step:7000/10000 val_loss:3.9278 svd_entropy: attn_qk:H=0.7540,top10E=0.26,eRank=172.5,q75/q25=91.24 attn_vo:H=0.7736,top10E=0.17,eRank=246.2,q75/q25=inf mlp_w1:H=0.7835,top10E=0.26,eRank=203.1,q75/q25=20.99 mlp_w2:H=0.8569,top10E=0.13,eRank=301.7,q75/q25=28.14 vo_prod:H=0.6491,top10E=0.25,eRank=109.1,q75/q25=inf train_time:524711ms step_avg:74.96ms +[2025-09-02 14:35:28] [Rank 0] PRINT: step:7000/10000 val_loss:3.9278 svd_entropy: attn_qk:H=0.7540,top10E=0.26,eRank=172.5,q75/q25=91.24 attn_vo:H=0.7736,top10E=0.17,eRank=246.2,q75/q25=inf mlp_w1:H=0.7835,top10E=0.26,eRank=203.1,q75/q25=20.99 mlp_w2:H=0.8569,top10E=0.13,eRank=301.7,q75/q25=28.14 vo_prod:H=0.6491,top10E=0.25,eRank=109.1,q75/q25=inf train_time:524711ms step_avg:74.96ms +[2025-09-02 14:35:28] [Rank 0] step:7001/10000 train_time:524723ms step_avg:74.95ms +[2025-09-02 14:35:28] [Rank 0] step:7001/10000 train_time:524723ms step_avg:74.95ms +[2025-09-02 14:35:30] [Rank 0] step:7021/10000 train_time:526177ms step_avg:74.94ms +[2025-09-02 14:35:30] [Rank 0] step:7021/10000 train_time:526177ms step_avg:74.94ms +[2025-09-02 14:35:32] [Rank 0] step:7041/10000 train_time:527770ms step_avg:74.96ms +[2025-09-02 
14:35:32] [Rank 0] step:7041/10000 train_time:527770ms step_avg:74.96ms +[2025-09-02 14:35:33] [Rank 0] step:7061/10000 train_time:529363ms step_avg:74.97ms +[2025-09-02 14:35:33] [Rank 0] step:7061/10000 train_time:529363ms step_avg:74.97ms +[2025-09-02 14:35:35] [Rank 0] step:7081/10000 train_time:530959ms step_avg:74.98ms +[2025-09-02 14:35:35] [Rank 0] step:7081/10000 train_time:530959ms step_avg:74.98ms +[2025-09-02 14:35:36] [Rank 0] step:7101/10000 train_time:532552ms step_avg:75.00ms +[2025-09-02 14:35:36] [Rank 0] step:7101/10000 train_time:532552ms step_avg:75.00ms +[2025-09-02 14:35:38] [Rank 0] step:7121/10000 train_time:534146ms step_avg:75.01ms +[2025-09-02 14:35:38] [Rank 0] step:7121/10000 train_time:534146ms step_avg:75.01ms +[2025-09-02 14:35:40] [Rank 0] step:7141/10000 train_time:535741ms step_avg:75.02ms +[2025-09-02 14:35:40] [Rank 0] step:7141/10000 train_time:535741ms step_avg:75.02ms +[2025-09-02 14:35:41] [Rank 0] step:7161/10000 train_time:537339ms step_avg:75.04ms +[2025-09-02 14:35:41] [Rank 0] step:7161/10000 train_time:537339ms step_avg:75.04ms +[2025-09-02 14:35:43] [Rank 0] step:7181/10000 train_time:538938ms step_avg:75.05ms +[2025-09-02 14:35:43] [Rank 0] step:7181/10000 train_time:538938ms step_avg:75.05ms +[2025-09-02 14:35:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:35:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:35:56] [Rank 0] PRINT: step:7200/10000 val_loss:3.9170 svd_entropy: attn_qk:H=0.7553,top10E=0.26,eRank=173.7,q75/q25=91.72 attn_vo:H=0.7749,top10E=0.17,eRank=247.7,q75/q25=inf mlp_w1:H=0.7851,top10E=0.26,eRank=205.0,q75/q25=21.19 mlp_w2:H=0.8579,top10E=0.13,eRank=303.7,q75/q25=28.03 vo_prod:H=0.6508,top10E=0.25,eRank=110.3,q75/q25=inf train_time:540699ms step_avg:75.10ms +[2025-09-02 14:35:56] [Rank 0] PRINT: step:7200/10000 val_loss:3.9170 svd_entropy: attn_qk:H=0.7553,top10E=0.26,eRank=173.7,q75/q25=91.72 attn_vo:H=0.7749,top10E=0.17,eRank=247.7,q75/q25=inf mlp_w1:H=0.7851,top10E=0.26,eRank=205.0,q75/q25=21.19 mlp_w2:H=0.8579,top10E=0.13,eRank=303.7,q75/q25=28.03 vo_prod:H=0.6508,top10E=0.25,eRank=110.3,q75/q25=inf train_time:540699ms step_avg:75.10ms +[2025-09-02 14:35:56] [Rank 0] step:7201/10000 train_time:540711ms step_avg:75.09ms +[2025-09-02 14:35:56] [Rank 0] step:7201/10000 train_time:540711ms step_avg:75.09ms +[2025-09-02 14:35:58] [Rank 0] step:7221/10000 train_time:542160ms step_avg:75.08ms +[2025-09-02 14:35:58] [Rank 0] step:7221/10000 train_time:542160ms step_avg:75.08ms +[2025-09-02 14:36:00] [Rank 0] step:7241/10000 train_time:543748ms step_avg:75.09ms +[2025-09-02 14:36:00] [Rank 0] step:7241/10000 train_time:543748ms step_avg:75.09ms +[2025-09-02 14:36:01] [Rank 0] step:7261/10000 train_time:545395ms step_avg:75.11ms +[2025-09-02 14:36:01] [Rank 0] step:7261/10000 train_time:545395ms step_avg:75.11ms +[2025-09-02 14:36:03] [Rank 0] step:7281/10000 train_time:547001ms step_avg:75.13ms +[2025-09-02 14:36:03] [Rank 0] step:7281/10000 train_time:547001ms step_avg:75.13ms +[2025-09-02 14:36:04] [Rank 0] step:7301/10000 train_time:548597ms step_avg:75.14ms +[2025-09-02 14:36:04] [Rank 0] step:7301/10000 train_time:548597ms step_avg:75.14ms +[2025-09-02 14:36:06] [Rank 0] step:7321/10000 train_time:550201ms step_avg:75.15ms +[2025-09-02 14:36:06] [Rank 0] step:7321/10000 train_time:550201ms step_avg:75.15ms +[2025-09-02 14:36:08] [Rank 
0] step:7341/10000 train_time:551800ms step_avg:75.17ms +[2025-09-02 14:36:08] [Rank 0] step:7341/10000 train_time:551800ms step_avg:75.17ms +[2025-09-02 14:36:09] [Rank 0] step:7361/10000 train_time:553399ms step_avg:75.18ms +[2025-09-02 14:36:09] [Rank 0] step:7361/10000 train_time:553399ms step_avg:75.18ms +[2025-09-02 14:36:11] [Rank 0] step:7381/10000 train_time:555003ms step_avg:75.19ms +[2025-09-02 14:36:11] [Rank 0] step:7381/10000 train_time:555003ms step_avg:75.19ms +[2025-09-02 14:36:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:36:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:36:24] [Rank 0] PRINT: step:7400/10000 val_loss:3.8993 svd_entropy: attn_qk:H=0.7564,top10E=0.26,eRank=174.7,q75/q25=91.62 attn_vo:H=0.7760,top10E=0.17,eRank=249.0,q75/q25=inf mlp_w1:H=0.7867,top10E=0.26,eRank=206.8,q75/q25=21.42 mlp_w2:H=0.8587,top10E=0.13,eRank=305.5,q75/q25=28.08 vo_prod:H=0.6521,top10E=0.25,eRank=111.4,q75/q25=inf train_time:556744ms step_avg:75.24ms +[2025-09-02 14:36:24] [Rank 0] PRINT: step:7400/10000 val_loss:3.8993 svd_entropy: attn_qk:H=0.7564,top10E=0.26,eRank=174.7,q75/q25=91.62 attn_vo:H=0.7760,top10E=0.17,eRank=249.0,q75/q25=inf mlp_w1:H=0.7867,top10E=0.26,eRank=206.8,q75/q25=21.42 mlp_w2:H=0.8587,top10E=0.13,eRank=305.5,q75/q25=28.08 vo_prod:H=0.6521,top10E=0.25,eRank=111.4,q75/q25=inf train_time:556744ms step_avg:75.24ms +[2025-09-02 14:36:24] [Rank 0] step:7401/10000 train_time:556756ms step_avg:75.23ms +[2025-09-02 14:36:24] [Rank 0] step:7401/10000 train_time:556756ms step_avg:75.23ms +[2025-09-02 14:36:26] [Rank 0] step:7421/10000 train_time:558212ms step_avg:75.22ms +[2025-09-02 14:36:26] [Rank 0] step:7421/10000 train_time:558212ms step_avg:75.22ms +[2025-09-02 14:36:28] [Rank 0] step:7441/10000 train_time:559805ms step_avg:75.23ms +[2025-09-02 
14:36:28] [Rank 0] step:7441/10000 train_time:559805ms step_avg:75.23ms +[2025-09-02 14:36:29] [Rank 0] step:7461/10000 train_time:561401ms step_avg:75.24ms +[2025-09-02 14:36:29] [Rank 0] step:7461/10000 train_time:561401ms step_avg:75.24ms +[2025-09-02 14:36:31] [Rank 0] step:7481/10000 train_time:563001ms step_avg:75.26ms +[2025-09-02 14:36:31] [Rank 0] step:7481/10000 train_time:563001ms step_avg:75.26ms +[2025-09-02 14:36:32] [Rank 0] step:7501/10000 train_time:564603ms step_avg:75.27ms +[2025-09-02 14:36:32] [Rank 0] step:7501/10000 train_time:564603ms step_avg:75.27ms +[2025-09-02 14:36:34] [Rank 0] step:7521/10000 train_time:566201ms step_avg:75.28ms +[2025-09-02 14:36:34] [Rank 0] step:7521/10000 train_time:566201ms step_avg:75.28ms +[2025-09-02 14:36:36] [Rank 0] step:7541/10000 train_time:567809ms step_avg:75.30ms +[2025-09-02 14:36:36] [Rank 0] step:7541/10000 train_time:567809ms step_avg:75.30ms +[2025-09-02 14:36:37] [Rank 0] step:7561/10000 train_time:569396ms step_avg:75.31ms +[2025-09-02 14:36:37] [Rank 0] step:7561/10000 train_time:569396ms step_avg:75.31ms +[2025-09-02 14:36:39] [Rank 0] step:7581/10000 train_time:571001ms step_avg:75.32ms +[2025-09-02 14:36:39] [Rank 0] step:7581/10000 train_time:571001ms step_avg:75.32ms +[2025-09-02 14:36:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:36:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:36:52] [Rank 0] PRINT: step:7600/10000 val_loss:3.8980 svd_entropy: attn_qk:H=0.7575,top10E=0.26,eRank=175.6,q75/q25=91.48 attn_vo:H=0.7770,top10E=0.17,eRank=250.3,q75/q25=inf mlp_w1:H=0.7880,top10E=0.26,eRank=208.5,q75/q25=21.50 mlp_w2:H=0.8596,top10E=0.13,eRank=307.1,q75/q25=28.11 vo_prod:H=0.6536,top10E=0.24,eRank=112.6,q75/q25=inf train_time:572769ms step_avg:75.36ms +[2025-09-02 14:36:52] [Rank 0] PRINT: step:7600/10000 val_loss:3.8980 svd_entropy: attn_qk:H=0.7575,top10E=0.26,eRank=175.6,q75/q25=91.48 attn_vo:H=0.7770,top10E=0.17,eRank=250.3,q75/q25=inf mlp_w1:H=0.7880,top10E=0.26,eRank=208.5,q75/q25=21.50 mlp_w2:H=0.8596,top10E=0.13,eRank=307.1,q75/q25=28.11 vo_prod:H=0.6536,top10E=0.24,eRank=112.6,q75/q25=inf train_time:572769ms step_avg:75.36ms +[2025-09-02 14:36:52] [Rank 0] step:7601/10000 train_time:572780ms step_avg:75.36ms +[2025-09-02 14:36:52] [Rank 0] step:7601/10000 train_time:572780ms step_avg:75.36ms +[2025-09-02 14:36:54] [Rank 0] step:7621/10000 train_time:574246ms step_avg:75.35ms +[2025-09-02 14:36:54] [Rank 0] step:7621/10000 train_time:574246ms step_avg:75.35ms +[2025-09-02 14:36:56] [Rank 0] step:7641/10000 train_time:575841ms step_avg:75.36ms +[2025-09-02 14:36:56] [Rank 0] step:7641/10000 train_time:575841ms step_avg:75.36ms +[2025-09-02 14:36:57] [Rank 0] step:7661/10000 train_time:577441ms step_avg:75.37ms +[2025-09-02 14:36:57] [Rank 0] step:7661/10000 train_time:577441ms step_avg:75.37ms +[2025-09-02 14:36:59] [Rank 0] step:7681/10000 train_time:579031ms step_avg:75.38ms +[2025-09-02 14:36:59] [Rank 0] step:7681/10000 train_time:579031ms step_avg:75.38ms +[2025-09-02 14:37:00] [Rank 0] step:7701/10000 train_time:580624ms step_avg:75.40ms +[2025-09-02 14:37:00] [Rank 0] step:7701/10000 train_time:580624ms step_avg:75.40ms +[2025-09-02 14:37:02] [Rank 0] step:7721/10000 train_time:582235ms step_avg:75.41ms +[2025-09-02 14:37:02] [Rank 0] step:7721/10000 train_time:582235ms step_avg:75.41ms +[2025-09-02 14:37:04] [Rank 
0] step:7741/10000 train_time:583834ms step_avg:75.42ms +[2025-09-02 14:37:04] [Rank 0] step:7741/10000 train_time:583834ms step_avg:75.42ms +[2025-09-02 14:37:05] [Rank 0] step:7761/10000 train_time:585438ms step_avg:75.43ms +[2025-09-02 14:37:05] [Rank 0] step:7761/10000 train_time:585438ms step_avg:75.43ms +[2025-09-02 14:37:07] [Rank 0] step:7781/10000 train_time:587042ms step_avg:75.45ms +[2025-09-02 14:37:07] [Rank 0] step:7781/10000 train_time:587042ms step_avg:75.45ms +[2025-09-02 14:37:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:37:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:37:20] [Rank 0] PRINT: step:7800/10000 val_loss:3.8810 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=176.5,q75/q25=91.48 attn_vo:H=0.7779,top10E=0.16,eRank=251.4,q75/q25=inf mlp_w1:H=0.7893,top10E=0.25,eRank=210.1,q75/q25=21.71 mlp_w2:H=0.8603,top10E=0.13,eRank=308.7,q75/q25=28.09 vo_prod:H=0.6548,top10E=0.24,eRank=113.5,q75/q25=inf train_time:588814ms step_avg:75.49ms +[2025-09-02 14:37:20] [Rank 0] PRINT: step:7800/10000 val_loss:3.8810 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=176.5,q75/q25=91.48 attn_vo:H=0.7779,top10E=0.16,eRank=251.4,q75/q25=inf mlp_w1:H=0.7893,top10E=0.25,eRank=210.1,q75/q25=21.71 mlp_w2:H=0.8603,top10E=0.13,eRank=308.7,q75/q25=28.09 vo_prod:H=0.6548,top10E=0.24,eRank=113.5,q75/q25=inf train_time:588814ms step_avg:75.49ms +[2025-09-02 14:37:20] [Rank 0] step:7801/10000 train_time:588825ms step_avg:75.48ms +[2025-09-02 14:37:20] [Rank 0] step:7801/10000 train_time:588825ms step_avg:75.48ms +[2025-09-02 14:37:22] [Rank 0] step:7821/10000 train_time:590275ms step_avg:75.47ms +[2025-09-02 14:37:22] [Rank 0] step:7821/10000 train_time:590275ms step_avg:75.47ms +[2025-09-02 14:37:24] [Rank 0] step:7841/10000 train_time:591871ms step_avg:75.48ms +[2025-09-02 
14:37:24] [Rank 0] step:7841/10000 train_time:591871ms step_avg:75.48ms +[2025-09-02 14:37:25] [Rank 0] step:7861/10000 train_time:593477ms step_avg:75.50ms +[2025-09-02 14:37:25] [Rank 0] step:7861/10000 train_time:593477ms step_avg:75.50ms +[2025-09-02 14:37:27] [Rank 0] step:7881/10000 train_time:595089ms step_avg:75.51ms +[2025-09-02 14:37:27] [Rank 0] step:7881/10000 train_time:595089ms step_avg:75.51ms +[2025-09-02 14:37:28] [Rank 0] step:7901/10000 train_time:596687ms step_avg:75.52ms +[2025-09-02 14:37:28] [Rank 0] step:7901/10000 train_time:596687ms step_avg:75.52ms +[2025-09-02 14:37:30] [Rank 0] step:7921/10000 train_time:598288ms step_avg:75.53ms +[2025-09-02 14:37:30] [Rank 0] step:7921/10000 train_time:598288ms step_avg:75.53ms +[2025-09-02 14:37:32] [Rank 0] step:7941/10000 train_time:599904ms step_avg:75.55ms +[2025-09-02 14:37:32] [Rank 0] step:7941/10000 train_time:599904ms step_avg:75.55ms +[2025-09-02 14:37:33] [Rank 0] step:7961/10000 train_time:601512ms step_avg:75.56ms +[2025-09-02 14:37:33] [Rank 0] step:7961/10000 train_time:601512ms step_avg:75.56ms +[2025-09-02 14:37:35] [Rank 0] step:7981/10000 train_time:603110ms step_avg:75.57ms +[2025-09-02 14:37:35] [Rank 0] step:7981/10000 train_time:603110ms step_avg:75.57ms +[2025-09-02 14:37:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:37:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:37:48] [Rank 0] PRINT: step:8000/10000 val_loss:3.8660 svd_entropy: attn_qk:H=0.7593,top10E=0.26,eRank=177.3,q75/q25=91.38 attn_vo:H=0.7788,top10E=0.16,eRank=252.5,q75/q25=inf mlp_w1:H=0.7904,top10E=0.25,eRank=211.4,q75/q25=21.85 mlp_w2:H=0.8610,top10E=0.13,eRank=310.1,q75/q25=28.12 vo_prod:H=0.6560,top10E=0.24,eRank=114.5,q75/q25=inf train_time:604870ms step_avg:75.61ms +[2025-09-02 14:37:48] [Rank 0] PRINT: step:8000/10000 val_loss:3.8660 svd_entropy: attn_qk:H=0.7593,top10E=0.26,eRank=177.3,q75/q25=91.38 attn_vo:H=0.7788,top10E=0.16,eRank=252.5,q75/q25=inf mlp_w1:H=0.7904,top10E=0.25,eRank=211.4,q75/q25=21.85 mlp_w2:H=0.8610,top10E=0.13,eRank=310.1,q75/q25=28.12 vo_prod:H=0.6560,top10E=0.24,eRank=114.5,q75/q25=inf train_time:604870ms step_avg:75.61ms +[2025-09-02 14:37:48] [Rank 0] step:8001/10000 train_time:604881ms step_avg:75.60ms +[2025-09-02 14:37:48] [Rank 0] step:8001/10000 train_time:604881ms step_avg:75.60ms +[2025-09-02 14:37:50] [Rank 0] step:8021/10000 train_time:606327ms step_avg:75.59ms +[2025-09-02 14:37:50] [Rank 0] step:8021/10000 train_time:606327ms step_avg:75.59ms +[2025-09-02 14:37:52] [Rank 0] step:8041/10000 train_time:607938ms step_avg:75.60ms +[2025-09-02 14:37:52] [Rank 0] step:8041/10000 train_time:607938ms step_avg:75.60ms +[2025-09-02 14:37:53] [Rank 0] step:8061/10000 train_time:609536ms step_avg:75.62ms +[2025-09-02 14:37:53] [Rank 0] step:8061/10000 train_time:609536ms step_avg:75.62ms +[2025-09-02 14:37:55] [Rank 0] step:8081/10000 train_time:611127ms step_avg:75.63ms +[2025-09-02 14:37:55] [Rank 0] step:8081/10000 train_time:611127ms step_avg:75.63ms +[2025-09-02 14:37:56] [Rank 0] step:8101/10000 train_time:612739ms step_avg:75.64ms +[2025-09-02 14:37:56] [Rank 0] step:8101/10000 train_time:612739ms step_avg:75.64ms +[2025-09-02 14:37:58] [Rank 0] step:8121/10000 train_time:614336ms step_avg:75.65ms +[2025-09-02 14:37:58] [Rank 0] step:8121/10000 train_time:614336ms step_avg:75.65ms +[2025-09-02 14:38:00] [Rank 
0] step:8141/10000 train_time:616039ms step_avg:75.67ms +[2025-09-02 14:38:00] [Rank 0] step:8141/10000 train_time:616039ms step_avg:75.67ms +[2025-09-02 14:38:01] [Rank 0] step:8161/10000 train_time:617752ms step_avg:75.70ms +[2025-09-02 14:38:01] [Rank 0] step:8161/10000 train_time:617752ms step_avg:75.70ms +[2025-09-02 14:38:03] [Rank 0] step:8181/10000 train_time:619386ms step_avg:75.71ms +[2025-09-02 14:38:03] [Rank 0] step:8181/10000 train_time:619386ms step_avg:75.71ms +[2025-09-02 14:38:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:38:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:38:17] [Rank 0] PRINT: step:8200/10000 val_loss:3.8565 svd_entropy: attn_qk:H=0.7601,top10E=0.26,eRank=178.0,q75/q25=91.14 attn_vo:H=0.7796,top10E=0.16,eRank=253.5,q75/q25=inf mlp_w1:H=0.7913,top10E=0.25,eRank=212.6,q75/q25=21.84 mlp_w2:H=0.8617,top10E=0.13,eRank=311.5,q75/q25=28.07 vo_prod:H=0.6572,top10E=0.24,eRank=115.4,q75/q25=inf train_time:621204ms step_avg:75.76ms +[2025-09-02 14:38:17] [Rank 0] PRINT: step:8200/10000 val_loss:3.8565 svd_entropy: attn_qk:H=0.7601,top10E=0.26,eRank=178.0,q75/q25=91.14 attn_vo:H=0.7796,top10E=0.16,eRank=253.5,q75/q25=inf mlp_w1:H=0.7913,top10E=0.25,eRank=212.6,q75/q25=21.84 mlp_w2:H=0.8617,top10E=0.13,eRank=311.5,q75/q25=28.07 vo_prod:H=0.6572,top10E=0.24,eRank=115.4,q75/q25=inf train_time:621204ms step_avg:75.76ms +[2025-09-02 14:38:17] [Rank 0] step:8201/10000 train_time:621216ms step_avg:75.75ms +[2025-09-02 14:38:17] [Rank 0] step:8201/10000 train_time:621216ms step_avg:75.75ms +[2025-09-02 14:38:18] [Rank 0] step:8221/10000 train_time:622712ms step_avg:75.75ms +[2025-09-02 14:38:18] [Rank 0] step:8221/10000 train_time:622712ms step_avg:75.75ms +[2025-09-02 14:38:20] [Rank 0] step:8241/10000 train_time:624348ms step_avg:75.76ms +[2025-09-02 
14:38:20] [Rank 0] step:8241/10000 train_time:624348ms step_avg:75.76ms +[2025-09-02 14:38:22] [Rank 0] step:8261/10000 train_time:625977ms step_avg:75.78ms +[2025-09-02 14:38:22] [Rank 0] step:8261/10000 train_time:625977ms step_avg:75.78ms +[2025-09-02 14:38:23] [Rank 0] step:8281/10000 train_time:627606ms step_avg:75.79ms +[2025-09-02 14:38:23] [Rank 0] step:8281/10000 train_time:627606ms step_avg:75.79ms +[2025-09-02 14:38:25] [Rank 0] step:8301/10000 train_time:629229ms step_avg:75.80ms +[2025-09-02 14:38:25] [Rank 0] step:8301/10000 train_time:629229ms step_avg:75.80ms +[2025-09-02 14:38:26] [Rank 0] step:8321/10000 train_time:630848ms step_avg:75.81ms +[2025-09-02 14:38:26] [Rank 0] step:8321/10000 train_time:630848ms step_avg:75.81ms +[2025-09-02 14:38:28] [Rank 0] step:8341/10000 train_time:632480ms step_avg:75.83ms +[2025-09-02 14:38:28] [Rank 0] step:8341/10000 train_time:632480ms step_avg:75.83ms +[2025-09-02 14:38:30] [Rank 0] step:8361/10000 train_time:634106ms step_avg:75.84ms +[2025-09-02 14:38:30] [Rank 0] step:8361/10000 train_time:634106ms step_avg:75.84ms +[2025-09-02 14:38:31] [Rank 0] step:8381/10000 train_time:635733ms step_avg:75.85ms +[2025-09-02 14:38:31] [Rank 0] step:8381/10000 train_time:635733ms step_avg:75.85ms +[2025-09-02 14:38:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:38:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:38:45] [Rank 0] PRINT: step:8400/10000 val_loss:3.8466 svd_entropy: attn_qk:H=0.7607,top10E=0.26,eRank=178.6,q75/q25=91.28 attn_vo:H=0.7803,top10E=0.16,eRank=254.3,q75/q25=inf mlp_w1:H=0.7923,top10E=0.25,eRank=213.7,q75/q25=21.97 mlp_w2:H=0.8623,top10E=0.13,eRank=312.7,q75/q25=28.01 vo_prod:H=0.6581,top10E=0.24,eRank=116.2,q75/q25=inf train_time:637524ms step_avg:75.90ms +[2025-09-02 14:38:45] [Rank 0] PRINT: step:8400/10000 val_loss:3.8466 svd_entropy: attn_qk:H=0.7607,top10E=0.26,eRank=178.6,q75/q25=91.28 attn_vo:H=0.7803,top10E=0.16,eRank=254.3,q75/q25=inf mlp_w1:H=0.7923,top10E=0.25,eRank=213.7,q75/q25=21.97 mlp_w2:H=0.8623,top10E=0.13,eRank=312.7,q75/q25=28.01 vo_prod:H=0.6581,top10E=0.24,eRank=116.2,q75/q25=inf train_time:637524ms step_avg:75.90ms +[2025-09-02 14:38:45] [Rank 0] step:8401/10000 train_time:637535ms step_avg:75.89ms +[2025-09-02 14:38:45] [Rank 0] step:8401/10000 train_time:637535ms step_avg:75.89ms +[2025-09-02 14:38:47] [Rank 0] step:8421/10000 train_time:639003ms step_avg:75.88ms +[2025-09-02 14:38:47] [Rank 0] step:8421/10000 train_time:639003ms step_avg:75.88ms +[2025-09-02 14:38:48] [Rank 0] step:8441/10000 train_time:640624ms step_avg:75.89ms +[2025-09-02 14:38:48] [Rank 0] step:8441/10000 train_time:640624ms step_avg:75.89ms +[2025-09-02 14:38:50] [Rank 0] step:8461/10000 train_time:642239ms step_avg:75.91ms +[2025-09-02 14:38:50] [Rank 0] step:8461/10000 train_time:642239ms step_avg:75.91ms +[2025-09-02 14:38:52] [Rank 0] step:8481/10000 train_time:643872ms step_avg:75.92ms +[2025-09-02 14:38:52] [Rank 0] step:8481/10000 train_time:643872ms step_avg:75.92ms +[2025-09-02 14:38:53] [Rank 0] step:8501/10000 train_time:645520ms step_avg:75.93ms +[2025-09-02 14:38:53] [Rank 0] step:8501/10000 train_time:645520ms step_avg:75.93ms +[2025-09-02 14:38:55] [Rank 0] step:8521/10000 train_time:647153ms step_avg:75.95ms +[2025-09-02 14:38:55] [Rank 0] step:8521/10000 train_time:647153ms step_avg:75.95ms +[2025-09-02 14:38:56] [Rank 
0] step:8541/10000 train_time:648790ms step_avg:75.96ms +[2025-09-02 14:38:56] [Rank 0] step:8541/10000 train_time:648790ms step_avg:75.96ms +[2025-09-02 14:38:58] [Rank 0] step:8561/10000 train_time:650420ms step_avg:75.97ms +[2025-09-02 14:38:58] [Rank 0] step:8561/10000 train_time:650420ms step_avg:75.97ms +[2025-09-02 14:39:00] [Rank 0] step:8581/10000 train_time:652049ms step_avg:75.99ms +[2025-09-02 14:39:00] [Rank 0] step:8581/10000 train_time:652049ms step_avg:75.99ms +[2025-09-02 14:39:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:39:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:39:13] [Rank 0] PRINT: step:8600/10000 val_loss:3.8361 svd_entropy: attn_qk:H=0.7614,top10E=0.26,eRank=179.2,q75/q25=91.06 attn_vo:H=0.7809,top10E=0.16,eRank=255.0,q75/q25=inf mlp_w1:H=0.7931,top10E=0.25,eRank=214.7,q75/q25=22.02 mlp_w2:H=0.8628,top10E=0.12,eRank=313.8,q75/q25=27.85 vo_prod:H=0.6590,top10E=0.24,eRank=117.0,q75/q25=inf train_time:653833ms step_avg:76.03ms +[2025-09-02 14:39:13] [Rank 0] PRINT: step:8600/10000 val_loss:3.8361 svd_entropy: attn_qk:H=0.7614,top10E=0.26,eRank=179.2,q75/q25=91.06 attn_vo:H=0.7809,top10E=0.16,eRank=255.0,q75/q25=inf mlp_w1:H=0.7931,top10E=0.25,eRank=214.7,q75/q25=22.02 mlp_w2:H=0.8628,top10E=0.12,eRank=313.8,q75/q25=27.85 vo_prod:H=0.6590,top10E=0.24,eRank=117.0,q75/q25=inf train_time:653833ms step_avg:76.03ms +[2025-09-02 14:39:13] [Rank 0] step:8601/10000 train_time:653844ms step_avg:76.02ms +[2025-09-02 14:39:13] [Rank 0] step:8601/10000 train_time:653844ms step_avg:76.02ms +[2025-09-02 14:39:15] [Rank 0] step:8621/10000 train_time:655341ms step_avg:76.02ms +[2025-09-02 14:39:15] [Rank 0] step:8621/10000 train_time:655341ms step_avg:76.02ms +[2025-09-02 14:39:16] [Rank 0] step:8641/10000 train_time:656968ms step_avg:76.03ms +[2025-09-02 
14:39:16] [Rank 0] step:8641/10000 train_time:656968ms step_avg:76.03ms +[2025-09-02 14:39:18] [Rank 0] step:8661/10000 train_time:658597ms step_avg:76.04ms +[2025-09-02 14:39:18] [Rank 0] step:8661/10000 train_time:658597ms step_avg:76.04ms +[2025-09-02 14:39:20] [Rank 0] step:8681/10000 train_time:660213ms step_avg:76.05ms +[2025-09-02 14:39:20] [Rank 0] step:8681/10000 train_time:660213ms step_avg:76.05ms +[2025-09-02 14:39:21] [Rank 0] step:8701/10000 train_time:661831ms step_avg:76.06ms +[2025-09-02 14:39:21] [Rank 0] step:8701/10000 train_time:661831ms step_avg:76.06ms +[2025-09-02 14:39:23] [Rank 0] step:8721/10000 train_time:663461ms step_avg:76.08ms +[2025-09-02 14:39:23] [Rank 0] step:8721/10000 train_time:663461ms step_avg:76.08ms +[2025-09-02 14:39:24] [Rank 0] step:8741/10000 train_time:665077ms step_avg:76.09ms +[2025-09-02 14:39:24] [Rank 0] step:8741/10000 train_time:665077ms step_avg:76.09ms +[2025-09-02 14:39:26] [Rank 0] step:8761/10000 train_time:666699ms step_avg:76.10ms +[2025-09-02 14:39:26] [Rank 0] step:8761/10000 train_time:666699ms step_avg:76.10ms +[2025-09-02 14:39:28] [Rank 0] step:8781/10000 train_time:668335ms step_avg:76.11ms +[2025-09-02 14:39:28] [Rank 0] step:8781/10000 train_time:668335ms step_avg:76.11ms +[2025-09-02 14:39:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:39:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:39:41] [Rank 0] PRINT: step:8800/10000 val_loss:3.8271 svd_entropy: attn_qk:H=0.7619,top10E=0.25,eRank=179.7,q75/q25=91.12 attn_vo:H=0.7814,top10E=0.16,eRank=255.8,q75/q25=inf mlp_w1:H=0.7937,top10E=0.25,eRank=215.5,q75/q25=22.02 mlp_w2:H=0.8633,top10E=0.12,eRank=314.9,q75/q25=27.80 vo_prod:H=0.6599,top10E=0.24,eRank=117.7,q75/q25=inf train_time:670127ms step_avg:76.15ms +[2025-09-02 14:39:41] [Rank 0] PRINT: step:8800/10000 val_loss:3.8271 svd_entropy: attn_qk:H=0.7619,top10E=0.25,eRank=179.7,q75/q25=91.12 attn_vo:H=0.7814,top10E=0.16,eRank=255.8,q75/q25=inf mlp_w1:H=0.7937,top10E=0.25,eRank=215.5,q75/q25=22.02 mlp_w2:H=0.8633,top10E=0.12,eRank=314.9,q75/q25=27.80 vo_prod:H=0.6599,top10E=0.24,eRank=117.7,q75/q25=inf train_time:670127ms step_avg:76.15ms +[2025-09-02 14:39:41] [Rank 0] step:8801/10000 train_time:670138ms step_avg:76.14ms +[2025-09-02 14:39:41] [Rank 0] step:8801/10000 train_time:670138ms step_avg:76.14ms +[2025-09-02 14:39:43] [Rank 0] step:8821/10000 train_time:671601ms step_avg:76.14ms +[2025-09-02 14:39:43] [Rank 0] step:8821/10000 train_time:671601ms step_avg:76.14ms +[2025-09-02 14:39:44] [Rank 0] step:8841/10000 train_time:673248ms step_avg:76.15ms +[2025-09-02 14:39:44] [Rank 0] step:8841/10000 train_time:673248ms step_avg:76.15ms +[2025-09-02 14:39:46] [Rank 0] step:8861/10000 train_time:674872ms step_avg:76.16ms +[2025-09-02 14:39:46] [Rank 0] step:8861/10000 train_time:674872ms step_avg:76.16ms +[2025-09-02 14:39:48] [Rank 0] step:8881/10000 train_time:676499ms step_avg:76.17ms +[2025-09-02 14:39:48] [Rank 0] step:8881/10000 train_time:676499ms step_avg:76.17ms +[2025-09-02 14:39:49] [Rank 0] step:8901/10000 train_time:678129ms step_avg:76.19ms +[2025-09-02 14:39:49] [Rank 0] step:8901/10000 train_time:678129ms step_avg:76.19ms +[2025-09-02 14:39:51] [Rank 0] step:8921/10000 train_time:679766ms step_avg:76.20ms +[2025-09-02 14:39:51] [Rank 0] step:8921/10000 train_time:679766ms step_avg:76.20ms +[2025-09-02 14:39:53] [Rank 
0] step:8941/10000 train_time:681405ms step_avg:76.21ms +[2025-09-02 14:39:53] [Rank 0] step:8941/10000 train_time:681405ms step_avg:76.21ms +[2025-09-02 14:39:54] [Rank 0] step:8961/10000 train_time:683029ms step_avg:76.22ms +[2025-09-02 14:39:54] [Rank 0] step:8961/10000 train_time:683029ms step_avg:76.22ms +[2025-09-02 14:39:56] [Rank 0] step:8981/10000 train_time:684653ms step_avg:76.23ms +[2025-09-02 14:39:56] [Rank 0] step:8981/10000 train_time:684653ms step_avg:76.23ms +[2025-09-02 14:39:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:39:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:40:09] [Rank 0] PRINT: step:9000/10000 val_loss:3.8184 svd_entropy: attn_qk:H=0.7623,top10E=0.25,eRank=180.1,q75/q25=91.22 attn_vo:H=0.7819,top10E=0.16,eRank=256.4,q75/q25=inf mlp_w1:H=0.7943,top10E=0.25,eRank=216.3,q75/q25=21.98 mlp_w2:H=0.8637,top10E=0.12,eRank=315.8,q75/q25=27.72 vo_prod:H=0.6606,top10E=0.24,eRank=118.3,q75/q25=inf train_time:686443ms step_avg:76.27ms +[2025-09-02 14:40:09] [Rank 0] PRINT: step:9000/10000 val_loss:3.8184 svd_entropy: attn_qk:H=0.7623,top10E=0.25,eRank=180.1,q75/q25=91.22 attn_vo:H=0.7819,top10E=0.16,eRank=256.4,q75/q25=inf mlp_w1:H=0.7943,top10E=0.25,eRank=216.3,q75/q25=21.98 mlp_w2:H=0.8637,top10E=0.12,eRank=315.8,q75/q25=27.72 vo_prod:H=0.6606,top10E=0.24,eRank=118.3,q75/q25=inf train_time:686443ms step_avg:76.27ms +[2025-09-02 14:40:09] [Rank 0] step:9001/10000 train_time:686454ms step_avg:76.26ms +[2025-09-02 14:40:09] [Rank 0] step:9001/10000 train_time:686454ms step_avg:76.26ms +[2025-09-02 14:40:11] [Rank 0] step:9021/10000 train_time:687921ms step_avg:76.26ms +[2025-09-02 14:40:11] [Rank 0] step:9021/10000 train_time:687921ms step_avg:76.26ms +[2025-09-02 14:40:13] [Rank 0] step:9041/10000 train_time:689542ms step_avg:76.27ms +[2025-09-02 
14:40:13] [Rank 0] step:9041/10000 train_time:689542ms step_avg:76.27ms +[2025-09-02 14:40:14] [Rank 0] step:9061/10000 train_time:691267ms step_avg:76.29ms +[2025-09-02 14:40:14] [Rank 0] step:9061/10000 train_time:691267ms step_avg:76.29ms +[2025-09-02 14:40:16] [Rank 0] step:9081/10000 train_time:692905ms step_avg:76.30ms +[2025-09-02 14:40:16] [Rank 0] step:9081/10000 train_time:692905ms step_avg:76.30ms +[2025-09-02 14:40:18] [Rank 0] step:9101/10000 train_time:694553ms step_avg:76.32ms +[2025-09-02 14:40:18] [Rank 0] step:9101/10000 train_time:694553ms step_avg:76.32ms +[2025-09-02 14:40:19] [Rank 0] step:9121/10000 train_time:696188ms step_avg:76.33ms +[2025-09-02 14:40:19] [Rank 0] step:9121/10000 train_time:696188ms step_avg:76.33ms +[2025-09-02 14:40:21] [Rank 0] step:9141/10000 train_time:697810ms step_avg:76.34ms +[2025-09-02 14:40:21] [Rank 0] step:9141/10000 train_time:697810ms step_avg:76.34ms +[2025-09-02 14:40:22] [Rank 0] step:9161/10000 train_time:699434ms step_avg:76.35ms +[2025-09-02 14:40:22] [Rank 0] step:9161/10000 train_time:699434ms step_avg:76.35ms +[2025-09-02 14:40:24] [Rank 0] step:9181/10000 train_time:701097ms step_avg:76.36ms +[2025-09-02 14:40:24] [Rank 0] step:9181/10000 train_time:701097ms step_avg:76.36ms +[2025-09-02 14:40:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:40:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:40:38] [Rank 0] PRINT: step:9200/10000 val_loss:3.8108 svd_entropy: attn_qk:H=0.7628,top10E=0.25,eRank=180.5,q75/q25=90.67 attn_vo:H=0.7823,top10E=0.16,eRank=256.9,q75/q25=inf mlp_w1:H=0.7949,top10E=0.25,eRank=216.9,q75/q25=22.02 mlp_w2:H=0.8641,top10E=0.12,eRank=316.6,q75/q25=27.65 vo_prod:H=0.6612,top10E=0.23,eRank=118.8,q75/q25=inf train_time:702894ms step_avg:76.40ms +[2025-09-02 14:40:38] [Rank 0] PRINT: step:9200/10000 val_loss:3.8108 svd_entropy: attn_qk:H=0.7628,top10E=0.25,eRank=180.5,q75/q25=90.67 attn_vo:H=0.7823,top10E=0.16,eRank=256.9,q75/q25=inf mlp_w1:H=0.7949,top10E=0.25,eRank=216.9,q75/q25=22.02 mlp_w2:H=0.8641,top10E=0.12,eRank=316.6,q75/q25=27.65 vo_prod:H=0.6612,top10E=0.23,eRank=118.8,q75/q25=inf train_time:702894ms step_avg:76.40ms +[2025-09-02 14:40:38] [Rank 0] step:9201/10000 train_time:702905ms step_avg:76.39ms +[2025-09-02 14:40:38] [Rank 0] step:9201/10000 train_time:702905ms step_avg:76.39ms +[2025-09-02 14:40:39] [Rank 0] step:9221/10000 train_time:704390ms step_avg:76.39ms +[2025-09-02 14:40:39] [Rank 0] step:9221/10000 train_time:704390ms step_avg:76.39ms +[2025-09-02 14:40:41] [Rank 0] step:9241/10000 train_time:706032ms step_avg:76.40ms +[2025-09-02 14:40:41] [Rank 0] step:9241/10000 train_time:706032ms step_avg:76.40ms +[2025-09-02 14:40:43] [Rank 0] step:9261/10000 train_time:707672ms step_avg:76.41ms +[2025-09-02 14:40:43] [Rank 0] step:9261/10000 train_time:707672ms step_avg:76.41ms +[2025-09-02 14:40:44] [Rank 0] step:9281/10000 train_time:709291ms step_avg:76.42ms +[2025-09-02 14:40:44] [Rank 0] step:9281/10000 train_time:709291ms step_avg:76.42ms +[2025-09-02 14:40:46] [Rank 0] step:9301/10000 train_time:710923ms step_avg:76.44ms +[2025-09-02 14:40:46] [Rank 0] step:9301/10000 train_time:710923ms step_avg:76.44ms +[2025-09-02 14:40:48] [Rank 0] step:9321/10000 train_time:712555ms step_avg:76.45ms +[2025-09-02 14:40:48] [Rank 0] step:9321/10000 train_time:712555ms step_avg:76.45ms +[2025-09-02 14:40:49] [Rank 
0] step:9341/10000 train_time:714190ms step_avg:76.46ms +[2025-09-02 14:40:49] [Rank 0] step:9341/10000 train_time:714190ms step_avg:76.46ms +[2025-09-02 14:40:51] [Rank 0] step:9361/10000 train_time:715825ms step_avg:76.47ms +[2025-09-02 14:40:51] [Rank 0] step:9361/10000 train_time:715825ms step_avg:76.47ms +[2025-09-02 14:40:52] [Rank 0] step:9381/10000 train_time:717471ms step_avg:76.48ms +[2025-09-02 14:40:52] [Rank 0] step:9381/10000 train_time:717471ms step_avg:76.48ms +[2025-09-02 14:40:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:40:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:41:06] [Rank 0] PRINT: step:9400/10000 val_loss:3.8041 svd_entropy: attn_qk:H=0.7631,top10E=0.25,eRank=180.8,q75/q25=90.95 attn_vo:H=0.7827,top10E=0.16,eRank=257.3,q75/q25=inf mlp_w1:H=0.7953,top10E=0.25,eRank=217.5,q75/q25=22.02 mlp_w2:H=0.8645,top10E=0.12,eRank=317.3,q75/q25=27.58 vo_prod:H=0.6618,top10E=0.23,eRank=119.3,q75/q25=inf train_time:719275ms step_avg:76.52ms +[2025-09-02 14:41:06] [Rank 0] PRINT: step:9400/10000 val_loss:3.8041 svd_entropy: attn_qk:H=0.7631,top10E=0.25,eRank=180.8,q75/q25=90.95 attn_vo:H=0.7827,top10E=0.16,eRank=257.3,q75/q25=inf mlp_w1:H=0.7953,top10E=0.25,eRank=217.5,q75/q25=22.02 mlp_w2:H=0.8645,top10E=0.12,eRank=317.3,q75/q25=27.58 vo_prod:H=0.6618,top10E=0.23,eRank=119.3,q75/q25=inf train_time:719275ms step_avg:76.52ms +[2025-09-02 14:41:06] [Rank 0] step:9401/10000 train_time:719286ms step_avg:76.51ms +[2025-09-02 14:41:06] [Rank 0] step:9401/10000 train_time:719286ms step_avg:76.51ms +[2025-09-02 14:41:08] [Rank 0] step:9421/10000 train_time:720767ms step_avg:76.51ms +[2025-09-02 14:41:08] [Rank 0] step:9421/10000 train_time:720767ms step_avg:76.51ms +[2025-09-02 14:41:09] [Rank 0] step:9441/10000 train_time:722397ms step_avg:76.52ms +[2025-09-02 
14:41:09] [Rank 0] step:9441/10000 train_time:722397ms step_avg:76.52ms +[2025-09-02 14:41:11] [Rank 0] step:9461/10000 train_time:724031ms step_avg:76.53ms +[2025-09-02 14:41:11] [Rank 0] step:9461/10000 train_time:724031ms step_avg:76.53ms +[2025-09-02 14:41:12] [Rank 0] step:9481/10000 train_time:725664ms step_avg:76.54ms +[2025-09-02 14:41:12] [Rank 0] step:9481/10000 train_time:725664ms step_avg:76.54ms +[2025-09-02 14:41:14] [Rank 0] step:9501/10000 train_time:727308ms step_avg:76.55ms +[2025-09-02 14:41:14] [Rank 0] step:9501/10000 train_time:727308ms step_avg:76.55ms +[2025-09-02 14:41:16] [Rank 0] step:9521/10000 train_time:728932ms step_avg:76.56ms +[2025-09-02 14:41:16] [Rank 0] step:9521/10000 train_time:728932ms step_avg:76.56ms +[2025-09-02 14:41:17] [Rank 0] step:9541/10000 train_time:730560ms step_avg:76.57ms +[2025-09-02 14:41:17] [Rank 0] step:9541/10000 train_time:730560ms step_avg:76.57ms +[2025-09-02 14:41:19] [Rank 0] step:9561/10000 train_time:732186ms step_avg:76.58ms +[2025-09-02 14:41:19] [Rank 0] step:9561/10000 train_time:732186ms step_avg:76.58ms +[2025-09-02 14:41:21] [Rank 0] step:9581/10000 train_time:733815ms step_avg:76.59ms +[2025-09-02 14:41:21] [Rank 0] step:9581/10000 train_time:733815ms step_avg:76.59ms +[2025-09-02 14:41:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:41:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:41:34] [Rank 0] PRINT: step:9600/10000 val_loss:3.7982 svd_entropy: attn_qk:H=0.7634,top10E=0.25,eRank=181.1,q75/q25=91.02 attn_vo:H=0.7829,top10E=0.16,eRank=257.7,q75/q25=inf mlp_w1:H=0.7956,top10E=0.25,eRank=217.9,q75/q25=22.04 mlp_w2:H=0.8648,top10E=0.12,eRank=318.0,q75/q25=27.56 vo_prod:H=0.6623,top10E=0.23,eRank=119.7,q75/q25=inf train_time:735617ms step_avg:76.63ms +[2025-09-02 14:41:34] [Rank 0] PRINT: step:9600/10000 val_loss:3.7982 svd_entropy: attn_qk:H=0.7634,top10E=0.25,eRank=181.1,q75/q25=91.02 attn_vo:H=0.7829,top10E=0.16,eRank=257.7,q75/q25=inf mlp_w1:H=0.7956,top10E=0.25,eRank=217.9,q75/q25=22.04 mlp_w2:H=0.8648,top10E=0.12,eRank=318.0,q75/q25=27.56 vo_prod:H=0.6623,top10E=0.23,eRank=119.7,q75/q25=inf train_time:735617ms step_avg:76.63ms +[2025-09-02 14:41:34] [Rank 0] step:9601/10000 train_time:735629ms step_avg:76.62ms +[2025-09-02 14:41:34] [Rank 0] step:9601/10000 train_time:735629ms step_avg:76.62ms +[2025-09-02 14:41:36] [Rank 0] step:9621/10000 train_time:737098ms step_avg:76.61ms +[2025-09-02 14:41:36] [Rank 0] step:9621/10000 train_time:737098ms step_avg:76.61ms +[2025-09-02 14:41:37] [Rank 0] step:9641/10000 train_time:738732ms step_avg:76.62ms +[2025-09-02 14:41:37] [Rank 0] step:9641/10000 train_time:738732ms step_avg:76.62ms +[2025-09-02 14:41:39] [Rank 0] step:9661/10000 train_time:740390ms step_avg:76.64ms +[2025-09-02 14:41:39] [Rank 0] step:9661/10000 train_time:740390ms step_avg:76.64ms +[2025-09-02 14:41:41] [Rank 0] step:9681/10000 train_time:742042ms step_avg:76.65ms +[2025-09-02 14:41:41] [Rank 0] step:9681/10000 train_time:742042ms step_avg:76.65ms +[2025-09-02 14:41:42] [Rank 0] step:9701/10000 train_time:743816ms step_avg:76.67ms +[2025-09-02 14:41:42] [Rank 0] step:9701/10000 train_time:743816ms step_avg:76.67ms +[2025-09-02 14:41:44] [Rank 0] step:9721/10000 train_time:745463ms step_avg:76.69ms +[2025-09-02 14:41:44] [Rank 0] step:9721/10000 train_time:745463ms step_avg:76.69ms +[2025-09-02 14:41:46] [Rank 
0] step:9741/10000 train_time:747139ms step_avg:76.70ms +[2025-09-02 14:41:46] [Rank 0] step:9741/10000 train_time:747139ms step_avg:76.70ms +[2025-09-02 14:41:47] [Rank 0] step:9761/10000 train_time:748793ms step_avg:76.71ms +[2025-09-02 14:41:47] [Rank 0] step:9761/10000 train_time:748793ms step_avg:76.71ms +[2025-09-02 14:41:49] [Rank 0] step:9781/10000 train_time:750462ms step_avg:76.73ms +[2025-09-02 14:41:49] [Rank 0] step:9781/10000 train_time:750462ms step_avg:76.73ms +[2025-09-02 14:41:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:41:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:42:03] [Rank 0] PRINT: step:9800/10000 val_loss:3.7929 svd_entropy: attn_qk:H=0.7636,top10E=0.25,eRank=181.3,q75/q25=90.81 attn_vo:H=0.7832,top10E=0.16,eRank=258.0,q75/q25=inf mlp_w1:H=0.7959,top10E=0.25,eRank=218.2,q75/q25=22.03 mlp_w2:H=0.8650,top10E=0.12,eRank=318.4,q75/q25=27.47 vo_prod:H=0.6627,top10E=0.23,eRank=120.0,q75/q25=inf train_time:752299ms step_avg:76.77ms +[2025-09-02 14:42:03] [Rank 0] PRINT: step:9800/10000 val_loss:3.7929 svd_entropy: attn_qk:H=0.7636,top10E=0.25,eRank=181.3,q75/q25=90.81 attn_vo:H=0.7832,top10E=0.16,eRank=258.0,q75/q25=inf mlp_w1:H=0.7959,top10E=0.25,eRank=218.2,q75/q25=22.03 mlp_w2:H=0.8650,top10E=0.12,eRank=318.4,q75/q25=27.47 vo_prod:H=0.6627,top10E=0.23,eRank=120.0,q75/q25=inf train_time:752299ms step_avg:76.77ms +[2025-09-02 14:42:03] [Rank 0] step:9801/10000 train_time:752310ms step_avg:76.76ms +[2025-09-02 14:42:03] [Rank 0] step:9801/10000 train_time:752310ms step_avg:76.76ms +[2025-09-02 14:42:04] [Rank 0] step:9821/10000 train_time:753799ms step_avg:76.75ms +[2025-09-02 14:42:04] [Rank 0] step:9821/10000 train_time:753799ms step_avg:76.75ms +[2025-09-02 14:42:06] [Rank 0] step:9841/10000 train_time:755469ms step_avg:76.77ms +[2025-09-02 
14:42:06] [Rank 0] step:9841/10000 train_time:755469ms step_avg:76.77ms +[2025-09-02 14:42:08] [Rank 0] step:9861/10000 train_time:757115ms step_avg:76.78ms +[2025-09-02 14:42:08] [Rank 0] step:9861/10000 train_time:757115ms step_avg:76.78ms +[2025-09-02 14:42:09] [Rank 0] step:9881/10000 train_time:758760ms step_avg:76.79ms +[2025-09-02 14:42:09] [Rank 0] step:9881/10000 train_time:758760ms step_avg:76.79ms +[2025-09-02 14:42:11] [Rank 0] step:9901/10000 train_time:760421ms step_avg:76.80ms +[2025-09-02 14:42:11] [Rank 0] step:9901/10000 train_time:760421ms step_avg:76.80ms +[2025-09-02 14:42:13] [Rank 0] step:9921/10000 train_time:762075ms step_avg:76.81ms +[2025-09-02 14:42:13] [Rank 0] step:9921/10000 train_time:762075ms step_avg:76.81ms +[2025-09-02 14:42:14] [Rank 0] step:9941/10000 train_time:763737ms step_avg:76.83ms +[2025-09-02 14:42:14] [Rank 0] step:9941/10000 train_time:763737ms step_avg:76.83ms +[2025-09-02 14:42:16] [Rank 0] step:9961/10000 train_time:765391ms step_avg:76.84ms +[2025-09-02 14:42:16] [Rank 0] step:9961/10000 train_time:765391ms step_avg:76.84ms +[2025-09-02 14:42:18] [Rank 0] step:9981/10000 train_time:767044ms step_avg:76.85ms +[2025-09-02 14:42:18] [Rank 0] step:9981/10000 train_time:767044ms step_avg:76.85ms +[2025-09-02 14:42:19] [Rank 0] step:10000/10000 train_time:768623ms step_avg:76.86ms +[2025-09-02 14:42:19] [Rank 0] step:10000/10000 train_time:768623ms step_avg:76.86ms +[2025-09-02 14:42:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:42:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:42:31] [Rank 0] PRINT: step:10000/10000 val_loss:3.7869 svd_entropy: attn_qk:H=0.7638,top10E=0.25,eRank=181.5,q75/q25=90.65 attn_vo:H=0.7833,top10E=0.16,eRank=258.2,q75/q25=inf mlp_w1:H=0.7961,top10E=0.25,eRank=218.5,q75/q25=22.04 mlp_w2:H=0.8652,top10E=0.12,eRank=318.8,q75/q25=27.45 vo_prod:H=0.6629,top10E=0.23,eRank=120.3,q75/q25=inf train_time:768878ms step_avg:76.89ms +[2025-09-02 14:42:31] [Rank 0] PRINT: step:10000/10000 val_loss:3.7869 svd_entropy: attn_qk:H=0.7638,top10E=0.25,eRank=181.5,q75/q25=90.65 attn_vo:H=0.7833,top10E=0.16,eRank=258.2,q75/q25=inf mlp_w1:H=0.7961,top10E=0.25,eRank=218.5,q75/q25=22.04 mlp_w2:H=0.8652,top10E=0.12,eRank=318.8,q75/q25=27.45 vo_prod:H=0.6629,top10E=0.23,eRank=120.3,q75/q25=inf train_time:768878ms step_avg:76.89ms +[2025-09-02 14:42:31] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 14:42:31 2025 --- +[2025-09-02 14:42:31] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 14:42:31 2025 --- +[2025-09-02 14:42:31] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 14:42:31] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_47/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..77383ef98d55c3091eeee77f86712e79c3bc560c --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_47/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "fc76ff81-0005-4d24-be19-7ef7aeb19591", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_47/training_log_fc76ff81-0005-4d24-be19-7ef7aeb19591.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_47/training_log_fc76ff81-0005-4d24-be19-7ef7aeb19591.txt new file mode 100644 index 0000000000000000000000000000000000000000..0da0915137c1c670e491773d42aa1dc95e6badc9 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_47/training_log_fc76ff81-0005-4d24-be19-7ef7aeb19591.txt @@ -0,0 +1,2984 @@ +[2025-09-02 15:36:10] [Rank 0] PRINT: --- Script Start: Tue Sep 2 15:36:10 2025 --- +[2025-09-02 15:36:10] [Rank 0] PRINT: --- Script Start: Tue Sep 2 15:36:10 2025 --- +[2025-09-02 15:36:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 15:36:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 15:36:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 15:36:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 15:36:10] [Rank 0] PRINT: Using fixed seed: 47 +[2025-09-02 15:36:10] [Rank 0] PRINT: Using fixed seed: 47 +[2025-09-02 15:36:10] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_47 +[2025-09-02 15:36:10] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_47 +[2025-09-02 15:36:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 15:36:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 15:36:10] [Rank 0] PRINT: Constructing model... +[2025-09-02 15:36:10] [Rank 0] PRINT: Constructing model... +[2025-09-02 15:36:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 15:36:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 15:36:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 15:36:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 15:36:11] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 15:36:11] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 15:36:11] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 15:36:11] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 15:36:11] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 15:36:11] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 15:36:11] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 15:36:11] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 15:36:11] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 15:36:11] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 15:36:11] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 15:36:11] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 15:36:11] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 15:36:11] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 15:36:11] [Rank 0] PRINT: Starting warmup... +[2025-09-02 15:36:11] [Rank 0] PRINT: Starting warmup... +[2025-09-02 15:36:56] [Rank 0] PRINT: Warmup complete. +[2025-09-02 15:36:56] [Rank 0] PRINT: Warmup complete. +[2025-09-02 15:36:56] [Rank 0] PRINT: Starting training... +[2025-09-02 15:36:56] [Rank 0] PRINT: Starting training... 
+[2025-09-02 15:36:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:36:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:37:12] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 15:37:12] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 15:37:14] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.44ms +[2025-09-02 15:37:14] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.44ms +[2025-09-02 15:37:15] [Rank 0] step:41/10000 train_time:2708ms step_avg:66.06ms +[2025-09-02 15:37:15] [Rank 0] step:41/10000 train_time:2708ms step_avg:66.06ms +[2025-09-02 15:37:16] [Rank 0] step:61/10000 train_time:4110ms step_avg:67.38ms +[2025-09-02 15:37:16] [Rank 0] step:61/10000 train_time:4110ms step_avg:67.38ms +[2025-09-02 15:37:18] [Rank 0] step:81/10000 train_time:5514ms step_avg:68.07ms +[2025-09-02 15:37:18] [Rank 0] step:81/10000 train_time:5514ms step_avg:68.07ms +[2025-09-02 15:37:19] [Rank 0] step:101/10000 train_time:6920ms step_avg:68.52ms +[2025-09-02 15:37:19] [Rank 0] step:101/10000 train_time:6920ms step_avg:68.52ms +[2025-09-02 15:37:21] [Rank 0] step:121/10000 train_time:8326ms step_avg:68.81ms +[2025-09-02 15:37:21] [Rank 0] step:121/10000 
train_time:8326ms step_avg:68.81ms +[2025-09-02 15:37:22] [Rank 0] step:141/10000 train_time:9732ms step_avg:69.02ms +[2025-09-02 15:37:22] [Rank 0] step:141/10000 train_time:9732ms step_avg:69.02ms +[2025-09-02 15:37:23] [Rank 0] step:161/10000 train_time:11139ms step_avg:69.19ms +[2025-09-02 15:37:23] [Rank 0] step:161/10000 train_time:11139ms step_avg:69.19ms +[2025-09-02 15:37:25] [Rank 0] step:181/10000 train_time:12546ms step_avg:69.32ms +[2025-09-02 15:37:25] [Rank 0] step:181/10000 train_time:12546ms step_avg:69.32ms +[2025-09-02 15:37:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:37:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:37:38] [Rank 0] PRINT: step:200/10000 val_loss:6.4026 svd_entropy: attn_qk:H=0.5095,top10E=0.71,eRank=77.0,q75/q25=12.05 attn_vo:H=0.4939,top10E=0.60,eRank=70.9,q75/q25=inf mlp_w1:H=0.4058,top10E=0.75,eRank=23.5,q75/q25=2.77 mlp_w2:H=0.1842,top10E=0.94,eRank=4.8,q75/q25=155.31 vo_prod:H=0.2698,top10E=0.85,eRank=9.5,q75/q25=inf train_time:14094ms step_avg:70.47ms +[2025-09-02 15:37:38] [Rank 0] PRINT: step:200/10000 val_loss:6.4026 svd_entropy: attn_qk:H=0.5095,top10E=0.71,eRank=77.0,q75/q25=12.05 attn_vo:H=0.4939,top10E=0.60,eRank=70.9,q75/q25=inf mlp_w1:H=0.4058,top10E=0.75,eRank=23.5,q75/q25=2.77 mlp_w2:H=0.1842,top10E=0.94,eRank=4.8,q75/q25=155.31 vo_prod:H=0.2698,top10E=0.85,eRank=9.5,q75/q25=inf train_time:14094ms step_avg:70.47ms +[2025-09-02 15:37:38] [Rank 0] step:201/10000 train_time:14106ms step_avg:70.18ms +[2025-09-02 15:37:38] [Rank 0] step:201/10000 train_time:14106ms step_avg:70.18ms +[2025-09-02 15:37:39] [Rank 0] step:221/10000 train_time:15393ms step_avg:69.65ms +[2025-09-02 15:37:39] [Rank 0] step:221/10000 train_time:15393ms step_avg:69.65ms +[2025-09-02 15:37:41] [Rank 0] step:241/10000 train_time:16799ms 
step_avg:69.70ms +[2025-09-02 15:37:41] [Rank 0] step:241/10000 train_time:16799ms step_avg:69.70ms +[2025-09-02 15:37:42] [Rank 0] step:261/10000 train_time:18206ms step_avg:69.76ms +[2025-09-02 15:37:42] [Rank 0] step:261/10000 train_time:18206ms step_avg:69.76ms +[2025-09-02 15:37:44] [Rank 0] step:281/10000 train_time:19614ms step_avg:69.80ms +[2025-09-02 15:37:44] [Rank 0] step:281/10000 train_time:19614ms step_avg:69.80ms +[2025-09-02 15:37:45] [Rank 0] step:301/10000 train_time:21021ms step_avg:69.84ms +[2025-09-02 15:37:45] [Rank 0] step:301/10000 train_time:21021ms step_avg:69.84ms +[2025-09-02 15:37:46] [Rank 0] step:321/10000 train_time:22429ms step_avg:69.87ms +[2025-09-02 15:37:46] [Rank 0] step:321/10000 train_time:22429ms step_avg:69.87ms +[2025-09-02 15:37:48] [Rank 0] step:341/10000 train_time:23837ms step_avg:69.90ms +[2025-09-02 15:37:48] [Rank 0] step:341/10000 train_time:23837ms step_avg:69.90ms +[2025-09-02 15:37:49] [Rank 0] step:361/10000 train_time:25246ms step_avg:69.93ms +[2025-09-02 15:37:49] [Rank 0] step:361/10000 train_time:25246ms step_avg:69.93ms +[2025-09-02 15:37:51] [Rank 0] step:381/10000 train_time:26656ms step_avg:69.96ms +[2025-09-02 15:37:51] [Rank 0] step:381/10000 train_time:26656ms step_avg:69.96ms +[2025-09-02 15:37:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:37:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:38:04] [Rank 0] PRINT: step:400/10000 val_loss:5.9101 svd_entropy: attn_qk:H=0.5563,top10E=0.61,eRank=85.7,q75/q25=13.19 attn_vo:H=0.5586,top10E=0.48,eRank=88.9,q75/q25=inf mlp_w1:H=0.4383,top10E=0.69,eRank=36.9,q75/q25=3.18 mlp_w2:H=0.5341,top10E=0.62,eRank=36.7,q75/q25=12.54 vo_prod:H=0.3879,top10E=0.73,eRank=18.6,q75/q25=inf train_time:28206ms step_avg:70.51ms +[2025-09-02 15:38:04] [Rank 0] PRINT: step:400/10000 val_loss:5.9101 svd_entropy: attn_qk:H=0.5563,top10E=0.61,eRank=85.7,q75/q25=13.19 attn_vo:H=0.5586,top10E=0.48,eRank=88.9,q75/q25=inf mlp_w1:H=0.4383,top10E=0.69,eRank=36.9,q75/q25=3.18 mlp_w2:H=0.5341,top10E=0.62,eRank=36.7,q75/q25=12.54 vo_prod:H=0.3879,top10E=0.73,eRank=18.6,q75/q25=inf train_time:28206ms step_avg:70.51ms +[2025-09-02 15:38:04] [Rank 0] step:401/10000 train_time:28218ms step_avg:70.37ms +[2025-09-02 15:38:04] [Rank 0] step:401/10000 train_time:28218ms step_avg:70.37ms +[2025-09-02 15:38:05] [Rank 0] step:421/10000 train_time:29489ms step_avg:70.05ms +[2025-09-02 15:38:05] [Rank 0] step:421/10000 train_time:29489ms step_avg:70.05ms +[2025-09-02 15:38:06] [Rank 0] step:441/10000 train_time:30895ms step_avg:70.06ms +[2025-09-02 15:38:06] [Rank 0] step:441/10000 train_time:30895ms step_avg:70.06ms +[2025-09-02 15:38:08] [Rank 0] step:461/10000 train_time:32305ms step_avg:70.08ms +[2025-09-02 15:38:08] [Rank 0] step:461/10000 train_time:32305ms step_avg:70.08ms +[2025-09-02 15:38:09] [Rank 0] step:481/10000 train_time:33713ms step_avg:70.09ms +[2025-09-02 15:38:09] [Rank 0] step:481/10000 train_time:33713ms step_avg:70.09ms +[2025-09-02 15:38:11] [Rank 0] step:501/10000 train_time:35121ms step_avg:70.10ms +[2025-09-02 15:38:11] [Rank 0] step:501/10000 train_time:35121ms step_avg:70.10ms +[2025-09-02 15:38:12] [Rank 0] step:521/10000 train_time:36552ms step_avg:70.16ms +[2025-09-02 15:38:12] [Rank 0] step:521/10000 train_time:36552ms step_avg:70.16ms +[2025-09-02 15:38:14] [Rank 0] step:541/10000 train_time:37960ms 
step_avg:70.17ms +[2025-09-02 15:38:14] [Rank 0] step:541/10000 train_time:37960ms step_avg:70.17ms +[2025-09-02 15:38:15] [Rank 0] step:561/10000 train_time:39369ms step_avg:70.18ms +[2025-09-02 15:38:15] [Rank 0] step:561/10000 train_time:39369ms step_avg:70.18ms +[2025-09-02 15:38:16] [Rank 0] step:581/10000 train_time:40779ms step_avg:70.19ms +[2025-09-02 15:38:16] [Rank 0] step:581/10000 train_time:40779ms step_avg:70.19ms +[2025-09-02 15:38:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:38:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:38:29] [Rank 0] PRINT: step:600/10000 val_loss:5.6331 svd_entropy: attn_qk:H=0.5882,top10E=0.54,eRank=93.1,q75/q25=14.60 attn_vo:H=0.5997,top10E=0.41,eRank=105.8,q75/q25=inf mlp_w1:H=0.4777,top10E=0.65,eRank=46.5,q75/q25=3.53 mlp_w2:H=0.6290,top10E=0.47,eRank=67.7,q75/q25=9.30 vo_prod:H=0.4461,top10E=0.62,eRank=26.4,q75/q25=inf train_time:42330ms step_avg:70.55ms +[2025-09-02 15:38:29] [Rank 0] PRINT: step:600/10000 val_loss:5.6331 svd_entropy: attn_qk:H=0.5882,top10E=0.54,eRank=93.1,q75/q25=14.60 attn_vo:H=0.5997,top10E=0.41,eRank=105.8,q75/q25=inf mlp_w1:H=0.4777,top10E=0.65,eRank=46.5,q75/q25=3.53 mlp_w2:H=0.6290,top10E=0.47,eRank=67.7,q75/q25=9.30 vo_prod:H=0.4461,top10E=0.62,eRank=26.4,q75/q25=inf train_time:42330ms step_avg:70.55ms +[2025-09-02 15:38:29] [Rank 0] step:601/10000 train_time:42342ms step_avg:70.45ms +[2025-09-02 15:38:29] [Rank 0] step:601/10000 train_time:42342ms step_avg:70.45ms +[2025-09-02 15:38:31] [Rank 0] step:621/10000 train_time:43617ms step_avg:70.24ms +[2025-09-02 15:38:31] [Rank 0] step:621/10000 train_time:43617ms step_avg:70.24ms +[2025-09-02 15:38:32] [Rank 0] step:641/10000 train_time:45022ms step_avg:70.24ms +[2025-09-02 15:38:32] [Rank 0] step:641/10000 train_time:45022ms step_avg:70.24ms 
+[2025-09-02 15:38:34] [Rank 0] step:661/10000 train_time:46431ms step_avg:70.24ms +[2025-09-02 15:38:34] [Rank 0] step:661/10000 train_time:46431ms step_avg:70.24ms +[2025-09-02 15:38:35] [Rank 0] step:681/10000 train_time:47838ms step_avg:70.25ms +[2025-09-02 15:38:35] [Rank 0] step:681/10000 train_time:47838ms step_avg:70.25ms +[2025-09-02 15:38:36] [Rank 0] step:701/10000 train_time:49247ms step_avg:70.25ms +[2025-09-02 15:38:36] [Rank 0] step:701/10000 train_time:49247ms step_avg:70.25ms +[2025-09-02 15:38:38] [Rank 0] step:721/10000 train_time:50656ms step_avg:70.26ms +[2025-09-02 15:38:38] [Rank 0] step:721/10000 train_time:50656ms step_avg:70.26ms +[2025-09-02 15:38:39] [Rank 0] step:741/10000 train_time:52065ms step_avg:70.26ms +[2025-09-02 15:38:39] [Rank 0] step:741/10000 train_time:52065ms step_avg:70.26ms +[2025-09-02 15:38:41] [Rank 0] step:761/10000 train_time:53486ms step_avg:70.28ms +[2025-09-02 15:38:41] [Rank 0] step:761/10000 train_time:53486ms step_avg:70.28ms +[2025-09-02 15:38:42] [Rank 0] step:781/10000 train_time:54907ms step_avg:70.30ms +[2025-09-02 15:38:42] [Rank 0] step:781/10000 train_time:54907ms step_avg:70.30ms +[2025-09-02 15:38:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:38:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:38:55] [Rank 0] PRINT: step:800/10000 val_loss:5.4118 svd_entropy: attn_qk:H=0.6122,top10E=0.49,eRank=99.4,q75/q25=16.48 attn_vo:H=0.6299,top10E=0.37,eRank=121.8,q75/q25=inf mlp_w1:H=0.5101,top10E=0.61,eRank=53.8,q75/q25=3.86 mlp_w2:H=0.6808,top10E=0.39,eRank=94.5,q75/q25=8.81 vo_prod:H=0.4821,top10E=0.54,eRank=33.4,q75/q25=inf train_time:56472ms step_avg:70.59ms +[2025-09-02 15:38:55] [Rank 0] PRINT: step:800/10000 val_loss:5.4118 svd_entropy: attn_qk:H=0.6122,top10E=0.49,eRank=99.4,q75/q25=16.48 attn_vo:H=0.6299,top10E=0.37,eRank=121.8,q75/q25=inf mlp_w1:H=0.5101,top10E=0.61,eRank=53.8,q75/q25=3.86 mlp_w2:H=0.6808,top10E=0.39,eRank=94.5,q75/q25=8.81 vo_prod:H=0.4821,top10E=0.54,eRank=33.4,q75/q25=inf train_time:56472ms step_avg:70.59ms +[2025-09-02 15:38:55] [Rank 0] step:801/10000 train_time:56484ms step_avg:70.52ms +[2025-09-02 15:38:55] [Rank 0] step:801/10000 train_time:56484ms step_avg:70.52ms +[2025-09-02 15:38:57] [Rank 0] step:821/10000 train_time:57774ms step_avg:70.37ms +[2025-09-02 15:38:57] [Rank 0] step:821/10000 train_time:57774ms step_avg:70.37ms +[2025-09-02 15:38:58] [Rank 0] step:841/10000 train_time:59191ms step_avg:70.38ms +[2025-09-02 15:38:58] [Rank 0] step:841/10000 train_time:59191ms step_avg:70.38ms +[2025-09-02 15:39:00] [Rank 0] step:861/10000 train_time:60614ms step_avg:70.40ms +[2025-09-02 15:39:00] [Rank 0] step:861/10000 train_time:60614ms step_avg:70.40ms +[2025-09-02 15:39:01] [Rank 0] step:881/10000 train_time:62035ms step_avg:70.41ms +[2025-09-02 15:39:01] [Rank 0] step:881/10000 train_time:62035ms step_avg:70.41ms +[2025-09-02 15:39:02] [Rank 0] step:901/10000 train_time:63456ms step_avg:70.43ms +[2025-09-02 15:39:02] [Rank 0] step:901/10000 train_time:63456ms step_avg:70.43ms +[2025-09-02 15:39:04] [Rank 0] step:921/10000 train_time:64880ms step_avg:70.45ms +[2025-09-02 15:39:04] [Rank 0] step:921/10000 train_time:64880ms step_avg:70.45ms +[2025-09-02 15:39:05] [Rank 0] step:941/10000 train_time:66302ms 
step_avg:70.46ms +[2025-09-02 15:39:05] [Rank 0] step:941/10000 train_time:66302ms step_avg:70.46ms +[2025-09-02 15:39:07] [Rank 0] step:961/10000 train_time:67726ms step_avg:70.47ms +[2025-09-02 15:39:07] [Rank 0] step:961/10000 train_time:67726ms step_avg:70.47ms +[2025-09-02 15:39:08] [Rank 0] step:981/10000 train_time:69149ms step_avg:70.49ms +[2025-09-02 15:39:08] [Rank 0] step:981/10000 train_time:69149ms step_avg:70.49ms +[2025-09-02 15:39:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:39:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:39:21] [Rank 0] PRINT: step:1000/10000 val_loss:5.2633 svd_entropy: attn_qk:H=0.6310,top10E=0.45,eRank=105.2,q75/q25=19.02 attn_vo:H=0.6531,top10E=0.34,eRank=137.0,q75/q25=inf mlp_w1:H=0.5345,top10E=0.58,eRank=59.7,q75/q25=4.22 mlp_w2:H=0.7060,top10E=0.35,eRank=111.4,q75/q25=9.71 vo_prod:H=0.5066,top10E=0.49,eRank=39.5,q75/q25=inf train_time:70714ms step_avg:70.71ms +[2025-09-02 15:39:21] [Rank 0] PRINT: step:1000/10000 val_loss:5.2633 svd_entropy: attn_qk:H=0.6310,top10E=0.45,eRank=105.2,q75/q25=19.02 attn_vo:H=0.6531,top10E=0.34,eRank=137.0,q75/q25=inf mlp_w1:H=0.5345,top10E=0.58,eRank=59.7,q75/q25=4.22 mlp_w2:H=0.7060,top10E=0.35,eRank=111.4,q75/q25=9.71 vo_prod:H=0.5066,top10E=0.49,eRank=39.5,q75/q25=inf train_time:70714ms step_avg:70.71ms +[2025-09-02 15:39:21] [Rank 0] step:1001/10000 train_time:70727ms step_avg:70.66ms +[2025-09-02 15:39:21] [Rank 0] step:1001/10000 train_time:70727ms step_avg:70.66ms +[2025-09-02 15:39:23] [Rank 0] step:1021/10000 train_time:72026ms step_avg:70.54ms +[2025-09-02 15:39:23] [Rank 0] step:1021/10000 train_time:72026ms step_avg:70.54ms +[2025-09-02 15:39:24] [Rank 0] step:1041/10000 train_time:73447ms step_avg:70.55ms +[2025-09-02 15:39:24] [Rank 0] step:1041/10000 train_time:73447ms 
step_avg:70.55ms +[2025-09-02 15:39:26] [Rank 0] step:1061/10000 train_time:74869ms step_avg:70.56ms +[2025-09-02 15:39:26] [Rank 0] step:1061/10000 train_time:74869ms step_avg:70.56ms +[2025-09-02 15:39:27] [Rank 0] step:1081/10000 train_time:76293ms step_avg:70.58ms +[2025-09-02 15:39:27] [Rank 0] step:1081/10000 train_time:76293ms step_avg:70.58ms +[2025-09-02 15:39:28] [Rank 0] step:1101/10000 train_time:77714ms step_avg:70.58ms +[2025-09-02 15:39:28] [Rank 0] step:1101/10000 train_time:77714ms step_avg:70.58ms +[2025-09-02 15:39:30] [Rank 0] step:1121/10000 train_time:79136ms step_avg:70.59ms +[2025-09-02 15:39:30] [Rank 0] step:1121/10000 train_time:79136ms step_avg:70.59ms +[2025-09-02 15:39:31] [Rank 0] step:1141/10000 train_time:80560ms step_avg:70.60ms +[2025-09-02 15:39:31] [Rank 0] step:1141/10000 train_time:80560ms step_avg:70.60ms +[2025-09-02 15:39:33] [Rank 0] step:1161/10000 train_time:81984ms step_avg:70.61ms +[2025-09-02 15:39:33] [Rank 0] step:1161/10000 train_time:81984ms step_avg:70.61ms +[2025-09-02 15:39:34] [Rank 0] step:1181/10000 train_time:83408ms step_avg:70.63ms +[2025-09-02 15:39:34] [Rank 0] step:1181/10000 train_time:83408ms step_avg:70.63ms +[2025-09-02 15:39:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:39:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:39:47] [Rank 0] PRINT: step:1200/10000 val_loss:5.1133 svd_entropy: attn_qk:H=0.6462,top10E=0.42,eRank=110.8,q75/q25=22.50 attn_vo:H=0.6726,top10E=0.31,eRank=151.5,q75/q25=inf mlp_w1:H=0.5545,top10E=0.56,eRank=65.1,q75/q25=4.62 mlp_w2:H=0.7259,top10E=0.31,eRank=127.3,q75/q25=10.96 vo_prod:H=0.5269,top10E=0.45,eRank=45.4,q75/q25=inf train_time:84974ms step_avg:70.81ms +[2025-09-02 15:39:47] [Rank 0] PRINT: step:1200/10000 val_loss:5.1133 svd_entropy: attn_qk:H=0.6462,top10E=0.42,eRank=110.8,q75/q25=22.50 attn_vo:H=0.6726,top10E=0.31,eRank=151.5,q75/q25=inf mlp_w1:H=0.5545,top10E=0.56,eRank=65.1,q75/q25=4.62 mlp_w2:H=0.7259,top10E=0.31,eRank=127.3,q75/q25=10.96 vo_prod:H=0.5269,top10E=0.45,eRank=45.4,q75/q25=inf train_time:84974ms step_avg:70.81ms +[2025-09-02 15:39:47] [Rank 0] step:1201/10000 train_time:84986ms step_avg:70.76ms +[2025-09-02 15:39:47] [Rank 0] step:1201/10000 train_time:84986ms step_avg:70.76ms +[2025-09-02 15:39:49] [Rank 0] step:1221/10000 train_time:86273ms step_avg:70.66ms +[2025-09-02 15:39:49] [Rank 0] step:1221/10000 train_time:86273ms step_avg:70.66ms +[2025-09-02 15:39:50] [Rank 0] step:1241/10000 train_time:87694ms step_avg:70.66ms +[2025-09-02 15:39:50] [Rank 0] step:1241/10000 train_time:87694ms step_avg:70.66ms +[2025-09-02 15:39:51] [Rank 0] step:1261/10000 train_time:89116ms step_avg:70.67ms +[2025-09-02 15:39:51] [Rank 0] step:1261/10000 train_time:89116ms step_avg:70.67ms +[2025-09-02 15:39:53] [Rank 0] step:1281/10000 train_time:90540ms step_avg:70.68ms +[2025-09-02 15:39:53] [Rank 0] step:1281/10000 train_time:90540ms step_avg:70.68ms +[2025-09-02 15:39:54] [Rank 0] step:1301/10000 train_time:91964ms step_avg:70.69ms +[2025-09-02 15:39:54] [Rank 0] step:1301/10000 train_time:91964ms step_avg:70.69ms +[2025-09-02 15:39:56] [Rank 0] step:1321/10000 train_time:93387ms step_avg:70.69ms +[2025-09-02 15:39:56] [Rank 0] step:1321/10000 train_time:93387ms step_avg:70.69ms +[2025-09-02 15:39:57] [Rank 0] step:1341/10000 
train_time:94812ms step_avg:70.70ms +[2025-09-02 15:39:57] [Rank 0] step:1341/10000 train_time:94812ms step_avg:70.70ms +[2025-09-02 15:39:59] [Rank 0] step:1361/10000 train_time:96236ms step_avg:70.71ms +[2025-09-02 15:39:59] [Rank 0] step:1361/10000 train_time:96236ms step_avg:70.71ms +[2025-09-02 15:40:00] [Rank 0] step:1381/10000 train_time:97660ms step_avg:70.72ms +[2025-09-02 15:40:00] [Rank 0] step:1381/10000 train_time:97660ms step_avg:70.72ms +[2025-09-02 15:40:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:40:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:40:13] [Rank 0] PRINT: step:1400/10000 val_loss:4.9883 svd_entropy: attn_qk:H=0.6588,top10E=0.40,eRank=115.9,q75/q25=26.98 attn_vo:H=0.6889,top10E=0.29,eRank=164.2,q75/q25=inf mlp_w1:H=0.5739,top10E=0.53,eRank=70.8,q75/q25=5.08 mlp_w2:H=0.7427,top10E=0.29,eRank=142.3,q75/q25=12.25 vo_prod:H=0.5437,top10E=0.42,eRank=51.0,q75/q25=inf train_time:99229ms step_avg:70.88ms +[2025-09-02 15:40:13] [Rank 0] PRINT: step:1400/10000 val_loss:4.9883 svd_entropy: attn_qk:H=0.6588,top10E=0.40,eRank=115.9,q75/q25=26.98 attn_vo:H=0.6889,top10E=0.29,eRank=164.2,q75/q25=inf mlp_w1:H=0.5739,top10E=0.53,eRank=70.8,q75/q25=5.08 mlp_w2:H=0.7427,top10E=0.29,eRank=142.3,q75/q25=12.25 vo_prod:H=0.5437,top10E=0.42,eRank=51.0,q75/q25=inf train_time:99229ms step_avg:70.88ms +[2025-09-02 15:40:13] [Rank 0] step:1401/10000 train_time:99241ms step_avg:70.84ms +[2025-09-02 15:40:13] [Rank 0] step:1401/10000 train_time:99241ms step_avg:70.84ms +[2025-09-02 15:40:14] [Rank 0] step:1421/10000 train_time:100544ms step_avg:70.76ms +[2025-09-02 15:40:14] [Rank 0] step:1421/10000 train_time:100544ms step_avg:70.76ms +[2025-09-02 15:40:16] [Rank 0] step:1441/10000 train_time:101967ms step_avg:70.76ms +[2025-09-02 15:40:16] [Rank 0] step:1441/10000 
train_time:101967ms step_avg:70.76ms +[2025-09-02 15:40:17] [Rank 0] step:1461/10000 train_time:103390ms step_avg:70.77ms +[2025-09-02 15:40:17] [Rank 0] step:1461/10000 train_time:103390ms step_avg:70.77ms +[2025-09-02 15:40:19] [Rank 0] step:1481/10000 train_time:104815ms step_avg:70.77ms +[2025-09-02 15:40:19] [Rank 0] step:1481/10000 train_time:104815ms step_avg:70.77ms +[2025-09-02 15:40:20] [Rank 0] step:1501/10000 train_time:106246ms step_avg:70.78ms +[2025-09-02 15:40:20] [Rank 0] step:1501/10000 train_time:106246ms step_avg:70.78ms +[2025-09-02 15:40:22] [Rank 0] step:1521/10000 train_time:107679ms step_avg:70.80ms +[2025-09-02 15:40:22] [Rank 0] step:1521/10000 train_time:107679ms step_avg:70.80ms +[2025-09-02 15:40:23] [Rank 0] step:1541/10000 train_time:109215ms step_avg:70.87ms +[2025-09-02 15:40:23] [Rank 0] step:1541/10000 train_time:109215ms step_avg:70.87ms +[2025-09-02 15:40:25] [Rank 0] step:1561/10000 train_time:110649ms step_avg:70.88ms +[2025-09-02 15:40:25] [Rank 0] step:1561/10000 train_time:110649ms step_avg:70.88ms +[2025-09-02 15:40:26] [Rank 0] step:1581/10000 train_time:112082ms step_avg:70.89ms +[2025-09-02 15:40:26] [Rank 0] step:1581/10000 train_time:112082ms step_avg:70.89ms +[2025-09-02 15:40:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:40:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:40:39] [Rank 0] PRINT: step:1600/10000 val_loss:4.8435 svd_entropy: attn_qk:H=0.6689,top10E=0.39,eRank=120.1,q75/q25=32.54 attn_vo:H=0.7022,top10E=0.27,eRank=175.3,q75/q25=inf mlp_w1:H=0.5906,top10E=0.51,eRank=76.6,q75/q25=5.53 mlp_w2:H=0.7562,top10E=0.27,eRank=155.9,q75/q25=13.56 vo_prod:H=0.5580,top10E=0.39,eRank=56.3,q75/q25=inf train_time:113661ms step_avg:71.04ms +[2025-09-02 15:40:39] [Rank 0] PRINT: step:1600/10000 val_loss:4.8435 svd_entropy: attn_qk:H=0.6689,top10E=0.39,eRank=120.1,q75/q25=32.54 attn_vo:H=0.7022,top10E=0.27,eRank=175.3,q75/q25=inf mlp_w1:H=0.5906,top10E=0.51,eRank=76.6,q75/q25=5.53 mlp_w2:H=0.7562,top10E=0.27,eRank=155.9,q75/q25=13.56 vo_prod:H=0.5580,top10E=0.39,eRank=56.3,q75/q25=inf train_time:113661ms step_avg:71.04ms +[2025-09-02 15:40:39] [Rank 0] step:1601/10000 train_time:113673ms step_avg:71.00ms +[2025-09-02 15:40:39] [Rank 0] step:1601/10000 train_time:113673ms step_avg:71.00ms +[2025-09-02 15:40:41] [Rank 0] step:1621/10000 train_time:114979ms step_avg:70.93ms +[2025-09-02 15:40:41] [Rank 0] step:1621/10000 train_time:114979ms step_avg:70.93ms +[2025-09-02 15:40:42] [Rank 0] step:1641/10000 train_time:116410ms step_avg:70.94ms +[2025-09-02 15:40:42] [Rank 0] step:1641/10000 train_time:116410ms step_avg:70.94ms +[2025-09-02 15:40:43] [Rank 0] step:1661/10000 train_time:117844ms step_avg:70.95ms +[2025-09-02 15:40:43] [Rank 0] step:1661/10000 train_time:117844ms step_avg:70.95ms +[2025-09-02 15:40:45] [Rank 0] step:1681/10000 train_time:119276ms step_avg:70.96ms +[2025-09-02 15:40:45] [Rank 0] step:1681/10000 train_time:119276ms step_avg:70.96ms +[2025-09-02 15:40:46] [Rank 0] step:1701/10000 train_time:120710ms step_avg:70.96ms +[2025-09-02 15:40:46] [Rank 0] step:1701/10000 train_time:120710ms step_avg:70.96ms +[2025-09-02 15:40:48] [Rank 0] step:1721/10000 train_time:122143ms step_avg:70.97ms +[2025-09-02 15:40:48] [Rank 0] step:1721/10000 train_time:122143ms step_avg:70.97ms +[2025-09-02 15:40:49] [Rank 0] 
step:1741/10000 train_time:123577ms step_avg:70.98ms +[2025-09-02 15:40:49] [Rank 0] step:1741/10000 train_time:123577ms step_avg:70.98ms +[2025-09-02 15:40:51] [Rank 0] step:1761/10000 train_time:125011ms step_avg:70.99ms +[2025-09-02 15:40:51] [Rank 0] step:1761/10000 train_time:125011ms step_avg:70.99ms +[2025-09-02 15:40:52] [Rank 0] step:1781/10000 train_time:126446ms step_avg:71.00ms +[2025-09-02 15:40:52] [Rank 0] step:1781/10000 train_time:126446ms step_avg:71.00ms +[2025-09-02 15:40:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:40:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:41:05] [Rank 0] PRINT: step:1800/10000 val_loss:4.7354 svd_entropy: attn_qk:H=0.6783,top10E=0.37,eRank=124.4,q75/q25=38.22 attn_vo:H=0.7132,top10E=0.25,eRank=184.6,q75/q25=inf mlp_w1:H=0.6065,top10E=0.49,eRank=82.3,q75/q25=6.03 mlp_w2:H=0.7686,top10E=0.25,eRank=169.1,q75/q25=14.61 vo_prod:H=0.5701,top10E=0.37,eRank=61.3,q75/q25=inf train_time:128024ms step_avg:71.12ms +[2025-09-02 15:41:05] [Rank 0] PRINT: step:1800/10000 val_loss:4.7354 svd_entropy: attn_qk:H=0.6783,top10E=0.37,eRank=124.4,q75/q25=38.22 attn_vo:H=0.7132,top10E=0.25,eRank=184.6,q75/q25=inf mlp_w1:H=0.6065,top10E=0.49,eRank=82.3,q75/q25=6.03 mlp_w2:H=0.7686,top10E=0.25,eRank=169.1,q75/q25=14.61 vo_prod:H=0.5701,top10E=0.37,eRank=61.3,q75/q25=inf train_time:128024ms step_avg:71.12ms +[2025-09-02 15:41:05] [Rank 0] step:1801/10000 train_time:128036ms step_avg:71.09ms +[2025-09-02 15:41:05] [Rank 0] step:1801/10000 train_time:128036ms step_avg:71.09ms +[2025-09-02 15:41:07] [Rank 0] step:1821/10000 train_time:129327ms step_avg:71.02ms +[2025-09-02 15:41:07] [Rank 0] step:1821/10000 train_time:129327ms step_avg:71.02ms +[2025-09-02 15:41:08] [Rank 0] step:1841/10000 train_time:130761ms step_avg:71.03ms +[2025-09-02 15:41:08] 
[Rank 0] step:1841/10000 train_time:130761ms step_avg:71.03ms +[2025-09-02 15:41:10] [Rank 0] step:1861/10000 train_time:132196ms step_avg:71.03ms +[2025-09-02 15:41:10] [Rank 0] step:1861/10000 train_time:132196ms step_avg:71.03ms +[2025-09-02 15:41:11] [Rank 0] step:1881/10000 train_time:133630ms step_avg:71.04ms +[2025-09-02 15:41:11] [Rank 0] step:1881/10000 train_time:133630ms step_avg:71.04ms +[2025-09-02 15:41:12] [Rank 0] step:1901/10000 train_time:135065ms step_avg:71.05ms +[2025-09-02 15:41:12] [Rank 0] step:1901/10000 train_time:135065ms step_avg:71.05ms +[2025-09-02 15:41:14] [Rank 0] step:1921/10000 train_time:136500ms step_avg:71.06ms +[2025-09-02 15:41:14] [Rank 0] step:1921/10000 train_time:136500ms step_avg:71.06ms +[2025-09-02 15:41:15] [Rank 0] step:1941/10000 train_time:137935ms step_avg:71.06ms +[2025-09-02 15:41:15] [Rank 0] step:1941/10000 train_time:137935ms step_avg:71.06ms +[2025-09-02 15:41:17] [Rank 0] step:1961/10000 train_time:139370ms step_avg:71.07ms +[2025-09-02 15:41:17] [Rank 0] step:1961/10000 train_time:139370ms step_avg:71.07ms +[2025-09-02 15:41:18] [Rank 0] step:1981/10000 train_time:140805ms step_avg:71.08ms +[2025-09-02 15:41:18] [Rank 0] step:1981/10000 train_time:140805ms step_avg:71.08ms +[2025-09-02 15:41:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:41:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:41:31] [Rank 0] PRINT: step:2000/10000 val_loss:4.6679 svd_entropy: attn_qk:H=0.6860,top10E=0.36,eRank=128.2,q75/q25=44.24 attn_vo:H=0.7225,top10E=0.24,eRank=192.6,q75/q25=inf mlp_w1:H=0.6201,top10E=0.48,eRank=87.7,q75/q25=6.51 mlp_w2:H=0.7779,top10E=0.23,eRank=180.0,q75/q25=15.50 vo_prod:H=0.5803,top10E=0.35,eRank=65.9,q75/q25=inf train_time:142388ms step_avg:71.19ms +[2025-09-02 15:41:31] [Rank 0] PRINT: step:2000/10000 val_loss:4.6679 svd_entropy: attn_qk:H=0.6860,top10E=0.36,eRank=128.2,q75/q25=44.24 attn_vo:H=0.7225,top10E=0.24,eRank=192.6,q75/q25=inf mlp_w1:H=0.6201,top10E=0.48,eRank=87.7,q75/q25=6.51 mlp_w2:H=0.7779,top10E=0.23,eRank=180.0,q75/q25=15.50 vo_prod:H=0.5803,top10E=0.35,eRank=65.9,q75/q25=inf train_time:142388ms step_avg:71.19ms +[2025-09-02 15:41:31] [Rank 0] step:2001/10000 train_time:142401ms step_avg:71.16ms +[2025-09-02 15:41:31] [Rank 0] step:2001/10000 train_time:142401ms step_avg:71.16ms +[2025-09-02 15:41:33] [Rank 0] step:2021/10000 train_time:143703ms step_avg:71.10ms +[2025-09-02 15:41:33] [Rank 0] step:2021/10000 train_time:143703ms step_avg:71.10ms +[2025-09-02 15:41:34] [Rank 0] step:2041/10000 train_time:145258ms step_avg:71.17ms +[2025-09-02 15:41:34] [Rank 0] step:2041/10000 train_time:145258ms step_avg:71.17ms +[2025-09-02 15:41:36] [Rank 0] step:2061/10000 train_time:146693ms step_avg:71.18ms +[2025-09-02 15:41:36] [Rank 0] step:2061/10000 train_time:146693ms step_avg:71.18ms +[2025-09-02 15:41:37] [Rank 0] step:2081/10000 train_time:148128ms step_avg:71.18ms +[2025-09-02 15:41:37] [Rank 0] step:2081/10000 train_time:148128ms step_avg:71.18ms +[2025-09-02 15:41:39] [Rank 0] step:2101/10000 train_time:149565ms step_avg:71.19ms +[2025-09-02 15:41:39] [Rank 0] step:2101/10000 train_time:149565ms step_avg:71.19ms +[2025-09-02 15:41:40] [Rank 0] step:2121/10000 train_time:151002ms step_avg:71.19ms +[2025-09-02 15:41:40] [Rank 0] step:2121/10000 train_time:151002ms step_avg:71.19ms +[2025-09-02 15:41:42] [Rank 0] 
step:2141/10000 train_time:152440ms step_avg:71.20ms +[2025-09-02 15:41:42] [Rank 0] step:2141/10000 train_time:152440ms step_avg:71.20ms +[2025-09-02 15:41:43] [Rank 0] step:2161/10000 train_time:153877ms step_avg:71.21ms +[2025-09-02 15:41:43] [Rank 0] step:2161/10000 train_time:153877ms step_avg:71.21ms +[2025-09-02 15:41:44] [Rank 0] step:2181/10000 train_time:155314ms step_avg:71.21ms +[2025-09-02 15:41:44] [Rank 0] step:2181/10000 train_time:155314ms step_avg:71.21ms +[2025-09-02 15:41:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:41:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:41:57] [Rank 0] PRINT: step:2200/10000 val_loss:4.5873 svd_entropy: attn_qk:H=0.6928,top10E=0.35,eRank=131.8,q75/q25=49.70 attn_vo:H=0.7300,top10E=0.23,eRank=199.6,q75/q25=inf mlp_w1:H=0.6326,top10E=0.46,eRank=93.2,q75/q25=6.91 mlp_w2:H=0.7863,top10E=0.22,eRank=190.3,q75/q25=15.94 vo_prod:H=0.5887,top10E=0.34,eRank=70.0,q75/q25=inf train_time:156895ms step_avg:71.32ms +[2025-09-02 15:41:57] [Rank 0] PRINT: step:2200/10000 val_loss:4.5873 svd_entropy: attn_qk:H=0.6928,top10E=0.35,eRank=131.8,q75/q25=49.70 attn_vo:H=0.7300,top10E=0.23,eRank=199.6,q75/q25=inf mlp_w1:H=0.6326,top10E=0.46,eRank=93.2,q75/q25=6.91 mlp_w2:H=0.7863,top10E=0.22,eRank=190.3,q75/q25=15.94 vo_prod:H=0.5887,top10E=0.34,eRank=70.0,q75/q25=inf train_time:156895ms step_avg:71.32ms +[2025-09-02 15:41:58] [Rank 0] step:2201/10000 train_time:156908ms step_avg:71.29ms +[2025-09-02 15:41:58] [Rank 0] step:2201/10000 train_time:156908ms step_avg:71.29ms +[2025-09-02 15:41:59] [Rank 0] step:2221/10000 train_time:158230ms step_avg:71.24ms +[2025-09-02 15:41:59] [Rank 0] step:2221/10000 train_time:158230ms step_avg:71.24ms +[2025-09-02 15:42:00] [Rank 0] step:2241/10000 train_time:159697ms step_avg:71.26ms +[2025-09-02 15:42:00] 
[Rank 0] step:2241/10000 train_time:159697ms step_avg:71.26ms +[2025-09-02 15:42:02] [Rank 0] step:2261/10000 train_time:161176ms step_avg:71.29ms +[2025-09-02 15:42:02] [Rank 0] step:2261/10000 train_time:161176ms step_avg:71.29ms +[2025-09-02 15:42:03] [Rank 0] step:2281/10000 train_time:162656ms step_avg:71.31ms +[2025-09-02 15:42:03] [Rank 0] step:2281/10000 train_time:162656ms step_avg:71.31ms +[2025-09-02 15:42:05] [Rank 0] step:2301/10000 train_time:164136ms step_avg:71.33ms +[2025-09-02 15:42:05] [Rank 0] step:2301/10000 train_time:164136ms step_avg:71.33ms +[2025-09-02 15:42:06] [Rank 0] step:2321/10000 train_time:165615ms step_avg:71.36ms +[2025-09-02 15:42:06] [Rank 0] step:2321/10000 train_time:165615ms step_avg:71.36ms +[2025-09-02 15:42:08] [Rank 0] step:2341/10000 train_time:167095ms step_avg:71.38ms +[2025-09-02 15:42:08] [Rank 0] step:2341/10000 train_time:167095ms step_avg:71.38ms +[2025-09-02 15:42:09] [Rank 0] step:2361/10000 train_time:168573ms step_avg:71.40ms +[2025-09-02 15:42:09] [Rank 0] step:2361/10000 train_time:168573ms step_avg:71.40ms +[2025-09-02 15:42:11] [Rank 0] step:2381/10000 train_time:170053ms step_avg:71.42ms +[2025-09-02 15:42:11] [Rank 0] step:2381/10000 train_time:170053ms step_avg:71.42ms +[2025-09-02 15:42:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:42:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:42:24] [Rank 0] PRINT: step:2400/10000 val_loss:4.5083 svd_entropy: attn_qk:H=0.6987,top10E=0.34,eRank=134.9,q75/q25=55.11 attn_vo:H=0.7367,top10E=0.22,eRank=205.8,q75/q25=inf mlp_w1:H=0.6447,top10E=0.45,eRank=99.0,q75/q25=7.21 mlp_w2:H=0.7945,top10E=0.21,eRank=201.1,q75/q25=15.62 vo_prod:H=0.5958,top10E=0.33,eRank=73.6,q75/q25=inf train_time:171681ms step_avg:71.53ms +[2025-09-02 15:42:24] [Rank 0] PRINT: step:2400/10000 val_loss:4.5083 svd_entropy: attn_qk:H=0.6987,top10E=0.34,eRank=134.9,q75/q25=55.11 attn_vo:H=0.7367,top10E=0.22,eRank=205.8,q75/q25=inf mlp_w1:H=0.6447,top10E=0.45,eRank=99.0,q75/q25=7.21 mlp_w2:H=0.7945,top10E=0.21,eRank=201.1,q75/q25=15.62 vo_prod:H=0.5958,top10E=0.33,eRank=73.6,q75/q25=inf train_time:171681ms step_avg:71.53ms +[2025-09-02 15:42:24] [Rank 0] step:2401/10000 train_time:171694ms step_avg:71.51ms +[2025-09-02 15:42:24] [Rank 0] step:2401/10000 train_time:171694ms step_avg:71.51ms +[2025-09-02 15:42:26] [Rank 0] step:2421/10000 train_time:173137ms step_avg:71.51ms +[2025-09-02 15:42:26] [Rank 0] step:2421/10000 train_time:173137ms step_avg:71.51ms +[2025-09-02 15:42:27] [Rank 0] step:2441/10000 train_time:174665ms step_avg:71.55ms +[2025-09-02 15:42:27] [Rank 0] step:2441/10000 train_time:174665ms step_avg:71.55ms +[2025-09-02 15:42:29] [Rank 0] step:2461/10000 train_time:176142ms step_avg:71.57ms +[2025-09-02 15:42:29] [Rank 0] step:2461/10000 train_time:176142ms step_avg:71.57ms +[2025-09-02 15:42:30] [Rank 0] step:2481/10000 train_time:177618ms step_avg:71.59ms +[2025-09-02 15:42:30] [Rank 0] step:2481/10000 train_time:177618ms step_avg:71.59ms +[2025-09-02 15:42:32] [Rank 0] step:2501/10000 train_time:179095ms step_avg:71.61ms +[2025-09-02 15:42:32] [Rank 0] step:2501/10000 train_time:179095ms step_avg:71.61ms +[2025-09-02 15:42:33] [Rank 0] step:2521/10000 train_time:180573ms step_avg:71.63ms +[2025-09-02 15:42:33] [Rank 0] step:2521/10000 train_time:180573ms step_avg:71.63ms +[2025-09-02 15:42:35] [Rank 0] 
step:2541/10000 train_time:182051ms step_avg:71.65ms +[2025-09-02 15:42:35] [Rank 0] step:2541/10000 train_time:182051ms step_avg:71.65ms +[2025-09-02 15:42:36] [Rank 0] step:2561/10000 train_time:183532ms step_avg:71.66ms +[2025-09-02 15:42:36] [Rank 0] step:2561/10000 train_time:183532ms step_avg:71.66ms +[2025-09-02 15:42:38] [Rank 0] step:2581/10000 train_time:185016ms step_avg:71.68ms +[2025-09-02 15:42:38] [Rank 0] step:2581/10000 train_time:185016ms step_avg:71.68ms +[2025-09-02 15:42:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:42:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:42:51] [Rank 0] PRINT: step:2600/10000 val_loss:4.4474 svd_entropy: attn_qk:H=0.7044,top10E=0.33,eRank=138.2,q75/q25=60.02 attn_vo:H=0.7427,top10E=0.21,eRank=211.5,q75/q25=inf mlp_w1:H=0.6551,top10E=0.44,eRank=104.3,q75/q25=7.51 mlp_w2:H=0.8012,top10E=0.20,eRank=210.3,q75/q25=15.62 vo_prod:H=0.6025,top10E=0.32,eRank=77.3,q75/q25=inf train_time:186642ms step_avg:71.79ms +[2025-09-02 15:42:51] [Rank 0] PRINT: step:2600/10000 val_loss:4.4474 svd_entropy: attn_qk:H=0.7044,top10E=0.33,eRank=138.2,q75/q25=60.02 attn_vo:H=0.7427,top10E=0.21,eRank=211.5,q75/q25=inf mlp_w1:H=0.6551,top10E=0.44,eRank=104.3,q75/q25=7.51 mlp_w2:H=0.8012,top10E=0.20,eRank=210.3,q75/q25=15.62 vo_prod:H=0.6025,top10E=0.32,eRank=77.3,q75/q25=inf train_time:186642ms step_avg:71.79ms +[2025-09-02 15:42:51] [Rank 0] step:2601/10000 train_time:186655ms step_avg:71.76ms +[2025-09-02 15:42:51] [Rank 0] step:2601/10000 train_time:186655ms step_avg:71.76ms +[2025-09-02 15:42:53] [Rank 0] step:2621/10000 train_time:188096ms step_avg:71.77ms +[2025-09-02 15:42:53] [Rank 0] step:2621/10000 train_time:188096ms step_avg:71.77ms +[2025-09-02 15:42:54] [Rank 0] step:2641/10000 train_time:189677ms step_avg:71.82ms +[2025-09-02 
15:42:54] [Rank 0] step:2641/10000 train_time:189677ms step_avg:71.82ms +[2025-09-02 15:42:56] [Rank 0] step:2661/10000 train_time:191155ms step_avg:71.84ms +[2025-09-02 15:42:56] [Rank 0] step:2661/10000 train_time:191155ms step_avg:71.84ms +[2025-09-02 15:42:57] [Rank 0] step:2681/10000 train_time:192633ms step_avg:71.85ms +[2025-09-02 15:42:57] [Rank 0] step:2681/10000 train_time:192633ms step_avg:71.85ms +[2025-09-02 15:42:59] [Rank 0] step:2701/10000 train_time:194112ms step_avg:71.87ms +[2025-09-02 15:42:59] [Rank 0] step:2701/10000 train_time:194112ms step_avg:71.87ms +[2025-09-02 15:43:00] [Rank 0] step:2721/10000 train_time:195590ms step_avg:71.88ms +[2025-09-02 15:43:00] [Rank 0] step:2721/10000 train_time:195590ms step_avg:71.88ms +[2025-09-02 15:43:02] [Rank 0] step:2741/10000 train_time:197068ms step_avg:71.90ms +[2025-09-02 15:43:02] [Rank 0] step:2741/10000 train_time:197068ms step_avg:71.90ms +[2025-09-02 15:43:03] [Rank 0] step:2761/10000 train_time:198547ms step_avg:71.91ms +[2025-09-02 15:43:03] [Rank 0] step:2761/10000 train_time:198547ms step_avg:71.91ms +[2025-09-02 15:43:05] [Rank 0] step:2781/10000 train_time:200027ms step_avg:71.93ms +[2025-09-02 15:43:05] [Rank 0] step:2781/10000 train_time:200027ms step_avg:71.93ms +[2025-09-02 15:43:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:43:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:43:18] [Rank 0] PRINT: step:2800/10000 val_loss:4.4068 svd_entropy: attn_qk:H=0.7097,top10E=0.33,eRank=141.3,q75/q25=64.37 attn_vo:H=0.7481,top10E=0.20,eRank=216.8,q75/q25=inf mlp_w1:H=0.6655,top10E=0.42,eRank=109.9,q75/q25=7.78 mlp_w2:H=0.8080,top10E=0.19,eRank=219.8,q75/q25=15.54 vo_prod:H=0.6090,top10E=0.31,eRank=80.9,q75/q25=inf train_time:201654ms step_avg:72.02ms +[2025-09-02 15:43:18] [Rank 0] PRINT: step:2800/10000 val_loss:4.4068 svd_entropy: attn_qk:H=0.7097,top10E=0.33,eRank=141.3,q75/q25=64.37 attn_vo:H=0.7481,top10E=0.20,eRank=216.8,q75/q25=inf mlp_w1:H=0.6655,top10E=0.42,eRank=109.9,q75/q25=7.78 mlp_w2:H=0.8080,top10E=0.19,eRank=219.8,q75/q25=15.54 vo_prod:H=0.6090,top10E=0.31,eRank=80.9,q75/q25=inf train_time:201654ms step_avg:72.02ms +[2025-09-02 15:43:18] [Rank 0] step:2801/10000 train_time:201666ms step_avg:72.00ms +[2025-09-02 15:43:18] [Rank 0] step:2801/10000 train_time:201666ms step_avg:72.00ms +[2025-09-02 15:43:19] [Rank 0] step:2821/10000 train_time:203016ms step_avg:71.97ms +[2025-09-02 15:43:19] [Rank 0] step:2821/10000 train_time:203016ms step_avg:71.97ms +[2025-09-02 15:43:21] [Rank 0] step:2841/10000 train_time:204494ms step_avg:71.98ms +[2025-09-02 15:43:21] [Rank 0] step:2841/10000 train_time:204494ms step_avg:71.98ms +[2025-09-02 15:43:22] [Rank 0] step:2861/10000 train_time:205972ms step_avg:71.99ms +[2025-09-02 15:43:22] [Rank 0] step:2861/10000 train_time:205972ms step_avg:71.99ms +[2025-09-02 15:43:24] [Rank 0] step:2881/10000 train_time:207451ms step_avg:72.01ms +[2025-09-02 15:43:24] [Rank 0] step:2881/10000 train_time:207451ms step_avg:72.01ms +[2025-09-02 15:43:25] [Rank 0] step:2901/10000 train_time:208929ms step_avg:72.02ms +[2025-09-02 15:43:25] [Rank 0] step:2901/10000 train_time:208929ms step_avg:72.02ms +[2025-09-02 15:43:27] [Rank 0] step:2921/10000 train_time:210409ms step_avg:72.03ms +[2025-09-02 15:43:27] [Rank 0] step:2921/10000 train_time:210409ms step_avg:72.03ms +[2025-09-02 15:43:28] [Rank 0] 
step:2941/10000 train_time:211902ms step_avg:72.05ms +[2025-09-02 15:43:28] [Rank 0] step:2941/10000 train_time:211902ms step_avg:72.05ms +[2025-09-02 15:43:30] [Rank 0] step:2961/10000 train_time:213382ms step_avg:72.06ms +[2025-09-02 15:43:30] [Rank 0] step:2961/10000 train_time:213382ms step_avg:72.06ms +[2025-09-02 15:43:31] [Rank 0] step:2981/10000 train_time:214868ms step_avg:72.08ms +[2025-09-02 15:43:31] [Rank 0] step:2981/10000 train_time:214868ms step_avg:72.08ms +[2025-09-02 15:43:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:43:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:43:44] [Rank 0] PRINT: step:3000/10000 val_loss:4.3643 svd_entropy: attn_qk:H=0.7144,top10E=0.32,eRank=144.2,q75/q25=68.00 attn_vo:H=0.7528,top10E=0.19,eRank=221.6,q75/q25=inf mlp_w1:H=0.6746,top10E=0.41,eRank=115.2,q75/q25=8.07 mlp_w2:H=0.8133,top10E=0.18,eRank=227.6,q75/q25=15.53 vo_prod:H=0.6151,top10E=0.30,eRank=84.4,q75/q25=inf train_time:216504ms step_avg:72.17ms +[2025-09-02 15:43:44] [Rank 0] PRINT: step:3000/10000 val_loss:4.3643 svd_entropy: attn_qk:H=0.7144,top10E=0.32,eRank=144.2,q75/q25=68.00 attn_vo:H=0.7528,top10E=0.19,eRank=221.6,q75/q25=inf mlp_w1:H=0.6746,top10E=0.41,eRank=115.2,q75/q25=8.07 mlp_w2:H=0.8133,top10E=0.18,eRank=227.6,q75/q25=15.53 vo_prod:H=0.6151,top10E=0.30,eRank=84.4,q75/q25=inf train_time:216504ms step_avg:72.17ms +[2025-09-02 15:43:44] [Rank 0] step:3001/10000 train_time:216517ms step_avg:72.15ms +[2025-09-02 15:43:44] [Rank 0] step:3001/10000 train_time:216517ms step_avg:72.15ms +[2025-09-02 15:43:46] [Rank 0] step:3021/10000 train_time:217864ms step_avg:72.12ms +[2025-09-02 15:43:46] [Rank 0] step:3021/10000 train_time:217864ms step_avg:72.12ms +[2025-09-02 15:43:47] [Rank 0] step:3041/10000 train_time:219348ms step_avg:72.13ms +[2025-09-02 
15:43:47] [Rank 0] step:3041/10000 train_time:219348ms step_avg:72.13ms +[2025-09-02 15:43:49] [Rank 0] step:3061/10000 train_time:220833ms step_avg:72.14ms +[2025-09-02 15:43:49] [Rank 0] step:3061/10000 train_time:220833ms step_avg:72.14ms +[2025-09-02 15:43:50] [Rank 0] step:3081/10000 train_time:222320ms step_avg:72.16ms +[2025-09-02 15:43:50] [Rank 0] step:3081/10000 train_time:222320ms step_avg:72.16ms +[2025-09-02 15:43:52] [Rank 0] step:3101/10000 train_time:223806ms step_avg:72.17ms +[2025-09-02 15:43:52] [Rank 0] step:3101/10000 train_time:223806ms step_avg:72.17ms +[2025-09-02 15:43:53] [Rank 0] step:3121/10000 train_time:225293ms step_avg:72.19ms +[2025-09-02 15:43:53] [Rank 0] step:3121/10000 train_time:225293ms step_avg:72.19ms +[2025-09-02 15:43:55] [Rank 0] step:3141/10000 train_time:226779ms step_avg:72.20ms +[2025-09-02 15:43:55] [Rank 0] step:3141/10000 train_time:226779ms step_avg:72.20ms +[2025-09-02 15:43:56] [Rank 0] step:3161/10000 train_time:228267ms step_avg:72.21ms +[2025-09-02 15:43:56] [Rank 0] step:3161/10000 train_time:228267ms step_avg:72.21ms +[2025-09-02 15:43:58] [Rank 0] step:3181/10000 train_time:229754ms step_avg:72.23ms +[2025-09-02 15:43:58] [Rank 0] step:3181/10000 train_time:229754ms step_avg:72.23ms +[2025-09-02 15:43:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:43:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:44:11] [Rank 0] PRINT: step:3200/10000 val_loss:4.3251 svd_entropy: attn_qk:H=0.7186,top10E=0.31,eRank=146.9,q75/q25=71.34 attn_vo:H=0.7570,top10E=0.19,eRank=226.0,q75/q25=inf mlp_w1:H=0.6831,top10E=0.40,eRank=120.3,q75/q25=8.35 mlp_w2:H=0.8181,top10E=0.18,eRank=235.0,q75/q25=15.64 vo_prod:H=0.6206,top10E=0.29,eRank=87.8,q75/q25=inf train_time:231393ms step_avg:72.31ms +[2025-09-02 15:44:11] [Rank 0] PRINT: step:3200/10000 val_loss:4.3251 svd_entropy: attn_qk:H=0.7186,top10E=0.31,eRank=146.9,q75/q25=71.34 attn_vo:H=0.7570,top10E=0.19,eRank=226.0,q75/q25=inf mlp_w1:H=0.6831,top10E=0.40,eRank=120.3,q75/q25=8.35 mlp_w2:H=0.8181,top10E=0.18,eRank=235.0,q75/q25=15.64 vo_prod:H=0.6206,top10E=0.29,eRank=87.8,q75/q25=inf train_time:231393ms step_avg:72.31ms +[2025-09-02 15:44:11] [Rank 0] step:3201/10000 train_time:231405ms step_avg:72.29ms +[2025-09-02 15:44:11] [Rank 0] step:3201/10000 train_time:231405ms step_avg:72.29ms +[2025-09-02 15:44:12] [Rank 0] step:3221/10000 train_time:232765ms step_avg:72.26ms +[2025-09-02 15:44:12] [Rank 0] step:3221/10000 train_time:232765ms step_avg:72.26ms +[2025-09-02 15:44:14] [Rank 0] step:3241/10000 train_time:234247ms step_avg:72.28ms +[2025-09-02 15:44:14] [Rank 0] step:3241/10000 train_time:234247ms step_avg:72.28ms +[2025-09-02 15:44:15] [Rank 0] step:3261/10000 train_time:235730ms step_avg:72.29ms +[2025-09-02 15:44:15] [Rank 0] step:3261/10000 train_time:235730ms step_avg:72.29ms +[2025-09-02 15:44:17] [Rank 0] step:3281/10000 train_time:237214ms step_avg:72.30ms +[2025-09-02 15:44:17] [Rank 0] step:3281/10000 train_time:237214ms step_avg:72.30ms +[2025-09-02 15:44:18] [Rank 0] step:3301/10000 train_time:238699ms step_avg:72.31ms +[2025-09-02 15:44:18] [Rank 0] step:3301/10000 train_time:238699ms step_avg:72.31ms +[2025-09-02 15:44:20] [Rank 0] step:3321/10000 train_time:240184ms step_avg:72.32ms +[2025-09-02 15:44:20] [Rank 0] step:3321/10000 train_time:240184ms step_avg:72.32ms +[2025-09-02 15:44:21] [Rank 0] 
step:3341/10000 train_time:241669ms step_avg:72.33ms +[2025-09-02 15:44:21] [Rank 0] step:3341/10000 train_time:241669ms step_avg:72.33ms +[2025-09-02 15:44:23] [Rank 0] step:3361/10000 train_time:243155ms step_avg:72.35ms +[2025-09-02 15:44:23] [Rank 0] step:3361/10000 train_time:243155ms step_avg:72.35ms +[2025-09-02 15:44:24] [Rank 0] step:3381/10000 train_time:244640ms step_avg:72.36ms +[2025-09-02 15:44:24] [Rank 0] step:3381/10000 train_time:244640ms step_avg:72.36ms +[2025-09-02 15:44:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:44:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:44:37] [Rank 0] PRINT: step:3400/10000 val_loss:4.2836 svd_entropy: attn_qk:H=0.7230,top10E=0.31,eRank=149.8,q75/q25=74.53 attn_vo:H=0.7611,top10E=0.18,eRank=230.4,q75/q25=inf mlp_w1:H=0.6909,top10E=0.39,eRank=125.5,q75/q25=8.66 mlp_w2:H=0.8223,top10E=0.17,eRank=241.8,q75/q25=15.78 vo_prod:H=0.6260,top10E=0.28,eRank=91.2,q75/q25=inf train_time:246277ms step_avg:72.43ms +[2025-09-02 15:44:37] [Rank 0] PRINT: step:3400/10000 val_loss:4.2836 svd_entropy: attn_qk:H=0.7230,top10E=0.31,eRank=149.8,q75/q25=74.53 attn_vo:H=0.7611,top10E=0.18,eRank=230.4,q75/q25=inf mlp_w1:H=0.6909,top10E=0.39,eRank=125.5,q75/q25=8.66 mlp_w2:H=0.8223,top10E=0.17,eRank=241.8,q75/q25=15.78 vo_prod:H=0.6260,top10E=0.28,eRank=91.2,q75/q25=inf train_time:246277ms step_avg:72.43ms +[2025-09-02 15:44:37] [Rank 0] step:3401/10000 train_time:246288ms step_avg:72.42ms +[2025-09-02 15:44:37] [Rank 0] step:3401/10000 train_time:246288ms step_avg:72.42ms +[2025-09-02 15:44:39] [Rank 0] step:3421/10000 train_time:247640ms step_avg:72.39ms +[2025-09-02 15:44:39] [Rank 0] step:3421/10000 train_time:247640ms step_avg:72.39ms +[2025-09-02 15:44:40] [Rank 0] step:3441/10000 train_time:249122ms step_avg:72.40ms +[2025-09-02 
15:44:40] [Rank 0] step:3441/10000 train_time:249122ms step_avg:72.40ms +[2025-09-02 15:44:42] [Rank 0] step:3461/10000 train_time:250607ms step_avg:72.41ms +[2025-09-02 15:44:42] [Rank 0] step:3461/10000 train_time:250607ms step_avg:72.41ms +[2025-09-02 15:44:43] [Rank 0] step:3481/10000 train_time:252095ms step_avg:72.42ms +[2025-09-02 15:44:43] [Rank 0] step:3481/10000 train_time:252095ms step_avg:72.42ms +[2025-09-02 15:44:45] [Rank 0] step:3501/10000 train_time:253586ms step_avg:72.43ms +[2025-09-02 15:44:45] [Rank 0] step:3501/10000 train_time:253586ms step_avg:72.43ms +[2025-09-02 15:44:46] [Rank 0] step:3521/10000 train_time:255074ms step_avg:72.44ms +[2025-09-02 15:44:46] [Rank 0] step:3521/10000 train_time:255074ms step_avg:72.44ms +[2025-09-02 15:44:48] [Rank 0] step:3541/10000 train_time:256561ms step_avg:72.45ms +[2025-09-02 15:44:48] [Rank 0] step:3541/10000 train_time:256561ms step_avg:72.45ms +[2025-09-02 15:44:49] [Rank 0] step:3561/10000 train_time:258046ms step_avg:72.46ms +[2025-09-02 15:44:49] [Rank 0] step:3561/10000 train_time:258046ms step_avg:72.46ms +[2025-09-02 15:44:51] [Rank 0] step:3581/10000 train_time:259531ms step_avg:72.47ms +[2025-09-02 15:44:51] [Rank 0] step:3581/10000 train_time:259531ms step_avg:72.47ms +[2025-09-02 15:44:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:44:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:45:04] [Rank 0] PRINT: step:3600/10000 val_loss:4.2736 svd_entropy: attn_qk:H=0.7268,top10E=0.30,eRank=152.4,q75/q25=77.49 attn_vo:H=0.7646,top10E=0.18,eRank=234.3,q75/q25=inf mlp_w1:H=0.6979,top10E=0.38,eRank=130.2,q75/q25=8.95 mlp_w2:H=0.8257,top10E=0.17,eRank=247.5,q75/q25=15.96 vo_prod:H=0.6305,top10E=0.27,eRank=94.1,q75/q25=inf train_time:261168ms step_avg:72.55ms +[2025-09-02 15:45:04] [Rank 0] PRINT: step:3600/10000 val_loss:4.2736 svd_entropy: attn_qk:H=0.7268,top10E=0.30,eRank=152.4,q75/q25=77.49 attn_vo:H=0.7646,top10E=0.18,eRank=234.3,q75/q25=inf mlp_w1:H=0.6979,top10E=0.38,eRank=130.2,q75/q25=8.95 mlp_w2:H=0.8257,top10E=0.17,eRank=247.5,q75/q25=15.96 vo_prod:H=0.6305,top10E=0.27,eRank=94.1,q75/q25=inf train_time:261168ms step_avg:72.55ms +[2025-09-02 15:45:04] [Rank 0] step:3601/10000 train_time:261179ms step_avg:72.53ms +[2025-09-02 15:45:04] [Rank 0] step:3601/10000 train_time:261179ms step_avg:72.53ms +[2025-09-02 15:45:05] [Rank 0] step:3621/10000 train_time:262535ms step_avg:72.50ms +[2025-09-02 15:45:05] [Rank 0] step:3621/10000 train_time:262535ms step_avg:72.50ms +[2025-09-02 15:45:07] [Rank 0] step:3641/10000 train_time:264018ms step_avg:72.51ms +[2025-09-02 15:45:07] [Rank 0] step:3641/10000 train_time:264018ms step_avg:72.51ms +[2025-09-02 15:45:08] [Rank 0] step:3661/10000 train_time:265505ms step_avg:72.52ms +[2025-09-02 15:45:08] [Rank 0] step:3661/10000 train_time:265505ms step_avg:72.52ms +[2025-09-02 15:45:10] [Rank 0] step:3681/10000 train_time:266991ms step_avg:72.53ms +[2025-09-02 15:45:10] [Rank 0] step:3681/10000 train_time:266991ms step_avg:72.53ms +[2025-09-02 15:45:11] [Rank 0] step:3701/10000 train_time:268477ms step_avg:72.54ms +[2025-09-02 15:45:11] [Rank 0] step:3701/10000 train_time:268477ms step_avg:72.54ms +[2025-09-02 15:45:13] [Rank 0] step:3721/10000 train_time:269988ms step_avg:72.56ms +[2025-09-02 15:45:13] [Rank 0] step:3721/10000 train_time:269988ms step_avg:72.56ms +[2025-09-02 15:45:14] [Rank 0] 
step:3741/10000 train_time:271510ms step_avg:72.58ms +[2025-09-02 15:45:14] [Rank 0] step:3741/10000 train_time:271510ms step_avg:72.58ms +[2025-09-02 15:45:16] [Rank 0] step:3761/10000 train_time:273033ms step_avg:72.60ms +[2025-09-02 15:45:16] [Rank 0] step:3761/10000 train_time:273033ms step_avg:72.60ms +[2025-09-02 15:45:17] [Rank 0] step:3781/10000 train_time:274559ms step_avg:72.62ms +[2025-09-02 15:45:17] [Rank 0] step:3781/10000 train_time:274559ms step_avg:72.62ms +[2025-09-02 15:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:45:30] [Rank 0] PRINT: step:3800/10000 val_loss:4.2138 svd_entropy: attn_qk:H=0.7301,top10E=0.30,eRank=154.7,q75/q25=79.42 attn_vo:H=0.7679,top10E=0.17,eRank=238.0,q75/q25=inf mlp_w1:H=0.7048,top10E=0.37,eRank=135.2,q75/q25=9.31 mlp_w2:H=0.8287,top10E=0.16,eRank=252.6,q75/q25=16.20 vo_prod:H=0.6347,top10E=0.27,eRank=97.0,q75/q25=inf train_time:276235ms step_avg:72.69ms +[2025-09-02 15:45:30] [Rank 0] PRINT: step:3800/10000 val_loss:4.2138 svd_entropy: attn_qk:H=0.7301,top10E=0.30,eRank=154.7,q75/q25=79.42 attn_vo:H=0.7679,top10E=0.17,eRank=238.0,q75/q25=inf mlp_w1:H=0.7048,top10E=0.37,eRank=135.2,q75/q25=9.31 mlp_w2:H=0.8287,top10E=0.16,eRank=252.6,q75/q25=16.20 vo_prod:H=0.6347,top10E=0.27,eRank=97.0,q75/q25=inf train_time:276235ms step_avg:72.69ms +[2025-09-02 15:45:30] [Rank 0] step:3801/10000 train_time:276246ms step_avg:72.68ms +[2025-09-02 15:45:30] [Rank 0] step:3801/10000 train_time:276246ms step_avg:72.68ms +[2025-09-02 15:45:32] [Rank 0] step:3821/10000 train_time:277634ms step_avg:72.66ms +[2025-09-02 15:45:32] [Rank 0] step:3821/10000 train_time:277634ms step_avg:72.66ms +[2025-09-02 15:45:33] [Rank 0] step:3841/10000 train_time:279158ms step_avg:72.68ms +[2025-09-02 
15:45:33] [Rank 0] step:3841/10000 train_time:279158ms step_avg:72.68ms +[2025-09-02 15:45:35] [Rank 0] step:3861/10000 train_time:280681ms step_avg:72.70ms +[2025-09-02 15:45:35] [Rank 0] step:3861/10000 train_time:280681ms step_avg:72.70ms +[2025-09-02 15:45:37] [Rank 0] step:3881/10000 train_time:282231ms step_avg:72.72ms +[2025-09-02 15:45:37] [Rank 0] step:3881/10000 train_time:282231ms step_avg:72.72ms +[2025-09-02 15:45:38] [Rank 0] step:3901/10000 train_time:283754ms step_avg:72.74ms +[2025-09-02 15:45:38] [Rank 0] step:3901/10000 train_time:283754ms step_avg:72.74ms +[2025-09-02 15:45:40] [Rank 0] step:3921/10000 train_time:285276ms step_avg:72.76ms +[2025-09-02 15:45:40] [Rank 0] step:3921/10000 train_time:285276ms step_avg:72.76ms +[2025-09-02 15:45:41] [Rank 0] step:3941/10000 train_time:286799ms step_avg:72.77ms +[2025-09-02 15:45:41] [Rank 0] step:3941/10000 train_time:286799ms step_avg:72.77ms +[2025-09-02 15:45:43] [Rank 0] step:3961/10000 train_time:288321ms step_avg:72.79ms +[2025-09-02 15:45:43] [Rank 0] step:3961/10000 train_time:288321ms step_avg:72.79ms +[2025-09-02 15:45:44] [Rank 0] step:3981/10000 train_time:289844ms step_avg:72.81ms +[2025-09-02 15:45:44] [Rank 0] step:3981/10000 train_time:289844ms step_avg:72.81ms +[2025-09-02 15:45:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:45:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:45:57] [Rank 0] PRINT: step:4000/10000 val_loss:4.1896 svd_entropy: attn_qk:H=0.7334,top10E=0.29,eRank=157.1,q75/q25=81.21 attn_vo:H=0.7709,top10E=0.17,eRank=241.4,q75/q25=inf mlp_w1:H=0.7116,top10E=0.36,eRank=140.2,q75/q25=9.57 mlp_w2:H=0.8318,top10E=0.16,eRank=257.9,q75/q25=16.17 vo_prod:H=0.6387,top10E=0.26,eRank=99.8,q75/q25=inf train_time:291517ms step_avg:72.88ms +[2025-09-02 15:45:57] [Rank 0] PRINT: step:4000/10000 val_loss:4.1896 svd_entropy: attn_qk:H=0.7334,top10E=0.29,eRank=157.1,q75/q25=81.21 attn_vo:H=0.7709,top10E=0.17,eRank=241.4,q75/q25=inf mlp_w1:H=0.7116,top10E=0.36,eRank=140.2,q75/q25=9.57 mlp_w2:H=0.8318,top10E=0.16,eRank=257.9,q75/q25=16.17 vo_prod:H=0.6387,top10E=0.26,eRank=99.8,q75/q25=inf train_time:291517ms step_avg:72.88ms +[2025-09-02 15:45:58] [Rank 0] step:4001/10000 train_time:291529ms step_avg:72.86ms +[2025-09-02 15:45:58] [Rank 0] step:4001/10000 train_time:291529ms step_avg:72.86ms +[2025-09-02 15:45:59] [Rank 0] step:4021/10000 train_time:292929ms step_avg:72.85ms +[2025-09-02 15:45:59] [Rank 0] step:4021/10000 train_time:292929ms step_avg:72.85ms +[2025-09-02 15:46:01] [Rank 0] step:4041/10000 train_time:294451ms step_avg:72.87ms +[2025-09-02 15:46:01] [Rank 0] step:4041/10000 train_time:294451ms step_avg:72.87ms +[2025-09-02 15:46:02] [Rank 0] step:4061/10000 train_time:295971ms step_avg:72.88ms +[2025-09-02 15:46:02] [Rank 0] step:4061/10000 train_time:295971ms step_avg:72.88ms +[2025-09-02 15:46:04] [Rank 0] step:4081/10000 train_time:297602ms step_avg:72.92ms +[2025-09-02 15:46:04] [Rank 0] step:4081/10000 train_time:297602ms step_avg:72.92ms +[2025-09-02 15:46:05] [Rank 0] step:4101/10000 train_time:299123ms step_avg:72.94ms +[2025-09-02 15:46:05] [Rank 0] step:4101/10000 train_time:299123ms step_avg:72.94ms +[2025-09-02 15:46:07] [Rank 0] step:4121/10000 train_time:300645ms step_avg:72.95ms +[2025-09-02 15:46:07] [Rank 0] step:4121/10000 train_time:300645ms step_avg:72.95ms +[2025-09-02 15:46:08] [Rank 0] 
step:4141/10000 train_time:302166ms step_avg:72.97ms +[2025-09-02 15:46:08] [Rank 0] step:4141/10000 train_time:302166ms step_avg:72.97ms +[2025-09-02 15:46:10] [Rank 0] step:4161/10000 train_time:303688ms step_avg:72.98ms +[2025-09-02 15:46:10] [Rank 0] step:4161/10000 train_time:303688ms step_avg:72.98ms +[2025-09-02 15:46:12] [Rank 0] step:4181/10000 train_time:305211ms step_avg:73.00ms +[2025-09-02 15:46:12] [Rank 0] step:4181/10000 train_time:305211ms step_avg:73.00ms +[2025-09-02 15:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:46:25] [Rank 0] PRINT: step:4200/10000 val_loss:4.1725 svd_entropy: attn_qk:H=0.7366,top10E=0.29,eRank=159.4,q75/q25=82.73 attn_vo:H=0.7737,top10E=0.16,eRank=244.7,q75/q25=inf mlp_w1:H=0.7173,top10E=0.36,eRank=144.8,q75/q25=9.88 mlp_w2:H=0.8343,top10E=0.16,eRank=262.4,q75/q25=16.36 vo_prod:H=0.6426,top10E=0.25,eRank=102.6,q75/q25=inf train_time:306885ms step_avg:73.07ms +[2025-09-02 15:46:25] [Rank 0] PRINT: step:4200/10000 val_loss:4.1725 svd_entropy: attn_qk:H=0.7366,top10E=0.29,eRank=159.4,q75/q25=82.73 attn_vo:H=0.7737,top10E=0.16,eRank=244.7,q75/q25=inf mlp_w1:H=0.7173,top10E=0.36,eRank=144.8,q75/q25=9.88 mlp_w2:H=0.8343,top10E=0.16,eRank=262.4,q75/q25=16.36 vo_prod:H=0.6426,top10E=0.25,eRank=102.6,q75/q25=inf train_time:306885ms step_avg:73.07ms +[2025-09-02 15:46:25] [Rank 0] step:4201/10000 train_time:306897ms step_avg:73.05ms +[2025-09-02 15:46:25] [Rank 0] step:4201/10000 train_time:306897ms step_avg:73.05ms +[2025-09-02 15:46:26] [Rank 0] step:4221/10000 train_time:308275ms step_avg:73.03ms +[2025-09-02 15:46:26] [Rank 0] step:4221/10000 train_time:308275ms step_avg:73.03ms +[2025-09-02 15:46:28] [Rank 0] step:4241/10000 train_time:309795ms step_avg:73.05ms +[2025-09-02 
15:46:28] [Rank 0] step:4241/10000 train_time:309795ms step_avg:73.05ms +[2025-09-02 15:46:30] [Rank 0] step:4261/10000 train_time:311316ms step_avg:73.06ms +[2025-09-02 15:46:30] [Rank 0] step:4261/10000 train_time:311316ms step_avg:73.06ms +[2025-09-02 15:46:31] [Rank 0] step:4281/10000 train_time:312837ms step_avg:73.08ms +[2025-09-02 15:46:31] [Rank 0] step:4281/10000 train_time:312837ms step_avg:73.08ms +[2025-09-02 15:46:33] [Rank 0] step:4301/10000 train_time:314359ms step_avg:73.09ms +[2025-09-02 15:46:33] [Rank 0] step:4301/10000 train_time:314359ms step_avg:73.09ms +[2025-09-02 15:46:34] [Rank 0] step:4321/10000 train_time:315883ms step_avg:73.10ms +[2025-09-02 15:46:34] [Rank 0] step:4321/10000 train_time:315883ms step_avg:73.10ms +[2025-09-02 15:46:36] [Rank 0] step:4341/10000 train_time:317402ms step_avg:73.12ms +[2025-09-02 15:46:36] [Rank 0] step:4341/10000 train_time:317402ms step_avg:73.12ms +[2025-09-02 15:46:37] [Rank 0] step:4361/10000 train_time:318962ms step_avg:73.14ms +[2025-09-02 15:46:37] [Rank 0] step:4361/10000 train_time:318962ms step_avg:73.14ms +[2025-09-02 15:46:39] [Rank 0] step:4381/10000 train_time:320483ms step_avg:73.15ms +[2025-09-02 15:46:39] [Rank 0] step:4381/10000 train_time:320483ms step_avg:73.15ms +[2025-09-02 15:46:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:46:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:46:52] [Rank 0] PRINT: step:4400/10000 val_loss:4.1455 svd_entropy: attn_qk:H=0.7396,top10E=0.28,eRank=161.7,q75/q25=83.39 attn_vo:H=0.7762,top10E=0.16,eRank=247.7,q75/q25=inf mlp_w1:H=0.7230,top10E=0.35,eRank=149.4,q75/q25=10.15 mlp_w2:H=0.8365,top10E=0.15,eRank=266.4,q75/q25=16.59 vo_prod:H=0.6460,top10E=0.25,eRank=105.2,q75/q25=inf train_time:322159ms step_avg:73.22ms +[2025-09-02 15:46:52] [Rank 0] PRINT: step:4400/10000 val_loss:4.1455 svd_entropy: attn_qk:H=0.7396,top10E=0.28,eRank=161.7,q75/q25=83.39 attn_vo:H=0.7762,top10E=0.16,eRank=247.7,q75/q25=inf mlp_w1:H=0.7230,top10E=0.35,eRank=149.4,q75/q25=10.15 mlp_w2:H=0.8365,top10E=0.15,eRank=266.4,q75/q25=16.59 vo_prod:H=0.6460,top10E=0.25,eRank=105.2,q75/q25=inf train_time:322159ms step_avg:73.22ms +[2025-09-02 15:46:52] [Rank 0] step:4401/10000 train_time:322170ms step_avg:73.20ms +[2025-09-02 15:46:52] [Rank 0] step:4401/10000 train_time:322170ms step_avg:73.20ms +[2025-09-02 15:46:54] [Rank 0] step:4421/10000 train_time:323560ms step_avg:73.19ms +[2025-09-02 15:46:54] [Rank 0] step:4421/10000 train_time:323560ms step_avg:73.19ms +[2025-09-02 15:46:55] [Rank 0] step:4441/10000 train_time:325079ms step_avg:73.20ms +[2025-09-02 15:46:55] [Rank 0] step:4441/10000 train_time:325079ms step_avg:73.20ms +[2025-09-02 15:46:57] [Rank 0] step:4461/10000 train_time:326607ms step_avg:73.21ms +[2025-09-02 15:46:57] [Rank 0] step:4461/10000 train_time:326607ms step_avg:73.21ms +[2025-09-02 15:46:58] [Rank 0] step:4481/10000 train_time:328135ms step_avg:73.23ms +[2025-09-02 15:46:58] [Rank 0] step:4481/10000 train_time:328135ms step_avg:73.23ms +[2025-09-02 15:47:00] [Rank 0] step:4501/10000 train_time:329664ms step_avg:73.24ms +[2025-09-02 15:47:00] [Rank 0] step:4501/10000 train_time:329664ms step_avg:73.24ms +[2025-09-02 15:47:01] [Rank 0] step:4521/10000 train_time:331191ms step_avg:73.26ms +[2025-09-02 15:47:01] [Rank 0] step:4521/10000 train_time:331191ms step_avg:73.26ms +[2025-09-02 15:47:03] [Rank 
0] step:4541/10000 train_time:332722ms step_avg:73.27ms +[2025-09-02 15:47:03] [Rank 0] step:4541/10000 train_time:332722ms step_avg:73.27ms +[2025-09-02 15:47:04] [Rank 0] step:4561/10000 train_time:334252ms step_avg:73.28ms +[2025-09-02 15:47:04] [Rank 0] step:4561/10000 train_time:334252ms step_avg:73.28ms +[2025-09-02 15:47:06] [Rank 0] step:4581/10000 train_time:335782ms step_avg:73.30ms +[2025-09-02 15:47:06] [Rank 0] step:4581/10000 train_time:335782ms step_avg:73.30ms +[2025-09-02 15:47:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:47:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:47:19] [Rank 0] PRINT: step:4600/10000 val_loss:4.1165 svd_entropy: attn_qk:H=0.7425,top10E=0.28,eRank=164.0,q75/q25=84.49 attn_vo:H=0.7787,top10E=0.16,eRank=250.7,q75/q25=inf mlp_w1:H=0.7285,top10E=0.34,eRank=154.1,q75/q25=10.50 mlp_w2:H=0.8385,top10E=0.15,eRank=270.1,q75/q25=16.89 vo_prod:H=0.6494,top10E=0.24,eRank=107.8,q75/q25=inf train_time:337466ms step_avg:73.36ms +[2025-09-02 15:47:19] [Rank 0] PRINT: step:4600/10000 val_loss:4.1165 svd_entropy: attn_qk:H=0.7425,top10E=0.28,eRank=164.0,q75/q25=84.49 attn_vo:H=0.7787,top10E=0.16,eRank=250.7,q75/q25=inf mlp_w1:H=0.7285,top10E=0.34,eRank=154.1,q75/q25=10.50 mlp_w2:H=0.8385,top10E=0.15,eRank=270.1,q75/q25=16.89 vo_prod:H=0.6494,top10E=0.24,eRank=107.8,q75/q25=inf train_time:337466ms step_avg:73.36ms +[2025-09-02 15:47:19] [Rank 0] step:4601/10000 train_time:337478ms step_avg:73.35ms +[2025-09-02 15:47:19] [Rank 0] step:4601/10000 train_time:337478ms step_avg:73.35ms +[2025-09-02 15:47:21] [Rank 0] step:4621/10000 train_time:338889ms step_avg:73.34ms +[2025-09-02 15:47:21] [Rank 0] step:4621/10000 train_time:338889ms step_avg:73.34ms +[2025-09-02 15:47:22] [Rank 0] step:4641/10000 train_time:340417ms step_avg:73.35ms +[2025-09-02 
15:47:22] [Rank 0] step:4641/10000 train_time:340417ms step_avg:73.35ms +[2025-09-02 15:47:24] [Rank 0] step:4661/10000 train_time:341946ms step_avg:73.36ms +[2025-09-02 15:47:24] [Rank 0] step:4661/10000 train_time:341946ms step_avg:73.36ms +[2025-09-02 15:47:25] [Rank 0] step:4681/10000 train_time:343475ms step_avg:73.38ms +[2025-09-02 15:47:25] [Rank 0] step:4681/10000 train_time:343475ms step_avg:73.38ms +[2025-09-02 15:47:27] [Rank 0] step:4701/10000 train_time:345006ms step_avg:73.39ms +[2025-09-02 15:47:27] [Rank 0] step:4701/10000 train_time:345006ms step_avg:73.39ms +[2025-09-02 15:47:29] [Rank 0] step:4721/10000 train_time:346534ms step_avg:73.40ms +[2025-09-02 15:47:29] [Rank 0] step:4721/10000 train_time:346534ms step_avg:73.40ms +[2025-09-02 15:47:30] [Rank 0] step:4741/10000 train_time:348065ms step_avg:73.42ms +[2025-09-02 15:47:30] [Rank 0] step:4741/10000 train_time:348065ms step_avg:73.42ms +[2025-09-02 15:47:32] [Rank 0] step:4761/10000 train_time:349595ms step_avg:73.43ms +[2025-09-02 15:47:32] [Rank 0] step:4761/10000 train_time:349595ms step_avg:73.43ms +[2025-09-02 15:47:33] [Rank 0] step:4781/10000 train_time:351125ms step_avg:73.44ms +[2025-09-02 15:47:33] [Rank 0] step:4781/10000 train_time:351125ms step_avg:73.44ms +[2025-09-02 15:47:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:47:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:47:46] [Rank 0] PRINT: step:4800/10000 val_loss:4.1021 svd_entropy: attn_qk:H=0.7453,top10E=0.28,eRank=166.1,q75/q25=86.50 attn_vo:H=0.7810,top10E=0.15,eRank=253.7,q75/q25=inf mlp_w1:H=0.7335,top10E=0.34,eRank=158.5,q75/q25=10.78 mlp_w2:H=0.8403,top10E=0.15,eRank=273.6,q75/q25=17.24 vo_prod:H=0.6525,top10E=0.24,eRank=110.2,q75/q25=inf train_time:352811ms step_avg:73.50ms +[2025-09-02 15:47:46] [Rank 0] PRINT: step:4800/10000 val_loss:4.1021 svd_entropy: attn_qk:H=0.7453,top10E=0.28,eRank=166.1,q75/q25=86.50 attn_vo:H=0.7810,top10E=0.15,eRank=253.7,q75/q25=inf mlp_w1:H=0.7335,top10E=0.34,eRank=158.5,q75/q25=10.78 mlp_w2:H=0.8403,top10E=0.15,eRank=273.6,q75/q25=17.24 vo_prod:H=0.6525,top10E=0.24,eRank=110.2,q75/q25=inf train_time:352811ms step_avg:73.50ms +[2025-09-02 15:47:47] [Rank 0] step:4801/10000 train_time:352823ms step_avg:73.49ms +[2025-09-02 15:47:47] [Rank 0] step:4801/10000 train_time:352823ms step_avg:73.49ms +[2025-09-02 15:47:48] [Rank 0] step:4821/10000 train_time:354202ms step_avg:73.47ms +[2025-09-02 15:47:48] [Rank 0] step:4821/10000 train_time:354202ms step_avg:73.47ms +[2025-09-02 15:47:50] [Rank 0] step:4841/10000 train_time:355729ms step_avg:73.48ms +[2025-09-02 15:47:50] [Rank 0] step:4841/10000 train_time:355729ms step_avg:73.48ms +[2025-09-02 15:47:51] [Rank 0] step:4861/10000 train_time:357260ms step_avg:73.50ms +[2025-09-02 15:47:51] [Rank 0] step:4861/10000 train_time:357260ms step_avg:73.50ms +[2025-09-02 15:47:53] [Rank 0] step:4881/10000 train_time:358789ms step_avg:73.51ms +[2025-09-02 15:47:53] [Rank 0] step:4881/10000 train_time:358789ms step_avg:73.51ms +[2025-09-02 15:47:54] [Rank 0] step:4901/10000 train_time:360314ms step_avg:73.52ms +[2025-09-02 15:47:54] [Rank 0] step:4901/10000 train_time:360314ms step_avg:73.52ms +[2025-09-02 15:47:56] [Rank 0] step:4921/10000 train_time:361848ms step_avg:73.53ms +[2025-09-02 15:47:56] [Rank 0] step:4921/10000 train_time:361848ms step_avg:73.53ms +[2025-09-02 15:47:57] [Rank 
0] step:4941/10000 train_time:363381ms step_avg:73.54ms +[2025-09-02 15:47:57] [Rank 0] step:4941/10000 train_time:363381ms step_avg:73.54ms +[2025-09-02 15:47:59] [Rank 0] step:4961/10000 train_time:364911ms step_avg:73.56ms +[2025-09-02 15:47:59] [Rank 0] step:4961/10000 train_time:364911ms step_avg:73.56ms +[2025-09-02 15:48:00] [Rank 0] step:4981/10000 train_time:366446ms step_avg:73.57ms +[2025-09-02 15:48:00] [Rank 0] step:4981/10000 train_time:366446ms step_avg:73.57ms +[2025-09-02 15:48:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:48:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:48:13] [Rank 0] PRINT: step:5000/10000 val_loss:4.0796 svd_entropy: attn_qk:H=0.7479,top10E=0.27,eRank=168.3,q75/q25=87.05 attn_vo:H=0.7832,top10E=0.15,eRank=256.4,q75/q25=inf mlp_w1:H=0.7383,top10E=0.33,eRank=162.8,q75/q25=11.15 mlp_w2:H=0.8420,top10E=0.15,eRank=276.7,q75/q25=17.39 vo_prod:H=0.6553,top10E=0.24,eRank=112.4,q75/q25=inf train_time:368129ms step_avg:73.63ms +[2025-09-02 15:48:13] [Rank 0] PRINT: step:5000/10000 val_loss:4.0796 svd_entropy: attn_qk:H=0.7479,top10E=0.27,eRank=168.3,q75/q25=87.05 attn_vo:H=0.7832,top10E=0.15,eRank=256.4,q75/q25=inf mlp_w1:H=0.7383,top10E=0.33,eRank=162.8,q75/q25=11.15 mlp_w2:H=0.8420,top10E=0.15,eRank=276.7,q75/q25=17.39 vo_prod:H=0.6553,top10E=0.24,eRank=112.4,q75/q25=inf train_time:368129ms step_avg:73.63ms +[2025-09-02 15:48:14] [Rank 0] step:5001/10000 train_time:368140ms step_avg:73.61ms +[2025-09-02 15:48:14] [Rank 0] step:5001/10000 train_time:368140ms step_avg:73.61ms +[2025-09-02 15:48:15] [Rank 0] step:5021/10000 train_time:369522ms step_avg:73.60ms +[2025-09-02 15:48:15] [Rank 0] step:5021/10000 train_time:369522ms step_avg:73.60ms +[2025-09-02 15:48:17] [Rank 0] step:5041/10000 train_time:371051ms step_avg:73.61ms +[2025-09-02 
15:48:17] [Rank 0] step:5041/10000 train_time:371051ms step_avg:73.61ms +[2025-09-02 15:48:18] [Rank 0] step:5061/10000 train_time:372577ms step_avg:73.62ms +[2025-09-02 15:48:18] [Rank 0] step:5061/10000 train_time:372577ms step_avg:73.62ms +[2025-09-02 15:48:20] [Rank 0] step:5081/10000 train_time:374106ms step_avg:73.63ms +[2025-09-02 15:48:20] [Rank 0] step:5081/10000 train_time:374106ms step_avg:73.63ms +[2025-09-02 15:48:21] [Rank 0] step:5101/10000 train_time:375634ms step_avg:73.64ms +[2025-09-02 15:48:21] [Rank 0] step:5101/10000 train_time:375634ms step_avg:73.64ms +[2025-09-02 15:48:23] [Rank 0] step:5121/10000 train_time:377162ms step_avg:73.65ms +[2025-09-02 15:48:23] [Rank 0] step:5121/10000 train_time:377162ms step_avg:73.65ms +[2025-09-02 15:48:24] [Rank 0] step:5141/10000 train_time:378693ms step_avg:73.66ms +[2025-09-02 15:48:24] [Rank 0] step:5141/10000 train_time:378693ms step_avg:73.66ms +[2025-09-02 15:48:26] [Rank 0] step:5161/10000 train_time:380222ms step_avg:73.67ms +[2025-09-02 15:48:26] [Rank 0] step:5161/10000 train_time:380222ms step_avg:73.67ms +[2025-09-02 15:48:27] [Rank 0] step:5181/10000 train_time:381755ms step_avg:73.68ms +[2025-09-02 15:48:27] [Rank 0] step:5181/10000 train_time:381755ms step_avg:73.68ms +[2025-09-02 15:48:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:48:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:48:40] [Rank 0] PRINT: step:5200/10000 val_loss:4.0582 svd_entropy: attn_qk:H=0.7503,top10E=0.27,eRank=170.3,q75/q25=87.53 attn_vo:H=0.7852,top10E=0.15,eRank=258.9,q75/q25=inf mlp_w1:H=0.7431,top10E=0.32,eRank=167.3,q75/q25=11.50 mlp_w2:H=0.8435,top10E=0.15,eRank=279.6,q75/q25=17.63 vo_prod:H=0.6580,top10E=0.23,eRank=114.6,q75/q25=inf train_time:383465ms step_avg:73.74ms +[2025-09-02 15:48:40] [Rank 0] PRINT: step:5200/10000 val_loss:4.0582 svd_entropy: attn_qk:H=0.7503,top10E=0.27,eRank=170.3,q75/q25=87.53 attn_vo:H=0.7852,top10E=0.15,eRank=258.9,q75/q25=inf mlp_w1:H=0.7431,top10E=0.32,eRank=167.3,q75/q25=11.50 mlp_w2:H=0.8435,top10E=0.15,eRank=279.6,q75/q25=17.63 vo_prod:H=0.6580,top10E=0.23,eRank=114.6,q75/q25=inf train_time:383465ms step_avg:73.74ms +[2025-09-02 15:48:41] [Rank 0] step:5201/10000 train_time:383477ms step_avg:73.73ms +[2025-09-02 15:48:41] [Rank 0] step:5201/10000 train_time:383477ms step_avg:73.73ms +[2025-09-02 15:48:42] [Rank 0] step:5221/10000 train_time:384900ms step_avg:73.72ms +[2025-09-02 15:48:42] [Rank 0] step:5221/10000 train_time:384900ms step_avg:73.72ms +[2025-09-02 15:48:44] [Rank 0] step:5241/10000 train_time:386468ms step_avg:73.74ms +[2025-09-02 15:48:44] [Rank 0] step:5241/10000 train_time:386468ms step_avg:73.74ms +[2025-09-02 15:48:45] [Rank 0] step:5261/10000 train_time:388028ms step_avg:73.76ms +[2025-09-02 15:48:45] [Rank 0] step:5261/10000 train_time:388028ms step_avg:73.76ms +[2025-09-02 15:48:47] [Rank 0] step:5281/10000 train_time:389591ms step_avg:73.77ms +[2025-09-02 15:48:47] [Rank 0] step:5281/10000 train_time:389591ms step_avg:73.77ms +[2025-09-02 15:48:48] [Rank 0] step:5301/10000 train_time:391161ms step_avg:73.79ms +[2025-09-02 15:48:48] [Rank 0] step:5301/10000 train_time:391161ms step_avg:73.79ms +[2025-09-02 15:48:50] [Rank 0] step:5321/10000 train_time:392719ms step_avg:73.81ms +[2025-09-02 15:48:50] [Rank 0] step:5321/10000 train_time:392719ms step_avg:73.81ms +[2025-09-02 15:48:51] [Rank 
0] step:5341/10000 train_time:394278ms step_avg:73.82ms +[2025-09-02 15:48:51] [Rank 0] step:5341/10000 train_time:394278ms step_avg:73.82ms +[2025-09-02 15:48:53] [Rank 0] step:5361/10000 train_time:395843ms step_avg:73.84ms +[2025-09-02 15:48:53] [Rank 0] step:5361/10000 train_time:395843ms step_avg:73.84ms +[2025-09-02 15:48:55] [Rank 0] step:5381/10000 train_time:397407ms step_avg:73.85ms +[2025-09-02 15:48:55] [Rank 0] step:5381/10000 train_time:397407ms step_avg:73.85ms +[2025-09-02 15:48:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:48:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:49:08] [Rank 0] PRINT: step:5400/10000 val_loss:4.0434 svd_entropy: attn_qk:H=0.7526,top10E=0.27,eRank=172.2,q75/q25=87.48 attn_vo:H=0.7870,top10E=0.15,eRank=261.3,q75/q25=inf mlp_w1:H=0.7474,top10E=0.32,eRank=171.6,q75/q25=11.78 mlp_w2:H=0.8448,top10E=0.14,eRank=282.2,q75/q25=18.01 vo_prod:H=0.6605,top10E=0.23,eRank=116.7,q75/q25=inf train_time:399126ms step_avg:73.91ms +[2025-09-02 15:49:08] [Rank 0] PRINT: step:5400/10000 val_loss:4.0434 svd_entropy: attn_qk:H=0.7526,top10E=0.27,eRank=172.2,q75/q25=87.48 attn_vo:H=0.7870,top10E=0.15,eRank=261.3,q75/q25=inf mlp_w1:H=0.7474,top10E=0.32,eRank=171.6,q75/q25=11.78 mlp_w2:H=0.8448,top10E=0.14,eRank=282.2,q75/q25=18.01 vo_prod:H=0.6605,top10E=0.23,eRank=116.7,q75/q25=inf train_time:399126ms step_avg:73.91ms +[2025-09-02 15:49:08] [Rank 0] step:5401/10000 train_time:399137ms step_avg:73.90ms +[2025-09-02 15:49:08] [Rank 0] step:5401/10000 train_time:399137ms step_avg:73.90ms +[2025-09-02 15:49:09] [Rank 0] step:5421/10000 train_time:400545ms step_avg:73.89ms +[2025-09-02 15:49:09] [Rank 0] step:5421/10000 train_time:400545ms step_avg:73.89ms +[2025-09-02 15:49:11] [Rank 0] step:5441/10000 train_time:402102ms step_avg:73.90ms +[2025-09-02 
15:49:11] [Rank 0] step:5441/10000 train_time:402102ms step_avg:73.90ms +[2025-09-02 15:49:13] [Rank 0] step:5461/10000 train_time:403666ms step_avg:73.92ms +[2025-09-02 15:49:13] [Rank 0] step:5461/10000 train_time:403666ms step_avg:73.92ms +[2025-09-02 15:49:14] [Rank 0] step:5481/10000 train_time:405231ms step_avg:73.93ms +[2025-09-02 15:49:14] [Rank 0] step:5481/10000 train_time:405231ms step_avg:73.93ms +[2025-09-02 15:49:16] [Rank 0] step:5501/10000 train_time:406798ms step_avg:73.95ms +[2025-09-02 15:49:16] [Rank 0] step:5501/10000 train_time:406798ms step_avg:73.95ms +[2025-09-02 15:49:17] [Rank 0] step:5521/10000 train_time:408365ms step_avg:73.97ms +[2025-09-02 15:49:17] [Rank 0] step:5521/10000 train_time:408365ms step_avg:73.97ms +[2025-09-02 15:49:19] [Rank 0] step:5541/10000 train_time:409926ms step_avg:73.98ms +[2025-09-02 15:49:19] [Rank 0] step:5541/10000 train_time:409926ms step_avg:73.98ms +[2025-09-02 15:49:20] [Rank 0] step:5561/10000 train_time:411487ms step_avg:74.00ms +[2025-09-02 15:49:20] [Rank 0] step:5561/10000 train_time:411487ms step_avg:74.00ms +[2025-09-02 15:49:22] [Rank 0] step:5581/10000 train_time:413053ms step_avg:74.01ms +[2025-09-02 15:49:22] [Rank 0] step:5581/10000 train_time:413053ms step_avg:74.01ms +[2025-09-02 15:49:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:49:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:49:35] [Rank 0] PRINT: step:5600/10000 val_loss:4.0277 svd_entropy: attn_qk:H=0.7548,top10E=0.26,eRank=174.0,q75/q25=87.64 attn_vo:H=0.7888,top10E=0.15,eRank=263.5,q75/q25=inf mlp_w1:H=0.7515,top10E=0.31,eRank=175.7,q75/q25=12.08 mlp_w2:H=0.8461,top10E=0.14,eRank=284.7,q75/q25=18.14 vo_prod:H=0.6629,top10E=0.22,eRank=118.7,q75/q25=inf train_time:414773ms step_avg:74.07ms +[2025-09-02 15:49:35] [Rank 0] PRINT: step:5600/10000 val_loss:4.0277 svd_entropy: attn_qk:H=0.7548,top10E=0.26,eRank=174.0,q75/q25=87.64 attn_vo:H=0.7888,top10E=0.15,eRank=263.5,q75/q25=inf mlp_w1:H=0.7515,top10E=0.31,eRank=175.7,q75/q25=12.08 mlp_w2:H=0.8461,top10E=0.14,eRank=284.7,q75/q25=18.14 vo_prod:H=0.6629,top10E=0.22,eRank=118.7,q75/q25=inf train_time:414773ms step_avg:74.07ms +[2025-09-02 15:49:35] [Rank 0] step:5601/10000 train_time:414784ms step_avg:74.06ms +[2025-09-02 15:49:35] [Rank 0] step:5601/10000 train_time:414784ms step_avg:74.06ms +[2025-09-02 15:49:37] [Rank 0] step:5621/10000 train_time:416205ms step_avg:74.04ms +[2025-09-02 15:49:37] [Rank 0] step:5621/10000 train_time:416205ms step_avg:74.04ms +[2025-09-02 15:49:38] [Rank 0] step:5641/10000 train_time:417766ms step_avg:74.06ms +[2025-09-02 15:49:38] [Rank 0] step:5641/10000 train_time:417766ms step_avg:74.06ms +[2025-09-02 15:49:40] [Rank 0] step:5661/10000 train_time:419323ms step_avg:74.07ms +[2025-09-02 15:49:40] [Rank 0] step:5661/10000 train_time:419323ms step_avg:74.07ms +[2025-09-02 15:49:41] [Rank 0] step:5681/10000 train_time:420889ms step_avg:74.09ms +[2025-09-02 15:49:41] [Rank 0] step:5681/10000 train_time:420889ms step_avg:74.09ms +[2025-09-02 15:49:43] [Rank 0] step:5701/10000 train_time:422450ms step_avg:74.10ms +[2025-09-02 15:49:43] [Rank 0] step:5701/10000 train_time:422450ms step_avg:74.10ms +[2025-09-02 15:49:45] [Rank 0] step:5721/10000 train_time:424017ms step_avg:74.12ms +[2025-09-02 15:49:45] [Rank 0] step:5721/10000 train_time:424017ms step_avg:74.12ms +[2025-09-02 15:49:46] [Rank 
0] step:5741/10000 train_time:425580ms step_avg:74.13ms +[2025-09-02 15:49:46] [Rank 0] step:5741/10000 train_time:425580ms step_avg:74.13ms +[2025-09-02 15:49:48] [Rank 0] step:5761/10000 train_time:427163ms step_avg:74.15ms +[2025-09-02 15:49:48] [Rank 0] step:5761/10000 train_time:427163ms step_avg:74.15ms +[2025-09-02 15:49:49] [Rank 0] step:5781/10000 train_time:428732ms step_avg:74.16ms +[2025-09-02 15:49:49] [Rank 0] step:5781/10000 train_time:428732ms step_avg:74.16ms +[2025-09-02 15:49:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:49:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:50:02] [Rank 0] PRINT: step:5800/10000 val_loss:4.0192 svd_entropy: attn_qk:H=0.7569,top10E=0.26,eRank=175.9,q75/q25=88.22 attn_vo:H=0.7904,top10E=0.14,eRank=265.8,q75/q25=inf mlp_w1:H=0.7553,top10E=0.31,eRank=179.5,q75/q25=12.42 mlp_w2:H=0.8472,top10E=0.14,eRank=286.9,q75/q25=18.50 vo_prod:H=0.6651,top10E=0.22,eRank=120.6,q75/q25=inf train_time:430452ms step_avg:74.22ms +[2025-09-02 15:50:02] [Rank 0] PRINT: step:5800/10000 val_loss:4.0192 svd_entropy: attn_qk:H=0.7569,top10E=0.26,eRank=175.9,q75/q25=88.22 attn_vo:H=0.7904,top10E=0.14,eRank=265.8,q75/q25=inf mlp_w1:H=0.7553,top10E=0.31,eRank=179.5,q75/q25=12.42 mlp_w2:H=0.8472,top10E=0.14,eRank=286.9,q75/q25=18.50 vo_prod:H=0.6651,top10E=0.22,eRank=120.6,q75/q25=inf train_time:430452ms step_avg:74.22ms +[2025-09-02 15:50:03] [Rank 0] step:5801/10000 train_time:430463ms step_avg:74.20ms +[2025-09-02 15:50:03] [Rank 0] step:5801/10000 train_time:430463ms step_avg:74.20ms +[2025-09-02 15:50:04] [Rank 0] step:5821/10000 train_time:431871ms step_avg:74.19ms +[2025-09-02 15:50:04] [Rank 0] step:5821/10000 train_time:431871ms step_avg:74.19ms +[2025-09-02 15:50:06] [Rank 0] step:5841/10000 train_time:433430ms step_avg:74.20ms +[2025-09-02 
15:50:06] [Rank 0] step:5841/10000 train_time:433430ms step_avg:74.20ms +[2025-09-02 15:50:07] [Rank 0] step:5861/10000 train_time:434991ms step_avg:74.22ms +[2025-09-02 15:50:07] [Rank 0] step:5861/10000 train_time:434991ms step_avg:74.22ms +[2025-09-02 15:50:09] [Rank 0] step:5881/10000 train_time:436554ms step_avg:74.23ms +[2025-09-02 15:50:09] [Rank 0] step:5881/10000 train_time:436554ms step_avg:74.23ms +[2025-09-02 15:50:10] [Rank 0] step:5901/10000 train_time:438116ms step_avg:74.24ms +[2025-09-02 15:50:10] [Rank 0] step:5901/10000 train_time:438116ms step_avg:74.24ms +[2025-09-02 15:50:12] [Rank 0] step:5921/10000 train_time:439679ms step_avg:74.26ms +[2025-09-02 15:50:12] [Rank 0] step:5921/10000 train_time:439679ms step_avg:74.26ms +[2025-09-02 15:50:13] [Rank 0] step:5941/10000 train_time:441245ms step_avg:74.27ms +[2025-09-02 15:50:13] [Rank 0] step:5941/10000 train_time:441245ms step_avg:74.27ms +[2025-09-02 15:50:15] [Rank 0] step:5961/10000 train_time:442815ms step_avg:74.29ms +[2025-09-02 15:50:15] [Rank 0] step:5961/10000 train_time:442815ms step_avg:74.29ms +[2025-09-02 15:50:17] [Rank 0] step:5981/10000 train_time:444381ms step_avg:74.30ms +[2025-09-02 15:50:17] [Rank 0] step:5981/10000 train_time:444381ms step_avg:74.30ms +[2025-09-02 15:50:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:50:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:50:30] [Rank 0] PRINT: step:6000/10000 val_loss:3.9952 svd_entropy: attn_qk:H=0.7590,top10E=0.26,eRank=177.8,q75/q25=88.24 attn_vo:H=0.7920,top10E=0.14,eRank=267.9,q75/q25=inf mlp_w1:H=0.7590,top10E=0.30,eRank=183.5,q75/q25=12.74 mlp_w2:H=0.8483,top10E=0.14,eRank=289.0,q75/q25=18.66 vo_prod:H=0.6672,top10E=0.22,eRank=122.4,q75/q25=inf train_time:446097ms step_avg:74.35ms +[2025-09-02 15:50:30] [Rank 0] PRINT: step:6000/10000 val_loss:3.9952 svd_entropy: attn_qk:H=0.7590,top10E=0.26,eRank=177.8,q75/q25=88.24 attn_vo:H=0.7920,top10E=0.14,eRank=267.9,q75/q25=inf mlp_w1:H=0.7590,top10E=0.30,eRank=183.5,q75/q25=12.74 mlp_w2:H=0.8483,top10E=0.14,eRank=289.0,q75/q25=18.66 vo_prod:H=0.6672,top10E=0.22,eRank=122.4,q75/q25=inf train_time:446097ms step_avg:74.35ms +[2025-09-02 15:50:30] [Rank 0] step:6001/10000 train_time:446109ms step_avg:74.34ms +[2025-09-02 15:50:30] [Rank 0] step:6001/10000 train_time:446109ms step_avg:74.34ms +[2025-09-02 15:50:31] [Rank 0] step:6021/10000 train_time:447519ms step_avg:74.33ms +[2025-09-02 15:50:31] [Rank 0] step:6021/10000 train_time:447519ms step_avg:74.33ms +[2025-09-02 15:50:33] [Rank 0] step:6041/10000 train_time:449083ms step_avg:74.34ms +[2025-09-02 15:50:33] [Rank 0] step:6041/10000 train_time:449083ms step_avg:74.34ms +[2025-09-02 15:50:34] [Rank 0] step:6061/10000 train_time:450652ms step_avg:74.35ms +[2025-09-02 15:50:34] [Rank 0] step:6061/10000 train_time:450652ms step_avg:74.35ms +[2025-09-02 15:50:36] [Rank 0] step:6081/10000 train_time:452217ms step_avg:74.37ms +[2025-09-02 15:50:36] [Rank 0] step:6081/10000 train_time:452217ms step_avg:74.37ms +[2025-09-02 15:50:38] [Rank 0] step:6101/10000 train_time:453787ms step_avg:74.38ms +[2025-09-02 15:50:38] [Rank 0] step:6101/10000 train_time:453787ms step_avg:74.38ms +[2025-09-02 15:50:39] [Rank 0] step:6121/10000 train_time:455619ms step_avg:74.44ms +[2025-09-02 15:50:39] [Rank 0] step:6121/10000 train_time:455619ms step_avg:74.44ms +[2025-09-02 15:50:41] [Rank 
0] step:6141/10000 train_time:457193ms step_avg:74.45ms +[2025-09-02 15:50:41] [Rank 0] step:6141/10000 train_time:457193ms step_avg:74.45ms +[2025-09-02 15:50:43] [Rank 0] step:6161/10000 train_time:458761ms step_avg:74.46ms +[2025-09-02 15:50:43] [Rank 0] step:6161/10000 train_time:458761ms step_avg:74.46ms +[2025-09-02 15:50:44] [Rank 0] step:6181/10000 train_time:460328ms step_avg:74.47ms +[2025-09-02 15:50:44] [Rank 0] step:6181/10000 train_time:460328ms step_avg:74.47ms +[2025-09-02 15:50:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:50:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:50:57] [Rank 0] PRINT: step:6200/10000 val_loss:3.9787 svd_entropy: attn_qk:H=0.7610,top10E=0.25,eRank=179.5,q75/q25=88.14 attn_vo:H=0.7935,top10E=0.14,eRank=269.9,q75/q25=inf mlp_w1:H=0.7623,top10E=0.30,eRank=187.1,q75/q25=13.08 mlp_w2:H=0.8493,top10E=0.14,eRank=291.1,q75/q25=18.82 vo_prod:H=0.6692,top10E=0.22,eRank=124.2,q75/q25=inf train_time:462051ms step_avg:74.52ms +[2025-09-02 15:50:57] [Rank 0] PRINT: step:6200/10000 val_loss:3.9787 svd_entropy: attn_qk:H=0.7610,top10E=0.25,eRank=179.5,q75/q25=88.14 attn_vo:H=0.7935,top10E=0.14,eRank=269.9,q75/q25=inf mlp_w1:H=0.7623,top10E=0.30,eRank=187.1,q75/q25=13.08 mlp_w2:H=0.8493,top10E=0.14,eRank=291.1,q75/q25=18.82 vo_prod:H=0.6692,top10E=0.22,eRank=124.2,q75/q25=inf train_time:462051ms step_avg:74.52ms +[2025-09-02 15:50:57] [Rank 0] step:6201/10000 train_time:462063ms step_avg:74.51ms +[2025-09-02 15:50:57] [Rank 0] step:6201/10000 train_time:462063ms step_avg:74.51ms +[2025-09-02 15:50:59] [Rank 0] step:6221/10000 train_time:463489ms step_avg:74.50ms +[2025-09-02 15:50:59] [Rank 0] step:6221/10000 train_time:463489ms step_avg:74.50ms +[2025-09-02 15:51:01] [Rank 0] step:6241/10000 train_time:465051ms step_avg:74.52ms +[2025-09-02 
15:51:01] [Rank 0] step:6241/10000 train_time:465051ms step_avg:74.52ms +[2025-09-02 15:51:02] [Rank 0] step:6261/10000 train_time:466618ms step_avg:74.53ms +[2025-09-02 15:51:02] [Rank 0] step:6261/10000 train_time:466618ms step_avg:74.53ms +[2025-09-02 15:51:04] [Rank 0] step:6281/10000 train_time:468188ms step_avg:74.54ms +[2025-09-02 15:51:04] [Rank 0] step:6281/10000 train_time:468188ms step_avg:74.54ms +[2025-09-02 15:51:05] [Rank 0] step:6301/10000 train_time:469757ms step_avg:74.55ms +[2025-09-02 15:51:05] [Rank 0] step:6301/10000 train_time:469757ms step_avg:74.55ms +[2025-09-02 15:51:07] [Rank 0] step:6321/10000 train_time:471322ms step_avg:74.56ms +[2025-09-02 15:51:07] [Rank 0] step:6321/10000 train_time:471322ms step_avg:74.56ms +[2025-09-02 15:51:08] [Rank 0] step:6341/10000 train_time:472892ms step_avg:74.58ms +[2025-09-02 15:51:08] [Rank 0] step:6341/10000 train_time:472892ms step_avg:74.58ms +[2025-09-02 15:51:10] [Rank 0] step:6361/10000 train_time:474465ms step_avg:74.59ms +[2025-09-02 15:51:10] [Rank 0] step:6361/10000 train_time:474465ms step_avg:74.59ms +[2025-09-02 15:51:12] [Rank 0] step:6381/10000 train_time:476037ms step_avg:74.60ms +[2025-09-02 15:51:12] [Rank 0] step:6381/10000 train_time:476037ms step_avg:74.60ms +[2025-09-02 15:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:51:25] [Rank 0] PRINT: step:6400/10000 val_loss:3.9630 svd_entropy: attn_qk:H=0.7627,top10E=0.25,eRank=181.1,q75/q25=88.26 attn_vo:H=0.7949,top10E=0.14,eRank=271.8,q75/q25=inf mlp_w1:H=0.7652,top10E=0.29,eRank=190.3,q75/q25=13.32 mlp_w2:H=0.8502,top10E=0.14,eRank=293.0,q75/q25=19.14 vo_prod:H=0.6712,top10E=0.21,eRank=125.9,q75/q25=inf train_time:477761ms step_avg:74.65ms +[2025-09-02 15:51:25] [Rank 0] PRINT: step:6400/10000 val_loss:3.9630 svd_entropy: attn_qk:H=0.7627,top10E=0.25,eRank=181.1,q75/q25=88.26 attn_vo:H=0.7949,top10E=0.14,eRank=271.8,q75/q25=inf mlp_w1:H=0.7652,top10E=0.29,eRank=190.3,q75/q25=13.32 mlp_w2:H=0.8502,top10E=0.14,eRank=293.0,q75/q25=19.14 vo_prod:H=0.6712,top10E=0.21,eRank=125.9,q75/q25=inf train_time:477761ms step_avg:74.65ms +[2025-09-02 15:51:25] [Rank 0] step:6401/10000 train_time:477773ms step_avg:74.64ms +[2025-09-02 15:51:25] [Rank 0] step:6401/10000 train_time:477773ms step_avg:74.64ms +[2025-09-02 15:51:27] [Rank 0] step:6421/10000 train_time:479191ms step_avg:74.63ms +[2025-09-02 15:51:27] [Rank 0] step:6421/10000 train_time:479191ms step_avg:74.63ms +[2025-09-02 15:51:28] [Rank 0] step:6441/10000 train_time:480756ms step_avg:74.64ms +[2025-09-02 15:51:28] [Rank 0] step:6441/10000 train_time:480756ms step_avg:74.64ms +[2025-09-02 15:51:30] [Rank 0] step:6461/10000 train_time:482325ms step_avg:74.65ms +[2025-09-02 15:51:30] [Rank 0] step:6461/10000 train_time:482325ms step_avg:74.65ms +[2025-09-02 15:51:31] [Rank 0] step:6481/10000 train_time:483902ms step_avg:74.66ms +[2025-09-02 15:51:31] [Rank 0] step:6481/10000 train_time:483902ms step_avg:74.66ms +[2025-09-02 15:51:33] [Rank 0] step:6501/10000 train_time:485467ms step_avg:74.68ms +[2025-09-02 15:51:33] [Rank 0] step:6501/10000 train_time:485467ms step_avg:74.68ms +[2025-09-02 15:51:34] [Rank 0] step:6521/10000 train_time:487032ms step_avg:74.69ms +[2025-09-02 15:51:34] [Rank 0] step:6521/10000 train_time:487032ms step_avg:74.69ms +[2025-09-02 15:51:36] [Rank 
0] step:6541/10000 train_time:488601ms step_avg:74.70ms +[2025-09-02 15:51:36] [Rank 0] step:6541/10000 train_time:488601ms step_avg:74.70ms +[2025-09-02 15:51:38] [Rank 0] step:6561/10000 train_time:490172ms step_avg:74.71ms +[2025-09-02 15:51:38] [Rank 0] step:6561/10000 train_time:490172ms step_avg:74.71ms +[2025-09-02 15:51:39] [Rank 0] step:6581/10000 train_time:491739ms step_avg:74.72ms +[2025-09-02 15:51:39] [Rank 0] step:6581/10000 train_time:491739ms step_avg:74.72ms +[2025-09-02 15:51:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:51:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:51:52] [Rank 0] PRINT: step:6600/10000 val_loss:3.9524 svd_entropy: attn_qk:H=0.7644,top10E=0.25,eRank=182.6,q75/q25=88.53 attn_vo:H=0.7962,top10E=0.14,eRank=273.5,q75/q25=inf mlp_w1:H=0.7680,top10E=0.29,eRank=193.4,q75/q25=13.60 mlp_w2:H=0.8510,top10E=0.14,eRank=294.6,q75/q25=19.42 vo_prod:H=0.6729,top10E=0.21,eRank=127.5,q75/q25=inf train_time:493467ms step_avg:74.77ms +[2025-09-02 15:51:52] [Rank 0] PRINT: step:6600/10000 val_loss:3.9524 svd_entropy: attn_qk:H=0.7644,top10E=0.25,eRank=182.6,q75/q25=88.53 attn_vo:H=0.7962,top10E=0.14,eRank=273.5,q75/q25=inf mlp_w1:H=0.7680,top10E=0.29,eRank=193.4,q75/q25=13.60 mlp_w2:H=0.8510,top10E=0.14,eRank=294.6,q75/q25=19.42 vo_prod:H=0.6729,top10E=0.21,eRank=127.5,q75/q25=inf train_time:493467ms step_avg:74.77ms +[2025-09-02 15:51:53] [Rank 0] step:6601/10000 train_time:493479ms step_avg:74.76ms +[2025-09-02 15:51:53] [Rank 0] step:6601/10000 train_time:493479ms step_avg:74.76ms +[2025-09-02 15:51:54] [Rank 0] step:6621/10000 train_time:494907ms step_avg:74.75ms +[2025-09-02 15:51:54] [Rank 0] step:6621/10000 train_time:494907ms step_avg:74.75ms +[2025-09-02 15:51:56] [Rank 0] step:6641/10000 train_time:496477ms step_avg:74.76ms +[2025-09-02 
15:51:56] [Rank 0] step:6641/10000 train_time:496477ms step_avg:74.76ms +[2025-09-02 15:51:57] [Rank 0] step:6661/10000 train_time:498045ms step_avg:74.77ms +[2025-09-02 15:51:57] [Rank 0] step:6661/10000 train_time:498045ms step_avg:74.77ms +[2025-09-02 15:51:59] [Rank 0] step:6681/10000 train_time:499629ms step_avg:74.78ms +[2025-09-02 15:51:59] [Rank 0] step:6681/10000 train_time:499629ms step_avg:74.78ms +[2025-09-02 15:52:00] [Rank 0] step:6701/10000 train_time:501235ms step_avg:74.80ms +[2025-09-02 15:52:00] [Rank 0] step:6701/10000 train_time:501235ms step_avg:74.80ms +[2025-09-02 15:52:02] [Rank 0] step:6721/10000 train_time:502833ms step_avg:74.82ms +[2025-09-02 15:52:02] [Rank 0] step:6721/10000 train_time:502833ms step_avg:74.82ms +[2025-09-02 15:52:04] [Rank 0] step:6741/10000 train_time:504429ms step_avg:74.83ms +[2025-09-02 15:52:04] [Rank 0] step:6741/10000 train_time:504429ms step_avg:74.83ms +[2025-09-02 15:52:05] [Rank 0] step:6761/10000 train_time:506023ms step_avg:74.84ms +[2025-09-02 15:52:05] [Rank 0] step:6761/10000 train_time:506023ms step_avg:74.84ms +[2025-09-02 15:52:07] [Rank 0] step:6781/10000 train_time:507625ms step_avg:74.86ms +[2025-09-02 15:52:07] [Rank 0] step:6781/10000 train_time:507625ms step_avg:74.86ms +[2025-09-02 15:52:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:52:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:52:20] [Rank 0] PRINT: step:6800/10000 val_loss:3.9346 svd_entropy: attn_qk:H=0.7658,top10E=0.25,eRank=183.9,q75/q25=88.13 attn_vo:H=0.7973,top10E=0.14,eRank=275.2,q75/q25=inf mlp_w1:H=0.7704,top10E=0.28,eRank=196.1,q75/q25=13.77 mlp_w2:H=0.8518,top10E=0.13,eRank=296.2,q75/q25=19.54 vo_prod:H=0.6745,top10E=0.21,eRank=128.9,q75/q25=inf train_time:509387ms step_avg:74.91ms +[2025-09-02 15:52:20] [Rank 0] PRINT: step:6800/10000 val_loss:3.9346 svd_entropy: attn_qk:H=0.7658,top10E=0.25,eRank=183.9,q75/q25=88.13 attn_vo:H=0.7973,top10E=0.14,eRank=275.2,q75/q25=inf mlp_w1:H=0.7704,top10E=0.28,eRank=196.1,q75/q25=13.77 mlp_w2:H=0.8518,top10E=0.13,eRank=296.2,q75/q25=19.54 vo_prod:H=0.6745,top10E=0.21,eRank=128.9,q75/q25=inf train_time:509387ms step_avg:74.91ms +[2025-09-02 15:52:20] [Rank 0] step:6801/10000 train_time:509399ms step_avg:74.90ms +[2025-09-02 15:52:20] [Rank 0] step:6801/10000 train_time:509399ms step_avg:74.90ms +[2025-09-02 15:52:22] [Rank 0] step:6821/10000 train_time:510841ms step_avg:74.89ms +[2025-09-02 15:52:22] [Rank 0] step:6821/10000 train_time:510841ms step_avg:74.89ms +[2025-09-02 15:52:23] [Rank 0] step:6841/10000 train_time:512428ms step_avg:74.91ms +[2025-09-02 15:52:23] [Rank 0] step:6841/10000 train_time:512428ms step_avg:74.91ms +[2025-09-02 15:52:25] [Rank 0] step:6861/10000 train_time:514025ms step_avg:74.92ms +[2025-09-02 15:52:25] [Rank 0] step:6861/10000 train_time:514025ms step_avg:74.92ms +[2025-09-02 15:52:27] [Rank 0] step:6881/10000 train_time:515618ms step_avg:74.93ms +[2025-09-02 15:52:27] [Rank 0] step:6881/10000 train_time:515618ms step_avg:74.93ms +[2025-09-02 15:52:28] [Rank 0] step:6901/10000 train_time:517212ms step_avg:74.95ms +[2025-09-02 15:52:28] [Rank 0] step:6901/10000 train_time:517212ms step_avg:74.95ms +[2025-09-02 15:52:30] [Rank 0] step:6921/10000 train_time:518804ms step_avg:74.96ms +[2025-09-02 15:52:30] [Rank 0] step:6921/10000 train_time:518804ms step_avg:74.96ms +[2025-09-02 15:52:31] [Rank 
0] step:6941/10000 train_time:520407ms step_avg:74.98ms +[2025-09-02 15:52:31] [Rank 0] step:6941/10000 train_time:520407ms step_avg:74.98ms +[2025-09-02 15:52:33] [Rank 0] step:6961/10000 train_time:522015ms step_avg:74.99ms +[2025-09-02 15:52:33] [Rank 0] step:6961/10000 train_time:522015ms step_avg:74.99ms +[2025-09-02 15:52:35] [Rank 0] step:6981/10000 train_time:523614ms step_avg:75.01ms +[2025-09-02 15:52:35] [Rank 0] step:6981/10000 train_time:523614ms step_avg:75.01ms +[2025-09-02 15:52:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:52:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:52:48] [Rank 0] PRINT: step:7000/10000 val_loss:3.9207 svd_entropy: attn_qk:H=0.7671,top10E=0.25,eRank=185.2,q75/q25=87.90 attn_vo:H=0.7984,top10E=0.13,eRank=276.7,q75/q25=inf mlp_w1:H=0.7726,top10E=0.28,eRank=198.6,q75/q25=13.99 mlp_w2:H=0.8525,top10E=0.13,eRank=297.8,q75/q25=19.61 vo_prod:H=0.6761,top10E=0.21,eRank=130.5,q75/q25=inf train_time:525372ms step_avg:75.05ms +[2025-09-02 15:52:48] [Rank 0] PRINT: step:7000/10000 val_loss:3.9207 svd_entropy: attn_qk:H=0.7671,top10E=0.25,eRank=185.2,q75/q25=87.90 attn_vo:H=0.7984,top10E=0.13,eRank=276.7,q75/q25=inf mlp_w1:H=0.7726,top10E=0.28,eRank=198.6,q75/q25=13.99 mlp_w2:H=0.8525,top10E=0.13,eRank=297.8,q75/q25=19.61 vo_prod:H=0.6761,top10E=0.21,eRank=130.5,q75/q25=inf train_time:525372ms step_avg:75.05ms +[2025-09-02 15:52:48] [Rank 0] step:7001/10000 train_time:525384ms step_avg:75.04ms +[2025-09-02 15:52:48] [Rank 0] step:7001/10000 train_time:525384ms step_avg:75.04ms +[2025-09-02 15:52:50] [Rank 0] step:7021/10000 train_time:526836ms step_avg:75.04ms +[2025-09-02 15:52:50] [Rank 0] step:7021/10000 train_time:526836ms step_avg:75.04ms +[2025-09-02 15:52:51] [Rank 0] step:7041/10000 train_time:528440ms step_avg:75.05ms +[2025-09-02 
15:52:51] [Rank 0] step:7041/10000 train_time:528440ms step_avg:75.05ms +[2025-09-02 15:52:53] [Rank 0] step:7061/10000 train_time:530041ms step_avg:75.07ms +[2025-09-02 15:52:53] [Rank 0] step:7061/10000 train_time:530041ms step_avg:75.07ms +[2025-09-02 15:52:55] [Rank 0] step:7081/10000 train_time:531645ms step_avg:75.08ms +[2025-09-02 15:52:55] [Rank 0] step:7081/10000 train_time:531645ms step_avg:75.08ms +[2025-09-02 15:52:56] [Rank 0] step:7101/10000 train_time:533249ms step_avg:75.09ms +[2025-09-02 15:52:56] [Rank 0] step:7101/10000 train_time:533249ms step_avg:75.09ms +[2025-09-02 15:52:58] [Rank 0] step:7121/10000 train_time:534851ms step_avg:75.11ms +[2025-09-02 15:52:58] [Rank 0] step:7121/10000 train_time:534851ms step_avg:75.11ms +[2025-09-02 15:52:59] [Rank 0] step:7141/10000 train_time:536486ms step_avg:75.13ms +[2025-09-02 15:52:59] [Rank 0] step:7141/10000 train_time:536486ms step_avg:75.13ms +[2025-09-02 15:53:01] [Rank 0] step:7161/10000 train_time:538085ms step_avg:75.14ms +[2025-09-02 15:53:01] [Rank 0] step:7161/10000 train_time:538085ms step_avg:75.14ms +[2025-09-02 15:53:03] [Rank 0] step:7181/10000 train_time:539682ms step_avg:75.15ms +[2025-09-02 15:53:03] [Rank 0] step:7181/10000 train_time:539682ms step_avg:75.15ms +[2025-09-02 15:53:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:53:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:53:16] [Rank 0] PRINT: step:7200/10000 val_loss:3.9100 svd_entropy: attn_qk:H=0.7684,top10E=0.25,eRank=186.4,q75/q25=87.85 attn_vo:H=0.7993,top10E=0.13,eRank=278.0,q75/q25=inf mlp_w1:H=0.7744,top10E=0.28,eRank=200.9,q75/q25=14.12 mlp_w2:H=0.8533,top10E=0.13,eRank=299.3,q75/q25=19.75 vo_prod:H=0.6775,top10E=0.21,eRank=131.7,q75/q25=inf train_time:541443ms step_avg:75.20ms +[2025-09-02 15:53:16] [Rank 0] PRINT: step:7200/10000 val_loss:3.9100 svd_entropy: attn_qk:H=0.7684,top10E=0.25,eRank=186.4,q75/q25=87.85 attn_vo:H=0.7993,top10E=0.13,eRank=278.0,q75/q25=inf mlp_w1:H=0.7744,top10E=0.28,eRank=200.9,q75/q25=14.12 mlp_w2:H=0.8533,top10E=0.13,eRank=299.3,q75/q25=19.75 vo_prod:H=0.6775,top10E=0.21,eRank=131.7,q75/q25=inf train_time:541443ms step_avg:75.20ms +[2025-09-02 15:53:16] [Rank 0] step:7201/10000 train_time:541455ms step_avg:75.19ms +[2025-09-02 15:53:16] [Rank 0] step:7201/10000 train_time:541455ms step_avg:75.19ms +[2025-09-02 15:53:18] [Rank 0] step:7221/10000 train_time:542924ms step_avg:75.19ms +[2025-09-02 15:53:18] [Rank 0] step:7221/10000 train_time:542924ms step_avg:75.19ms +[2025-09-02 15:53:19] [Rank 0] step:7241/10000 train_time:544510ms step_avg:75.20ms +[2025-09-02 15:53:19] [Rank 0] step:7241/10000 train_time:544510ms step_avg:75.20ms +[2025-09-02 15:53:21] [Rank 0] step:7261/10000 train_time:546107ms step_avg:75.21ms +[2025-09-02 15:53:21] [Rank 0] step:7261/10000 train_time:546107ms step_avg:75.21ms +[2025-09-02 15:53:22] [Rank 0] step:7281/10000 train_time:547718ms step_avg:75.23ms +[2025-09-02 15:53:22] [Rank 0] step:7281/10000 train_time:547718ms step_avg:75.23ms +[2025-09-02 15:53:24] [Rank 0] step:7301/10000 train_time:549316ms step_avg:75.24ms +[2025-09-02 15:53:24] [Rank 0] step:7301/10000 train_time:549316ms step_avg:75.24ms +[2025-09-02 15:53:26] [Rank 0] step:7321/10000 train_time:550920ms step_avg:75.25ms +[2025-09-02 15:53:26] [Rank 0] step:7321/10000 train_time:550920ms step_avg:75.25ms +[2025-09-02 15:53:27] [Rank 
0] step:7341/10000 train_time:552519ms step_avg:75.26ms +[2025-09-02 15:53:27] [Rank 0] step:7341/10000 train_time:552519ms step_avg:75.26ms +[2025-09-02 15:53:29] [Rank 0] step:7361/10000 train_time:554118ms step_avg:75.28ms +[2025-09-02 15:53:29] [Rank 0] step:7361/10000 train_time:554118ms step_avg:75.28ms +[2025-09-02 15:53:30] [Rank 0] step:7381/10000 train_time:555722ms step_avg:75.29ms +[2025-09-02 15:53:30] [Rank 0] step:7381/10000 train_time:555722ms step_avg:75.29ms +[2025-09-02 15:53:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:53:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:53:44] [Rank 0] PRINT: step:7400/10000 val_loss:3.8914 svd_entropy: attn_qk:H=0.7695,top10E=0.24,eRank=187.5,q75/q25=87.22 attn_vo:H=0.8002,top10E=0.13,eRank=279.2,q75/q25=inf mlp_w1:H=0.7762,top10E=0.28,eRank=202.9,q75/q25=14.28 mlp_w2:H=0.8539,top10E=0.13,eRank=300.6,q75/q25=19.87 vo_prod:H=0.6787,top10E=0.21,eRank=132.9,q75/q25=inf train_time:557464ms step_avg:75.33ms +[2025-09-02 15:53:44] [Rank 0] PRINT: step:7400/10000 val_loss:3.8914 svd_entropy: attn_qk:H=0.7695,top10E=0.24,eRank=187.5,q75/q25=87.22 attn_vo:H=0.8002,top10E=0.13,eRank=279.2,q75/q25=inf mlp_w1:H=0.7762,top10E=0.28,eRank=202.9,q75/q25=14.28 mlp_w2:H=0.8539,top10E=0.13,eRank=300.6,q75/q25=19.87 vo_prod:H=0.6787,top10E=0.21,eRank=132.9,q75/q25=inf train_time:557464ms step_avg:75.33ms +[2025-09-02 15:53:44] [Rank 0] step:7401/10000 train_time:557476ms step_avg:75.32ms +[2025-09-02 15:53:44] [Rank 0] step:7401/10000 train_time:557476ms step_avg:75.32ms +[2025-09-02 15:53:46] [Rank 0] step:7421/10000 train_time:558941ms step_avg:75.32ms +[2025-09-02 15:53:46] [Rank 0] step:7421/10000 train_time:558941ms step_avg:75.32ms +[2025-09-02 15:53:47] [Rank 0] step:7441/10000 train_time:560536ms step_avg:75.33ms +[2025-09-02 
15:53:47] [Rank 0] step:7441/10000 train_time:560536ms step_avg:75.33ms +[2025-09-02 15:53:49] [Rank 0] step:7461/10000 train_time:562134ms step_avg:75.34ms +[2025-09-02 15:53:49] [Rank 0] step:7461/10000 train_time:562134ms step_avg:75.34ms +[2025-09-02 15:53:50] [Rank 0] step:7481/10000 train_time:563737ms step_avg:75.36ms +[2025-09-02 15:53:50] [Rank 0] step:7481/10000 train_time:563737ms step_avg:75.36ms +[2025-09-02 15:53:52] [Rank 0] step:7501/10000 train_time:565342ms step_avg:75.37ms +[2025-09-02 15:53:52] [Rank 0] step:7501/10000 train_time:565342ms step_avg:75.37ms +[2025-09-02 15:53:54] [Rank 0] step:7521/10000 train_time:566945ms step_avg:75.38ms +[2025-09-02 15:53:54] [Rank 0] step:7521/10000 train_time:566945ms step_avg:75.38ms +[2025-09-02 15:53:55] [Rank 0] step:7541/10000 train_time:568558ms step_avg:75.40ms +[2025-09-02 15:53:55] [Rank 0] step:7541/10000 train_time:568558ms step_avg:75.40ms +[2025-09-02 15:53:57] [Rank 0] step:7561/10000 train_time:570147ms step_avg:75.41ms +[2025-09-02 15:53:57] [Rank 0] step:7561/10000 train_time:570147ms step_avg:75.41ms +[2025-09-02 15:53:58] [Rank 0] step:7581/10000 train_time:571879ms step_avg:75.44ms +[2025-09-02 15:53:58] [Rank 0] step:7581/10000 train_time:571879ms step_avg:75.44ms +[2025-09-02 15:54:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:54:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:54:12] [Rank 0] PRINT: step:7600/10000 val_loss:3.8885 svd_entropy: attn_qk:H=0.7705,top10E=0.24,eRank=188.5,q75/q25=87.17 attn_vo:H=0.8010,top10E=0.13,eRank=280.3,q75/q25=inf mlp_w1:H=0.7777,top10E=0.27,eRank=204.7,q75/q25=14.46 mlp_w2:H=0.8545,top10E=0.13,eRank=301.9,q75/q25=19.95 vo_prod:H=0.6799,top10E=0.20,eRank=134.1,q75/q25=inf train_time:573534ms step_avg:75.46ms +[2025-09-02 15:54:12] [Rank 0] PRINT: step:7600/10000 val_loss:3.8885 svd_entropy: attn_qk:H=0.7705,top10E=0.24,eRank=188.5,q75/q25=87.17 attn_vo:H=0.8010,top10E=0.13,eRank=280.3,q75/q25=inf mlp_w1:H=0.7777,top10E=0.27,eRank=204.7,q75/q25=14.46 mlp_w2:H=0.8545,top10E=0.13,eRank=301.9,q75/q25=19.95 vo_prod:H=0.6799,top10E=0.20,eRank=134.1,q75/q25=inf train_time:573534ms step_avg:75.46ms +[2025-09-02 15:54:12] [Rank 0] step:7601/10000 train_time:573546ms step_avg:75.46ms +[2025-09-02 15:54:12] [Rank 0] step:7601/10000 train_time:573546ms step_avg:75.46ms +[2025-09-02 15:54:13] [Rank 0] step:7621/10000 train_time:574991ms step_avg:75.45ms +[2025-09-02 15:54:13] [Rank 0] step:7621/10000 train_time:574991ms step_avg:75.45ms +[2025-09-02 15:54:15] [Rank 0] step:7641/10000 train_time:576588ms step_avg:75.46ms +[2025-09-02 15:54:15] [Rank 0] step:7641/10000 train_time:576588ms step_avg:75.46ms +[2025-09-02 15:54:17] [Rank 0] step:7661/10000 train_time:578193ms step_avg:75.47ms +[2025-09-02 15:54:17] [Rank 0] step:7661/10000 train_time:578193ms step_avg:75.47ms +[2025-09-02 15:54:18] [Rank 0] step:7681/10000 train_time:579788ms step_avg:75.48ms +[2025-09-02 15:54:18] [Rank 0] step:7681/10000 train_time:579788ms step_avg:75.48ms +[2025-09-02 15:54:20] [Rank 0] step:7701/10000 train_time:581386ms step_avg:75.49ms +[2025-09-02 15:54:20] [Rank 0] step:7701/10000 train_time:581386ms step_avg:75.49ms +[2025-09-02 15:54:21] [Rank 0] step:7721/10000 train_time:582999ms step_avg:75.51ms +[2025-09-02 15:54:21] [Rank 0] step:7721/10000 train_time:582999ms step_avg:75.51ms +[2025-09-02 15:54:23] [Rank 
0] step:7741/10000 train_time:584605ms step_avg:75.52ms +[2025-09-02 15:54:23] [Rank 0] step:7741/10000 train_time:584605ms step_avg:75.52ms +[2025-09-02 15:54:25] [Rank 0] step:7761/10000 train_time:586210ms step_avg:75.53ms +[2025-09-02 15:54:25] [Rank 0] step:7761/10000 train_time:586210ms step_avg:75.53ms +[2025-09-02 15:54:26] [Rank 0] step:7781/10000 train_time:587819ms step_avg:75.55ms +[2025-09-02 15:54:26] [Rank 0] step:7781/10000 train_time:587819ms step_avg:75.55ms +[2025-09-02 15:54:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:54:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:54:40] [Rank 0] PRINT: step:7800/10000 val_loss:3.8734 svd_entropy: attn_qk:H=0.7714,top10E=0.24,eRank=189.3,q75/q25=86.64 attn_vo:H=0.8017,top10E=0.13,eRank=281.4,q75/q25=inf mlp_w1:H=0.7792,top10E=0.27,eRank=206.4,q75/q25=14.50 mlp_w2:H=0.8551,top10E=0.13,eRank=303.1,q75/q25=20.02 vo_prod:H=0.6810,top10E=0.20,eRank=135.1,q75/q25=inf train_time:589593ms step_avg:75.59ms +[2025-09-02 15:54:40] [Rank 0] PRINT: step:7800/10000 val_loss:3.8734 svd_entropy: attn_qk:H=0.7714,top10E=0.24,eRank=189.3,q75/q25=86.64 attn_vo:H=0.8017,top10E=0.13,eRank=281.4,q75/q25=inf mlp_w1:H=0.7792,top10E=0.27,eRank=206.4,q75/q25=14.50 mlp_w2:H=0.8551,top10E=0.13,eRank=303.1,q75/q25=20.02 vo_prod:H=0.6810,top10E=0.20,eRank=135.1,q75/q25=inf train_time:589593ms step_avg:75.59ms +[2025-09-02 15:54:40] [Rank 0] step:7801/10000 train_time:589605ms step_avg:75.58ms +[2025-09-02 15:54:40] [Rank 0] step:7801/10000 train_time:589605ms step_avg:75.58ms +[2025-09-02 15:54:41] [Rank 0] step:7821/10000 train_time:591064ms step_avg:75.57ms +[2025-09-02 15:54:41] [Rank 0] step:7821/10000 train_time:591064ms step_avg:75.57ms +[2025-09-02 15:54:43] [Rank 0] step:7841/10000 train_time:592660ms step_avg:75.58ms +[2025-09-02 
15:54:43] [Rank 0] step:7841/10000 train_time:592660ms step_avg:75.58ms +[2025-09-02 15:54:45] [Rank 0] step:7861/10000 train_time:594262ms step_avg:75.60ms +[2025-09-02 15:54:45] [Rank 0] step:7861/10000 train_time:594262ms step_avg:75.60ms +[2025-09-02 15:54:46] [Rank 0] step:7881/10000 train_time:595871ms step_avg:75.61ms +[2025-09-02 15:54:46] [Rank 0] step:7881/10000 train_time:595871ms step_avg:75.61ms +[2025-09-02 15:54:48] [Rank 0] step:7901/10000 train_time:597470ms step_avg:75.62ms +[2025-09-02 15:54:48] [Rank 0] step:7901/10000 train_time:597470ms step_avg:75.62ms +[2025-09-02 15:54:49] [Rank 0] step:7921/10000 train_time:599070ms step_avg:75.63ms +[2025-09-02 15:54:49] [Rank 0] step:7921/10000 train_time:599070ms step_avg:75.63ms +[2025-09-02 15:54:51] [Rank 0] step:7941/10000 train_time:600682ms step_avg:75.64ms +[2025-09-02 15:54:51] [Rank 0] step:7941/10000 train_time:600682ms step_avg:75.64ms +[2025-09-02 15:54:53] [Rank 0] step:7961/10000 train_time:602287ms step_avg:75.65ms +[2025-09-02 15:54:53] [Rank 0] step:7961/10000 train_time:602287ms step_avg:75.65ms +[2025-09-02 15:54:54] [Rank 0] step:7981/10000 train_time:603886ms step_avg:75.67ms +[2025-09-02 15:54:54] [Rank 0] step:7981/10000 train_time:603886ms step_avg:75.67ms +[2025-09-02 15:54:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:54:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:55:08] [Rank 0] PRINT: step:8000/10000 val_loss:3.8576 svd_entropy: attn_qk:H=0.7721,top10E=0.24,eRank=190.1,q75/q25=86.95 attn_vo:H=0.8024,top10E=0.13,eRank=282.3,q75/q25=inf mlp_w1:H=0.7804,top10E=0.27,eRank=207.9,q75/q25=14.64 mlp_w2:H=0.8556,top10E=0.13,eRank=304.2,q75/q25=20.06 vo_prod:H=0.6821,top10E=0.20,eRank=136.2,q75/q25=inf train_time:605648ms step_avg:75.71ms +[2025-09-02 15:55:08] [Rank 0] PRINT: step:8000/10000 val_loss:3.8576 svd_entropy: attn_qk:H=0.7721,top10E=0.24,eRank=190.1,q75/q25=86.95 attn_vo:H=0.8024,top10E=0.13,eRank=282.3,q75/q25=inf mlp_w1:H=0.7804,top10E=0.27,eRank=207.9,q75/q25=14.64 mlp_w2:H=0.8556,top10E=0.13,eRank=304.2,q75/q25=20.06 vo_prod:H=0.6821,top10E=0.20,eRank=136.2,q75/q25=inf train_time:605648ms step_avg:75.71ms +[2025-09-02 15:55:08] [Rank 0] step:8001/10000 train_time:605660ms step_avg:75.70ms +[2025-09-02 15:55:08] [Rank 0] step:8001/10000 train_time:605660ms step_avg:75.70ms +[2025-09-02 15:55:09] [Rank 0] step:8021/10000 train_time:607103ms step_avg:75.69ms +[2025-09-02 15:55:09] [Rank 0] step:8021/10000 train_time:607103ms step_avg:75.69ms +[2025-09-02 15:55:11] [Rank 0] step:8041/10000 train_time:608713ms step_avg:75.70ms +[2025-09-02 15:55:11] [Rank 0] step:8041/10000 train_time:608713ms step_avg:75.70ms +[2025-09-02 15:55:13] [Rank 0] step:8061/10000 train_time:610313ms step_avg:75.71ms +[2025-09-02 15:55:13] [Rank 0] step:8061/10000 train_time:610313ms step_avg:75.71ms +[2025-09-02 15:55:14] [Rank 0] step:8081/10000 train_time:611908ms step_avg:75.72ms +[2025-09-02 15:55:14] [Rank 0] step:8081/10000 train_time:611908ms step_avg:75.72ms +[2025-09-02 15:55:16] [Rank 0] step:8101/10000 train_time:613521ms step_avg:75.73ms +[2025-09-02 15:55:16] [Rank 0] step:8101/10000 train_time:613521ms step_avg:75.73ms +[2025-09-02 15:55:17] [Rank 0] step:8121/10000 train_time:615119ms step_avg:75.74ms +[2025-09-02 15:55:17] [Rank 0] step:8121/10000 train_time:615119ms step_avg:75.74ms +[2025-09-02 15:55:19] [Rank 
0] step:8141/10000 train_time:616823ms step_avg:75.77ms +[2025-09-02 15:55:19] [Rank 0] step:8141/10000 train_time:616823ms step_avg:75.77ms +[2025-09-02 15:55:21] [Rank 0] step:8161/10000 train_time:618434ms step_avg:75.78ms +[2025-09-02 15:55:21] [Rank 0] step:8161/10000 train_time:618434ms step_avg:75.78ms +[2025-09-02 15:55:22] [Rank 0] step:8181/10000 train_time:620068ms step_avg:75.79ms +[2025-09-02 15:55:22] [Rank 0] step:8181/10000 train_time:620068ms step_avg:75.79ms +[2025-09-02 15:55:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:55:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:55:36] [Rank 0] PRINT: step:8200/10000 val_loss:3.8485 svd_entropy: attn_qk:H=0.7729,top10E=0.24,eRank=190.9,q75/q25=86.47 attn_vo:H=0.8029,top10E=0.13,eRank=283.2,q75/q25=inf mlp_w1:H=0.7814,top10E=0.27,eRank=209.2,q75/q25=14.67 mlp_w2:H=0.8561,top10E=0.13,eRank=305.2,q75/q25=20.16 vo_prod:H=0.6830,top10E=0.20,eRank=137.0,q75/q25=inf train_time:621885ms step_avg:75.84ms +[2025-09-02 15:55:36] [Rank 0] PRINT: step:8200/10000 val_loss:3.8485 svd_entropy: attn_qk:H=0.7729,top10E=0.24,eRank=190.9,q75/q25=86.47 attn_vo:H=0.8029,top10E=0.13,eRank=283.2,q75/q25=inf mlp_w1:H=0.7814,top10E=0.27,eRank=209.2,q75/q25=14.67 mlp_w2:H=0.8561,top10E=0.13,eRank=305.2,q75/q25=20.16 vo_prod:H=0.6830,top10E=0.20,eRank=137.0,q75/q25=inf train_time:621885ms step_avg:75.84ms +[2025-09-02 15:55:36] [Rank 0] step:8201/10000 train_time:621896ms step_avg:75.83ms +[2025-09-02 15:55:36] [Rank 0] step:8201/10000 train_time:621896ms step_avg:75.83ms +[2025-09-02 15:55:37] [Rank 0] step:8221/10000 train_time:623389ms step_avg:75.83ms +[2025-09-02 15:55:37] [Rank 0] step:8221/10000 train_time:623389ms step_avg:75.83ms +[2025-09-02 15:55:39] [Rank 0] step:8241/10000 train_time:625026ms step_avg:75.84ms +[2025-09-02 
15:55:39] [Rank 0] step:8241/10000 train_time:625026ms step_avg:75.84ms +[2025-09-02 15:55:41] [Rank 0] step:8261/10000 train_time:626656ms step_avg:75.86ms +[2025-09-02 15:55:41] [Rank 0] step:8261/10000 train_time:626656ms step_avg:75.86ms +[2025-09-02 15:55:42] [Rank 0] step:8281/10000 train_time:628289ms step_avg:75.87ms +[2025-09-02 15:55:42] [Rank 0] step:8281/10000 train_time:628289ms step_avg:75.87ms +[2025-09-02 15:55:44] [Rank 0] step:8301/10000 train_time:629921ms step_avg:75.88ms +[2025-09-02 15:55:44] [Rank 0] step:8301/10000 train_time:629921ms step_avg:75.88ms +[2025-09-02 15:55:46] [Rank 0] step:8321/10000 train_time:631540ms step_avg:75.90ms +[2025-09-02 15:55:46] [Rank 0] step:8321/10000 train_time:631540ms step_avg:75.90ms +[2025-09-02 15:55:47] [Rank 0] step:8341/10000 train_time:633168ms step_avg:75.91ms +[2025-09-02 15:55:47] [Rank 0] step:8341/10000 train_time:633168ms step_avg:75.91ms +[2025-09-02 15:55:49] [Rank 0] step:8361/10000 train_time:634792ms step_avg:75.92ms +[2025-09-02 15:55:49] [Rank 0] step:8361/10000 train_time:634792ms step_avg:75.92ms +[2025-09-02 15:55:51] [Rank 0] step:8381/10000 train_time:636424ms step_avg:75.94ms +[2025-09-02 15:55:51] [Rank 0] step:8381/10000 train_time:636424ms step_avg:75.94ms +[2025-09-02 15:55:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:55:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:56:04] [Rank 0] PRINT: step:8400/10000 val_loss:3.8383 svd_entropy: attn_qk:H=0.7736,top10E=0.24,eRank=191.6,q75/q25=86.43 attn_vo:H=0.8035,top10E=0.13,eRank=284.0,q75/q25=inf mlp_w1:H=0.7825,top10E=0.27,eRank=210.4,q75/q25=14.73 mlp_w2:H=0.8566,top10E=0.13,eRank=306.3,q75/q25=20.19 vo_prod:H=0.6839,top10E=0.20,eRank=137.9,q75/q25=inf train_time:638210ms step_avg:75.98ms +[2025-09-02 15:56:04] [Rank 0] PRINT: step:8400/10000 val_loss:3.8383 svd_entropy: attn_qk:H=0.7736,top10E=0.24,eRank=191.6,q75/q25=86.43 attn_vo:H=0.8035,top10E=0.13,eRank=284.0,q75/q25=inf mlp_w1:H=0.7825,top10E=0.27,eRank=210.4,q75/q25=14.73 mlp_w2:H=0.8566,top10E=0.13,eRank=306.3,q75/q25=20.19 vo_prod:H=0.6839,top10E=0.20,eRank=137.9,q75/q25=inf train_time:638210ms step_avg:75.98ms +[2025-09-02 15:56:04] [Rank 0] step:8401/10000 train_time:638222ms step_avg:75.97ms +[2025-09-02 15:56:04] [Rank 0] step:8401/10000 train_time:638222ms step_avg:75.97ms +[2025-09-02 15:56:06] [Rank 0] step:8421/10000 train_time:639747ms step_avg:75.97ms +[2025-09-02 15:56:06] [Rank 0] step:8421/10000 train_time:639747ms step_avg:75.97ms +[2025-09-02 15:56:07] [Rank 0] step:8441/10000 train_time:641319ms step_avg:75.98ms +[2025-09-02 15:56:07] [Rank 0] step:8441/10000 train_time:641319ms step_avg:75.98ms +[2025-09-02 15:56:09] [Rank 0] step:8461/10000 train_time:642939ms step_avg:75.99ms +[2025-09-02 15:56:09] [Rank 0] step:8461/10000 train_time:642939ms step_avg:75.99ms +[2025-09-02 15:56:11] [Rank 0] step:8481/10000 train_time:644573ms step_avg:76.00ms +[2025-09-02 15:56:11] [Rank 0] step:8481/10000 train_time:644573ms step_avg:76.00ms +[2025-09-02 15:56:12] [Rank 0] step:8501/10000 train_time:646224ms step_avg:76.02ms +[2025-09-02 15:56:12] [Rank 0] step:8501/10000 train_time:646224ms step_avg:76.02ms +[2025-09-02 15:56:14] [Rank 0] step:8521/10000 train_time:647861ms step_avg:76.03ms +[2025-09-02 15:56:14] [Rank 0] step:8521/10000 train_time:647861ms step_avg:76.03ms +[2025-09-02 15:56:15] [Rank 
0] step:8541/10000 train_time:649502ms step_avg:76.05ms +[2025-09-02 15:56:15] [Rank 0] step:8541/10000 train_time:649502ms step_avg:76.05ms +[2025-09-02 15:56:17] [Rank 0] step:8561/10000 train_time:651136ms step_avg:76.06ms +[2025-09-02 15:56:17] [Rank 0] step:8561/10000 train_time:651136ms step_avg:76.06ms +[2025-09-02 15:56:19] [Rank 0] step:8581/10000 train_time:652769ms step_avg:76.07ms +[2025-09-02 15:56:19] [Rank 0] step:8581/10000 train_time:652769ms step_avg:76.07ms +[2025-09-02 15:56:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:56:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:56:32] [Rank 0] PRINT: step:8600/10000 val_loss:3.8305 svd_entropy: attn_qk:H=0.7742,top10E=0.24,eRank=192.1,q75/q25=85.99 attn_vo:H=0.8040,top10E=0.13,eRank=284.7,q75/q25=inf mlp_w1:H=0.7833,top10E=0.27,eRank=211.5,q75/q25=14.82 mlp_w2:H=0.8570,top10E=0.13,eRank=307.1,q75/q25=20.25 vo_prod:H=0.6847,top10E=0.20,eRank=138.7,q75/q25=inf train_time:654555ms step_avg:76.11ms +[2025-09-02 15:56:32] [Rank 0] PRINT: step:8600/10000 val_loss:3.8305 svd_entropy: attn_qk:H=0.7742,top10E=0.24,eRank=192.1,q75/q25=85.99 attn_vo:H=0.8040,top10E=0.13,eRank=284.7,q75/q25=inf mlp_w1:H=0.7833,top10E=0.27,eRank=211.5,q75/q25=14.82 mlp_w2:H=0.8570,top10E=0.13,eRank=307.1,q75/q25=20.25 vo_prod:H=0.6847,top10E=0.20,eRank=138.7,q75/q25=inf train_time:654555ms step_avg:76.11ms +[2025-09-02 15:56:32] [Rank 0] step:8601/10000 train_time:654567ms step_avg:76.10ms +[2025-09-02 15:56:32] [Rank 0] step:8601/10000 train_time:654567ms step_avg:76.10ms +[2025-09-02 15:56:34] [Rank 0] step:8621/10000 train_time:656068ms step_avg:76.10ms +[2025-09-02 15:56:34] [Rank 0] step:8621/10000 train_time:656068ms step_avg:76.10ms +[2025-09-02 15:56:36] [Rank 0] step:8641/10000 train_time:657701ms step_avg:76.11ms +[2025-09-02 
15:56:36] [Rank 0] step:8641/10000 train_time:657701ms step_avg:76.11ms +[2025-09-02 15:56:37] [Rank 0] step:8661/10000 train_time:659335ms step_avg:76.13ms +[2025-09-02 15:56:37] [Rank 0] step:8661/10000 train_time:659335ms step_avg:76.13ms +[2025-09-02 15:56:39] [Rank 0] step:8681/10000 train_time:660964ms step_avg:76.14ms +[2025-09-02 15:56:39] [Rank 0] step:8681/10000 train_time:660964ms step_avg:76.14ms +[2025-09-02 15:56:40] [Rank 0] step:8701/10000 train_time:662591ms step_avg:76.15ms +[2025-09-02 15:56:40] [Rank 0] step:8701/10000 train_time:662591ms step_avg:76.15ms +[2025-09-02 15:56:42] [Rank 0] step:8721/10000 train_time:664226ms step_avg:76.16ms +[2025-09-02 15:56:42] [Rank 0] step:8721/10000 train_time:664226ms step_avg:76.16ms +[2025-09-02 15:56:44] [Rank 0] step:8741/10000 train_time:665848ms step_avg:76.18ms +[2025-09-02 15:56:44] [Rank 0] step:8741/10000 train_time:665848ms step_avg:76.18ms +[2025-09-02 15:56:45] [Rank 0] step:8761/10000 train_time:667477ms step_avg:76.19ms +[2025-09-02 15:56:45] [Rank 0] step:8761/10000 train_time:667477ms step_avg:76.19ms +[2025-09-02 15:56:47] [Rank 0] step:8781/10000 train_time:669113ms step_avg:76.20ms +[2025-09-02 15:56:47] [Rank 0] step:8781/10000 train_time:669113ms step_avg:76.20ms +[2025-09-02 15:56:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:56:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:57:00] [Rank 0] PRINT: step:8800/10000 val_loss:3.8211 svd_entropy: attn_qk:H=0.7746,top10E=0.24,eRank=192.6,q75/q25=85.58 attn_vo:H=0.8044,top10E=0.13,eRank=285.3,q75/q25=inf mlp_w1:H=0.7841,top10E=0.27,eRank=212.4,q75/q25=14.85 mlp_w2:H=0.8574,top10E=0.13,eRank=308.0,q75/q25=20.21 vo_prod:H=0.6854,top10E=0.20,eRank=139.5,q75/q25=inf train_time:670910ms step_avg:76.24ms +[2025-09-02 15:57:00] [Rank 0] PRINT: step:8800/10000 val_loss:3.8211 svd_entropy: attn_qk:H=0.7746,top10E=0.24,eRank=192.6,q75/q25=85.58 attn_vo:H=0.8044,top10E=0.13,eRank=285.3,q75/q25=inf mlp_w1:H=0.7841,top10E=0.27,eRank=212.4,q75/q25=14.85 mlp_w2:H=0.8574,top10E=0.13,eRank=308.0,q75/q25=20.21 vo_prod:H=0.6854,top10E=0.20,eRank=139.5,q75/q25=inf train_time:670910ms step_avg:76.24ms +[2025-09-02 15:57:01] [Rank 0] step:8801/10000 train_time:670922ms step_avg:76.23ms +[2025-09-02 15:57:01] [Rank 0] step:8801/10000 train_time:670922ms step_avg:76.23ms +[2025-09-02 15:57:02] [Rank 0] step:8821/10000 train_time:672408ms step_avg:76.23ms +[2025-09-02 15:57:02] [Rank 0] step:8821/10000 train_time:672408ms step_avg:76.23ms +[2025-09-02 15:57:04] [Rank 0] step:8841/10000 train_time:674061ms step_avg:76.24ms +[2025-09-02 15:57:04] [Rank 0] step:8841/10000 train_time:674061ms step_avg:76.24ms +[2025-09-02 15:57:05] [Rank 0] step:8861/10000 train_time:675687ms step_avg:76.25ms +[2025-09-02 15:57:05] [Rank 0] step:8861/10000 train_time:675687ms step_avg:76.25ms +[2025-09-02 15:57:07] [Rank 0] step:8881/10000 train_time:677320ms step_avg:76.27ms +[2025-09-02 15:57:07] [Rank 0] step:8881/10000 train_time:677320ms step_avg:76.27ms +[2025-09-02 15:57:09] [Rank 0] step:8901/10000 train_time:679080ms step_avg:76.29ms +[2025-09-02 15:57:09] [Rank 0] step:8901/10000 train_time:679080ms step_avg:76.29ms +[2025-09-02 15:57:10] [Rank 0] step:8921/10000 train_time:680620ms step_avg:76.29ms +[2025-09-02 15:57:10] [Rank 0] step:8921/10000 train_time:680620ms step_avg:76.29ms +[2025-09-02 15:57:12] [Rank 
0] step:8941/10000 train_time:682262ms step_avg:76.31ms +[2025-09-02 15:57:12] [Rank 0] step:8941/10000 train_time:682262ms step_avg:76.31ms +[2025-09-02 15:57:14] [Rank 0] step:8961/10000 train_time:683889ms step_avg:76.32ms +[2025-09-02 15:57:14] [Rank 0] step:8961/10000 train_time:683889ms step_avg:76.32ms +[2025-09-02 15:57:15] [Rank 0] step:8981/10000 train_time:685525ms step_avg:76.33ms +[2025-09-02 15:57:15] [Rank 0] step:8981/10000 train_time:685525ms step_avg:76.33ms +[2025-09-02 15:57:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:57:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:57:29] [Rank 0] PRINT: step:9000/10000 val_loss:3.8117 svd_entropy: attn_qk:H=0.7751,top10E=0.24,eRank=193.1,q75/q25=85.24 attn_vo:H=0.8048,top10E=0.13,eRank=285.8,q75/q25=inf mlp_w1:H=0.7847,top10E=0.26,eRank=213.2,q75/q25=14.87 mlp_w2:H=0.8578,top10E=0.13,eRank=308.7,q75/q25=20.27 vo_prod:H=0.6860,top10E=0.20,eRank=140.1,q75/q25=inf train_time:687316ms step_avg:76.37ms +[2025-09-02 15:57:29] [Rank 0] PRINT: step:9000/10000 val_loss:3.8117 svd_entropy: attn_qk:H=0.7751,top10E=0.24,eRank=193.1,q75/q25=85.24 attn_vo:H=0.8048,top10E=0.13,eRank=285.8,q75/q25=inf mlp_w1:H=0.7847,top10E=0.26,eRank=213.2,q75/q25=14.87 mlp_w2:H=0.8578,top10E=0.13,eRank=308.7,q75/q25=20.27 vo_prod:H=0.6860,top10E=0.20,eRank=140.1,q75/q25=inf train_time:687316ms step_avg:76.37ms +[2025-09-02 15:57:29] [Rank 0] step:9001/10000 train_time:687328ms step_avg:76.36ms +[2025-09-02 15:57:29] [Rank 0] step:9001/10000 train_time:687328ms step_avg:76.36ms +[2025-09-02 15:57:30] [Rank 0] step:9021/10000 train_time:688802ms step_avg:76.36ms +[2025-09-02 15:57:30] [Rank 0] step:9021/10000 train_time:688802ms step_avg:76.36ms +[2025-09-02 15:57:32] [Rank 0] step:9041/10000 train_time:690430ms step_avg:76.37ms +[2025-09-02 
15:57:32] [Rank 0] step:9041/10000 train_time:690430ms step_avg:76.37ms +[2025-09-02 15:57:34] [Rank 0] step:9061/10000 train_time:692075ms step_avg:76.38ms +[2025-09-02 15:57:34] [Rank 0] step:9061/10000 train_time:692075ms step_avg:76.38ms +[2025-09-02 15:57:35] [Rank 0] step:9081/10000 train_time:693714ms step_avg:76.39ms +[2025-09-02 15:57:35] [Rank 0] step:9081/10000 train_time:693714ms step_avg:76.39ms +[2025-09-02 15:57:37] [Rank 0] step:9101/10000 train_time:695366ms step_avg:76.41ms +[2025-09-02 15:57:37] [Rank 0] step:9101/10000 train_time:695366ms step_avg:76.41ms +[2025-09-02 15:57:39] [Rank 0] step:9121/10000 train_time:696999ms step_avg:76.42ms +[2025-09-02 15:57:39] [Rank 0] step:9121/10000 train_time:696999ms step_avg:76.42ms +[2025-09-02 15:57:40] [Rank 0] step:9141/10000 train_time:698624ms step_avg:76.43ms +[2025-09-02 15:57:40] [Rank 0] step:9141/10000 train_time:698624ms step_avg:76.43ms +[2025-09-02 15:57:42] [Rank 0] step:9161/10000 train_time:700249ms step_avg:76.44ms +[2025-09-02 15:57:42] [Rank 0] step:9161/10000 train_time:700249ms step_avg:76.44ms +[2025-09-02 15:57:44] [Rank 0] step:9181/10000 train_time:701915ms step_avg:76.45ms +[2025-09-02 15:57:44] [Rank 0] step:9181/10000 train_time:701915ms step_avg:76.45ms +[2025-09-02 15:57:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:57:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:57:57] [Rank 0] PRINT: step:9200/10000 val_loss:3.8049 svd_entropy: attn_qk:H=0.7754,top10E=0.24,eRank=193.5,q75/q25=84.98 attn_vo:H=0.8051,top10E=0.13,eRank=286.3,q75/q25=inf mlp_w1:H=0.7853,top10E=0.26,eRank=213.9,q75/q25=14.90 mlp_w2:H=0.8581,top10E=0.13,eRank=309.4,q75/q25=20.20 vo_prod:H=0.6866,top10E=0.20,eRank=140.7,q75/q25=inf train_time:703709ms step_avg:76.49ms +[2025-09-02 15:57:57] [Rank 0] PRINT: step:9200/10000 val_loss:3.8049 svd_entropy: attn_qk:H=0.7754,top10E=0.24,eRank=193.5,q75/q25=84.98 attn_vo:H=0.8051,top10E=0.13,eRank=286.3,q75/q25=inf mlp_w1:H=0.7853,top10E=0.26,eRank=213.9,q75/q25=14.90 mlp_w2:H=0.8581,top10E=0.13,eRank=309.4,q75/q25=20.20 vo_prod:H=0.6866,top10E=0.20,eRank=140.7,q75/q25=inf train_time:703709ms step_avg:76.49ms +[2025-09-02 15:57:57] [Rank 0] step:9201/10000 train_time:703721ms step_avg:76.48ms +[2025-09-02 15:57:57] [Rank 0] step:9201/10000 train_time:703721ms step_avg:76.48ms +[2025-09-02 15:57:59] [Rank 0] step:9221/10000 train_time:705211ms step_avg:76.48ms +[2025-09-02 15:57:59] [Rank 0] step:9221/10000 train_time:705211ms step_avg:76.48ms +[2025-09-02 15:58:00] [Rank 0] step:9241/10000 train_time:706854ms step_avg:76.49ms +[2025-09-02 15:58:00] [Rank 0] step:9241/10000 train_time:706854ms step_avg:76.49ms +[2025-09-02 15:58:02] [Rank 0] step:9261/10000 train_time:708498ms step_avg:76.50ms +[2025-09-02 15:58:02] [Rank 0] step:9261/10000 train_time:708498ms step_avg:76.50ms +[2025-09-02 15:58:04] [Rank 0] step:9281/10000 train_time:710125ms step_avg:76.51ms +[2025-09-02 15:58:04] [Rank 0] step:9281/10000 train_time:710125ms step_avg:76.51ms +[2025-09-02 15:58:05] [Rank 0] step:9301/10000 train_time:711754ms step_avg:76.52ms +[2025-09-02 15:58:05] [Rank 0] step:9301/10000 train_time:711754ms step_avg:76.52ms +[2025-09-02 15:58:07] [Rank 0] step:9321/10000 train_time:713387ms step_avg:76.54ms +[2025-09-02 15:58:07] [Rank 0] step:9321/10000 train_time:713387ms step_avg:76.54ms +[2025-09-02 15:58:09] [Rank 
0] step:9341/10000 train_time:715020ms step_avg:76.55ms +[2025-09-02 15:58:09] [Rank 0] step:9341/10000 train_time:715020ms step_avg:76.55ms +[2025-09-02 15:58:10] [Rank 0] step:9361/10000 train_time:716656ms step_avg:76.56ms +[2025-09-02 15:58:10] [Rank 0] step:9361/10000 train_time:716656ms step_avg:76.56ms +[2025-09-02 15:58:12] [Rank 0] step:9381/10000 train_time:718312ms step_avg:76.57ms +[2025-09-02 15:58:12] [Rank 0] step:9381/10000 train_time:718312ms step_avg:76.57ms +[2025-09-02 15:58:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:58:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:58:25] [Rank 0] PRINT: step:9400/10000 val_loss:3.7982 svd_entropy: attn_qk:H=0.7758,top10E=0.24,eRank=193.8,q75/q25=85.00 attn_vo:H=0.8054,top10E=0.13,eRank=286.7,q75/q25=inf mlp_w1:H=0.7858,top10E=0.26,eRank=214.6,q75/q25=14.89 mlp_w2:H=0.8584,top10E=0.13,eRank=310.0,q75/q25=20.23 vo_prod:H=0.6871,top10E=0.20,eRank=141.2,q75/q25=inf train_time:720133ms step_avg:76.61ms +[2025-09-02 15:58:25] [Rank 0] PRINT: step:9400/10000 val_loss:3.7982 svd_entropy: attn_qk:H=0.7758,top10E=0.24,eRank=193.8,q75/q25=85.00 attn_vo:H=0.8054,top10E=0.13,eRank=286.7,q75/q25=inf mlp_w1:H=0.7858,top10E=0.26,eRank=214.6,q75/q25=14.89 mlp_w2:H=0.8584,top10E=0.13,eRank=310.0,q75/q25=20.23 vo_prod:H=0.6871,top10E=0.20,eRank=141.2,q75/q25=inf train_time:720133ms step_avg:76.61ms +[2025-09-02 15:58:25] [Rank 0] step:9401/10000 train_time:720145ms step_avg:76.60ms +[2025-09-02 15:58:25] [Rank 0] step:9401/10000 train_time:720145ms step_avg:76.60ms +[2025-09-02 15:58:27] [Rank 0] step:9421/10000 train_time:721616ms step_avg:76.60ms +[2025-09-02 15:58:27] [Rank 0] step:9421/10000 train_time:721616ms step_avg:76.60ms +[2025-09-02 15:58:29] [Rank 0] step:9441/10000 train_time:723246ms step_avg:76.61ms +[2025-09-02 
15:58:29] [Rank 0] step:9441/10000 train_time:723246ms step_avg:76.61ms +[2025-09-02 15:58:30] [Rank 0] step:9461/10000 train_time:724883ms step_avg:76.62ms +[2025-09-02 15:58:30] [Rank 0] step:9461/10000 train_time:724883ms step_avg:76.62ms +[2025-09-02 15:58:32] [Rank 0] step:9481/10000 train_time:726520ms step_avg:76.63ms +[2025-09-02 15:58:32] [Rank 0] step:9481/10000 train_time:726520ms step_avg:76.63ms +[2025-09-02 15:58:34] [Rank 0] step:9501/10000 train_time:728167ms step_avg:76.64ms +[2025-09-02 15:58:34] [Rank 0] step:9501/10000 train_time:728167ms step_avg:76.64ms +[2025-09-02 15:58:35] [Rank 0] step:9521/10000 train_time:729793ms step_avg:76.65ms +[2025-09-02 15:58:35] [Rank 0] step:9521/10000 train_time:729793ms step_avg:76.65ms +[2025-09-02 15:58:37] [Rank 0] step:9541/10000 train_time:731426ms step_avg:76.66ms +[2025-09-02 15:58:37] [Rank 0] step:9541/10000 train_time:731426ms step_avg:76.66ms +[2025-09-02 15:58:39] [Rank 0] step:9561/10000 train_time:733058ms step_avg:76.67ms +[2025-09-02 15:58:39] [Rank 0] step:9561/10000 train_time:733058ms step_avg:76.67ms +[2025-09-02 15:58:40] [Rank 0] step:9581/10000 train_time:734697ms step_avg:76.68ms +[2025-09-02 15:58:40] [Rank 0] step:9581/10000 train_time:734697ms step_avg:76.68ms +[2025-09-02 15:58:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:58:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:58:54] [Rank 0] PRINT: step:9600/10000 val_loss:3.7922 svd_entropy: attn_qk:H=0.7760,top10E=0.24,eRank=194.1,q75/q25=84.94 attn_vo:H=0.8056,top10E=0.13,eRank=287.1,q75/q25=inf mlp_w1:H=0.7862,top10E=0.26,eRank=215.1,q75/q25=14.91 mlp_w2:H=0.8586,top10E=0.13,eRank=310.5,q75/q25=20.19 vo_prod:H=0.6875,top10E=0.20,eRank=141.6,q75/q25=inf train_time:736509ms step_avg:76.72ms +[2025-09-02 15:58:54] [Rank 0] PRINT: step:9600/10000 val_loss:3.7922 svd_entropy: attn_qk:H=0.7760,top10E=0.24,eRank=194.1,q75/q25=84.94 attn_vo:H=0.8056,top10E=0.13,eRank=287.1,q75/q25=inf mlp_w1:H=0.7862,top10E=0.26,eRank=215.1,q75/q25=14.91 mlp_w2:H=0.8586,top10E=0.13,eRank=310.5,q75/q25=20.19 vo_prod:H=0.6875,top10E=0.20,eRank=141.6,q75/q25=inf train_time:736509ms step_avg:76.72ms +[2025-09-02 15:58:54] [Rank 0] step:9601/10000 train_time:736521ms step_avg:76.71ms +[2025-09-02 15:58:54] [Rank 0] step:9601/10000 train_time:736521ms step_avg:76.71ms +[2025-09-02 15:58:55] [Rank 0] step:9621/10000 train_time:738015ms step_avg:76.71ms +[2025-09-02 15:58:55] [Rank 0] step:9621/10000 train_time:738015ms step_avg:76.71ms +[2025-09-02 15:58:57] [Rank 0] step:9641/10000 train_time:739649ms step_avg:76.72ms +[2025-09-02 15:58:57] [Rank 0] step:9641/10000 train_time:739649ms step_avg:76.72ms +[2025-09-02 15:58:59] [Rank 0] step:9661/10000 train_time:741311ms step_avg:76.73ms +[2025-09-02 15:58:59] [Rank 0] step:9661/10000 train_time:741311ms step_avg:76.73ms +[2025-09-02 15:59:00] [Rank 0] step:9681/10000 train_time:742965ms step_avg:76.74ms +[2025-09-02 15:59:00] [Rank 0] step:9681/10000 train_time:742965ms step_avg:76.74ms +[2025-09-02 15:59:02] [Rank 0] step:9701/10000 train_time:744640ms step_avg:76.76ms +[2025-09-02 15:59:02] [Rank 0] step:9701/10000 train_time:744640ms step_avg:76.76ms +[2025-09-02 15:59:04] [Rank 0] step:9721/10000 train_time:746292ms step_avg:76.77ms +[2025-09-02 15:59:04] [Rank 0] step:9721/10000 train_time:746292ms step_avg:76.77ms +[2025-09-02 15:59:05] [Rank 
0] step:9741/10000 train_time:747972ms step_avg:76.79ms +[2025-09-02 15:59:05] [Rank 0] step:9741/10000 train_time:747972ms step_avg:76.79ms +[2025-09-02 15:59:07] [Rank 0] step:9761/10000 train_time:749628ms step_avg:76.80ms +[2025-09-02 15:59:07] [Rank 0] step:9761/10000 train_time:749628ms step_avg:76.80ms +[2025-09-02 15:59:09] [Rank 0] step:9781/10000 train_time:751301ms step_avg:76.81ms +[2025-09-02 15:59:09] [Rank 0] step:9781/10000 train_time:751301ms step_avg:76.81ms +[2025-09-02 15:59:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:59:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:59:22] [Rank 0] PRINT: step:9800/10000 val_loss:3.7862 svd_entropy: attn_qk:H=0.7762,top10E=0.24,eRank=194.3,q75/q25=84.73 attn_vo:H=0.8058,top10E=0.13,eRank=287.4,q75/q25=inf mlp_w1:H=0.7865,top10E=0.26,eRank=215.4,q75/q25=14.88 mlp_w2:H=0.8588,top10E=0.13,eRank=310.9,q75/q25=20.17 vo_prod:H=0.6879,top10E=0.20,eRank=142.0,q75/q25=inf train_time:753149ms step_avg:76.85ms +[2025-09-02 15:59:22] [Rank 0] PRINT: step:9800/10000 val_loss:3.7862 svd_entropy: attn_qk:H=0.7762,top10E=0.24,eRank=194.3,q75/q25=84.73 attn_vo:H=0.8058,top10E=0.13,eRank=287.4,q75/q25=inf mlp_w1:H=0.7865,top10E=0.26,eRank=215.4,q75/q25=14.88 mlp_w2:H=0.8588,top10E=0.13,eRank=310.9,q75/q25=20.17 vo_prod:H=0.6879,top10E=0.20,eRank=142.0,q75/q25=inf train_time:753149ms step_avg:76.85ms +[2025-09-02 15:59:22] [Rank 0] step:9801/10000 train_time:753161ms step_avg:76.85ms +[2025-09-02 15:59:22] [Rank 0] step:9801/10000 train_time:753161ms step_avg:76.85ms +[2025-09-02 15:59:24] [Rank 0] step:9821/10000 train_time:754667ms step_avg:76.84ms +[2025-09-02 15:59:24] [Rank 0] step:9821/10000 train_time:754667ms step_avg:76.84ms +[2025-09-02 15:59:26] [Rank 0] step:9841/10000 train_time:756338ms step_avg:76.86ms +[2025-09-02 
15:59:26] [Rank 0] step:9841/10000 train_time:756338ms step_avg:76.86ms +[2025-09-02 15:59:27] [Rank 0] step:9861/10000 train_time:757989ms step_avg:76.87ms +[2025-09-02 15:59:27] [Rank 0] step:9861/10000 train_time:757989ms step_avg:76.87ms +[2025-09-02 15:59:29] [Rank 0] step:9881/10000 train_time:759637ms step_avg:76.88ms +[2025-09-02 15:59:29] [Rank 0] step:9881/10000 train_time:759637ms step_avg:76.88ms +[2025-09-02 15:59:31] [Rank 0] step:9901/10000 train_time:761302ms step_avg:76.89ms +[2025-09-02 15:59:31] [Rank 0] step:9901/10000 train_time:761302ms step_avg:76.89ms +[2025-09-02 15:59:32] [Rank 0] step:9921/10000 train_time:762961ms step_avg:76.90ms +[2025-09-02 15:59:32] [Rank 0] step:9921/10000 train_time:762961ms step_avg:76.90ms +[2025-09-02 15:59:34] [Rank 0] step:9941/10000 train_time:764627ms step_avg:76.92ms +[2025-09-02 15:59:34] [Rank 0] step:9941/10000 train_time:764627ms step_avg:76.92ms +[2025-09-02 15:59:36] [Rank 0] step:9961/10000 train_time:766287ms step_avg:76.93ms +[2025-09-02 15:59:36] [Rank 0] step:9961/10000 train_time:766287ms step_avg:76.93ms +[2025-09-02 15:59:37] [Rank 0] step:9981/10000 train_time:767943ms step_avg:76.94ms +[2025-09-02 15:59:37] [Rank 0] step:9981/10000 train_time:767943ms step_avg:76.94ms +[2025-09-02 15:59:39] [Rank 0] step:10000/10000 train_time:769526ms step_avg:76.95ms +[2025-09-02 15:59:39] [Rank 0] step:10000/10000 train_time:769526ms step_avg:76.95ms +[2025-09-02 15:59:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:59:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:59:51] [Rank 0] PRINT: step:10000/10000 val_loss:3.7808 svd_entropy: attn_qk:H=0.7763,top10E=0.24,eRank=194.4,q75/q25=84.57 attn_vo:H=0.8059,top10E=0.13,eRank=287.5,q75/q25=inf mlp_w1:H=0.7868,top10E=0.26,eRank=215.7,q75/q25=14.89 mlp_w2:H=0.8590,top10E=0.13,eRank=311.2,q75/q25=20.14 vo_prod:H=0.6881,top10E=0.20,eRank=142.2,q75/q25=inf train_time:769782ms step_avg:76.98ms +[2025-09-02 15:59:51] [Rank 0] PRINT: step:10000/10000 val_loss:3.7808 svd_entropy: attn_qk:H=0.7763,top10E=0.24,eRank=194.4,q75/q25=84.57 attn_vo:H=0.8059,top10E=0.13,eRank=287.5,q75/q25=inf mlp_w1:H=0.7868,top10E=0.26,eRank=215.7,q75/q25=14.89 mlp_w2:H=0.8590,top10E=0.13,eRank=311.2,q75/q25=20.14 vo_prod:H=0.6881,top10E=0.20,eRank=142.2,q75/q25=inf train_time:769782ms step_avg:76.98ms +[2025-09-02 15:59:51] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 15:59:51 2025 --- +[2025-09-02 15:59:51] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 15:59:51 2025 --- +[2025-09-02 15:59:51] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 15:59:51] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_48/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..80e342c76a3eb5ec2473ab0bb8666ea7b4cec41a --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_48/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "27d7c0e1-0128-4d68-ba1e-0f445cc4e259", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_48/training_log_27d7c0e1-0128-4d68-ba1e-0f445cc4e259.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_48/training_log_27d7c0e1-0128-4d68-ba1e-0f445cc4e259.txt new file mode 100644 index 0000000000000000000000000000000000000000..29f08097efb5c4dd0404715c1d315150214382d8 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_48/training_log_27d7c0e1-0128-4d68-ba1e-0f445cc4e259.txt @@ -0,0 +1,2984 @@ +[2025-09-02 16:49:59] [Rank 0] PRINT: --- Script Start: Tue Sep 2 16:49:59 2025 --- +[2025-09-02 16:49:59] [Rank 0] PRINT: --- Script Start: Tue Sep 2 16:49:59 2025 --- +[2025-09-02 16:49:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 16:49:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 16:49:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 16:49:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 16:49:59] [Rank 0] PRINT: Using fixed seed: 48 +[2025-09-02 16:49:59] [Rank 0] PRINT: Using fixed seed: 48 +[2025-09-02 16:49:59] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_48 +[2025-09-02 16:49:59] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_48 +[2025-09-02 16:49:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 16:49:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 16:49:59] [Rank 0] PRINT: Constructing model... +[2025-09-02 16:49:59] [Rank 0] PRINT: Constructing model... +[2025-09-02 16:50:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 16:50:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 16:50:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 16:50:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 16:50:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 16:50:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 16:50:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 16:50:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 16:50:01] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 16:50:01] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 16:50:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 16:50:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 16:50:01] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 16:50:01] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 16:50:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 16:50:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 16:50:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 16:50:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 16:50:01] [Rank 0] PRINT: Starting warmup... +[2025-09-02 16:50:01] [Rank 0] PRINT: Starting warmup... +[2025-09-02 16:50:43] [Rank 0] PRINT: Warmup complete. +[2025-09-02 16:50:43] [Rank 0] PRINT: Warmup complete. +[2025-09-02 16:50:43] [Rank 0] PRINT: Starting training... +[2025-09-02 16:50:43] [Rank 0] PRINT: Starting training... 
+[2025-09-02 16:50:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:50:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:50:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 16:50:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 16:51:00] [Rank 0] step:21/10000 train_time:1303ms step_avg:62.07ms +[2025-09-02 16:51:00] [Rank 0] step:21/10000 train_time:1303ms step_avg:62.07ms +[2025-09-02 16:51:02] [Rank 0] step:41/10000 train_time:2697ms step_avg:65.79ms +[2025-09-02 16:51:02] [Rank 0] step:41/10000 train_time:2697ms step_avg:65.79ms +[2025-09-02 16:51:03] [Rank 0] step:61/10000 train_time:4097ms step_avg:67.16ms +[2025-09-02 16:51:03] [Rank 0] step:61/10000 train_time:4097ms step_avg:67.16ms +[2025-09-02 16:51:04] [Rank 0] step:81/10000 train_time:5498ms step_avg:67.88ms +[2025-09-02 16:51:04] [Rank 0] step:81/10000 train_time:5498ms step_avg:67.88ms +[2025-09-02 16:51:06] [Rank 0] step:101/10000 train_time:6900ms step_avg:68.32ms +[2025-09-02 16:51:06] [Rank 0] step:101/10000 train_time:6900ms step_avg:68.32ms +[2025-09-02 16:51:07] [Rank 0] step:121/10000 train_time:8303ms step_avg:68.62ms +[2025-09-02 16:51:07] [Rank 0] step:121/10000 
train_time:8303ms step_avg:68.62ms +[2025-09-02 16:51:09] [Rank 0] step:141/10000 train_time:9706ms step_avg:68.84ms +[2025-09-02 16:51:09] [Rank 0] step:141/10000 train_time:9706ms step_avg:68.84ms +[2025-09-02 16:51:10] [Rank 0] step:161/10000 train_time:11109ms step_avg:69.00ms +[2025-09-02 16:51:10] [Rank 0] step:161/10000 train_time:11109ms step_avg:69.00ms +[2025-09-02 16:51:11] [Rank 0] step:181/10000 train_time:12513ms step_avg:69.13ms +[2025-09-02 16:51:11] [Rank 0] step:181/10000 train_time:12513ms step_avg:69.13ms +[2025-09-02 16:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:51:24] [Rank 0] PRINT: step:200/10000 val_loss:6.5046 svd_entropy: attn_qk:H=0.4991,top10E=0.73,eRank=74.6,q75/q25=12.02 attn_vo:H=0.4651,top10E=0.65,eRank=65.0,q75/q25=inf mlp_w1:H=0.4534,top10E=0.71,eRank=30.5,q75/q25=2.67 mlp_w2:H=0.1545,top10E=0.96,eRank=3.8,q75/q25=218.09 vo_prod:H=0.2403,top10E=0.86,eRank=8.8,q75/q25=inf train_time:14058ms step_avg:70.29ms +[2025-09-02 16:51:24] [Rank 0] PRINT: step:200/10000 val_loss:6.5046 svd_entropy: attn_qk:H=0.4991,top10E=0.73,eRank=74.6,q75/q25=12.02 attn_vo:H=0.4651,top10E=0.65,eRank=65.0,q75/q25=inf mlp_w1:H=0.4534,top10E=0.71,eRank=30.5,q75/q25=2.67 mlp_w2:H=0.1545,top10E=0.96,eRank=3.8,q75/q25=218.09 vo_prod:H=0.2403,top10E=0.86,eRank=8.8,q75/q25=inf train_time:14058ms step_avg:70.29ms +[2025-09-02 16:51:25] [Rank 0] step:201/10000 train_time:14071ms step_avg:70.01ms +[2025-09-02 16:51:25] [Rank 0] step:201/10000 train_time:14071ms step_avg:70.01ms +[2025-09-02 16:51:26] [Rank 0] step:221/10000 train_time:15352ms step_avg:69.47ms +[2025-09-02 16:51:26] [Rank 0] step:221/10000 train_time:15352ms step_avg:69.47ms +[2025-09-02 16:51:27] [Rank 0] step:241/10000 train_time:16754ms 
step_avg:69.52ms +[2025-09-02 16:51:27] [Rank 0] step:241/10000 train_time:16754ms step_avg:69.52ms +[2025-09-02 16:51:29] [Rank 0] step:261/10000 train_time:18156ms step_avg:69.56ms +[2025-09-02 16:51:29] [Rank 0] step:261/10000 train_time:18156ms step_avg:69.56ms +[2025-09-02 16:51:30] [Rank 0] step:281/10000 train_time:19558ms step_avg:69.60ms +[2025-09-02 16:51:30] [Rank 0] step:281/10000 train_time:19558ms step_avg:69.60ms +[2025-09-02 16:51:32] [Rank 0] step:301/10000 train_time:20963ms step_avg:69.64ms +[2025-09-02 16:51:32] [Rank 0] step:301/10000 train_time:20963ms step_avg:69.64ms +[2025-09-02 16:51:33] [Rank 0] step:321/10000 train_time:22368ms step_avg:69.68ms +[2025-09-02 16:51:33] [Rank 0] step:321/10000 train_time:22368ms step_avg:69.68ms +[2025-09-02 16:51:34] [Rank 0] step:341/10000 train_time:23774ms step_avg:69.72ms +[2025-09-02 16:51:34] [Rank 0] step:341/10000 train_time:23774ms step_avg:69.72ms +[2025-09-02 16:51:36] [Rank 0] step:361/10000 train_time:25179ms step_avg:69.75ms +[2025-09-02 16:51:36] [Rank 0] step:361/10000 train_time:25179ms step_avg:69.75ms +[2025-09-02 16:51:37] [Rank 0] step:381/10000 train_time:26584ms step_avg:69.77ms +[2025-09-02 16:51:37] [Rank 0] step:381/10000 train_time:26584ms step_avg:69.77ms +[2025-09-02 16:51:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:51:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:51:50] [Rank 0] PRINT: step:400/10000 val_loss:5.9784 svd_entropy: attn_qk:H=0.5438,top10E=0.63,eRank=82.3,q75/q25=13.16 attn_vo:H=0.5401,top10E=0.51,eRank=84.2,q75/q25=inf mlp_w1:H=0.4554,top10E=0.68,eRank=38.9,q75/q25=3.14 mlp_w2:H=0.5365,top10E=0.62,eRank=36.6,q75/q25=11.96 vo_prod:H=0.3630,top10E=0.76,eRank=17.0,q75/q25=inf train_time:28131ms step_avg:70.33ms +[2025-09-02 16:51:50] [Rank 0] PRINT: step:400/10000 val_loss:5.9784 svd_entropy: attn_qk:H=0.5438,top10E=0.63,eRank=82.3,q75/q25=13.16 attn_vo:H=0.5401,top10E=0.51,eRank=84.2,q75/q25=inf mlp_w1:H=0.4554,top10E=0.68,eRank=38.9,q75/q25=3.14 mlp_w2:H=0.5365,top10E=0.62,eRank=36.6,q75/q25=11.96 vo_prod:H=0.3630,top10E=0.76,eRank=17.0,q75/q25=inf train_time:28131ms step_avg:70.33ms +[2025-09-02 16:51:50] [Rank 0] step:401/10000 train_time:28143ms step_avg:70.18ms +[2025-09-02 16:51:50] [Rank 0] step:401/10000 train_time:28143ms step_avg:70.18ms +[2025-09-02 16:51:52] [Rank 0] step:421/10000 train_time:29415ms step_avg:69.87ms +[2025-09-02 16:51:52] [Rank 0] step:421/10000 train_time:29415ms step_avg:69.87ms +[2025-09-02 16:51:53] [Rank 0] step:441/10000 train_time:30819ms step_avg:69.88ms +[2025-09-02 16:51:53] [Rank 0] step:441/10000 train_time:30819ms step_avg:69.88ms +[2025-09-02 16:51:55] [Rank 0] step:461/10000 train_time:32223ms step_avg:69.90ms +[2025-09-02 16:51:55] [Rank 0] step:461/10000 train_time:32223ms step_avg:69.90ms +[2025-09-02 16:51:56] [Rank 0] step:481/10000 train_time:33626ms step_avg:69.91ms +[2025-09-02 16:51:56] [Rank 0] step:481/10000 train_time:33626ms step_avg:69.91ms +[2025-09-02 16:51:57] [Rank 0] step:501/10000 train_time:35061ms step_avg:69.98ms +[2025-09-02 16:51:57] [Rank 0] step:501/10000 train_time:35061ms step_avg:69.98ms +[2025-09-02 16:51:59] [Rank 0] step:521/10000 train_time:36467ms step_avg:69.99ms +[2025-09-02 16:51:59] [Rank 0] step:521/10000 train_time:36467ms step_avg:69.99ms +[2025-09-02 16:52:00] [Rank 0] step:541/10000 train_time:37876ms 
step_avg:70.01ms +[2025-09-02 16:52:00] [Rank 0] step:541/10000 train_time:37876ms step_avg:70.01ms +[2025-09-02 16:52:02] [Rank 0] step:561/10000 train_time:39281ms step_avg:70.02ms +[2025-09-02 16:52:02] [Rank 0] step:561/10000 train_time:39281ms step_avg:70.02ms +[2025-09-02 16:52:03] [Rank 0] step:581/10000 train_time:40686ms step_avg:70.03ms +[2025-09-02 16:52:03] [Rank 0] step:581/10000 train_time:40686ms step_avg:70.03ms +[2025-09-02 16:52:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:52:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:52:16] [Rank 0] PRINT: step:600/10000 val_loss:5.6842 svd_entropy: attn_qk:H=0.5749,top10E=0.57,eRank=89.1,q75/q25=14.57 attn_vo:H=0.5819,top10E=0.44,eRank=99.9,q75/q25=inf mlp_w1:H=0.4899,top10E=0.64,eRank=47.6,q75/q25=3.50 mlp_w2:H=0.6312,top10E=0.47,eRank=67.3,q75/q25=8.55 vo_prod:H=0.4235,top10E=0.65,eRank=24.1,q75/q25=inf train_time:42233ms step_avg:70.39ms +[2025-09-02 16:52:16] [Rank 0] PRINT: step:600/10000 val_loss:5.6842 svd_entropy: attn_qk:H=0.5749,top10E=0.57,eRank=89.1,q75/q25=14.57 attn_vo:H=0.5819,top10E=0.44,eRank=99.9,q75/q25=inf mlp_w1:H=0.4899,top10E=0.64,eRank=47.6,q75/q25=3.50 mlp_w2:H=0.6312,top10E=0.47,eRank=67.3,q75/q25=8.55 vo_prod:H=0.4235,top10E=0.65,eRank=24.1,q75/q25=inf train_time:42233ms step_avg:70.39ms +[2025-09-02 16:52:16] [Rank 0] step:601/10000 train_time:42245ms step_avg:70.29ms +[2025-09-02 16:52:16] [Rank 0] step:601/10000 train_time:42245ms step_avg:70.29ms +[2025-09-02 16:52:17] [Rank 0] step:621/10000 train_time:43509ms step_avg:70.06ms +[2025-09-02 16:52:17] [Rank 0] step:621/10000 train_time:43509ms step_avg:70.06ms +[2025-09-02 16:52:19] [Rank 0] step:641/10000 train_time:44912ms step_avg:70.07ms +[2025-09-02 16:52:19] [Rank 0] step:641/10000 train_time:44912ms step_avg:70.07ms 
+[2025-09-02 16:52:20] [Rank 0] step:661/10000 train_time:46316ms step_avg:70.07ms +[2025-09-02 16:52:20] [Rank 0] step:661/10000 train_time:46316ms step_avg:70.07ms +[2025-09-02 16:52:22] [Rank 0] step:681/10000 train_time:47720ms step_avg:70.07ms +[2025-09-02 16:52:22] [Rank 0] step:681/10000 train_time:47720ms step_avg:70.07ms +[2025-09-02 16:52:23] [Rank 0] step:701/10000 train_time:49125ms step_avg:70.08ms +[2025-09-02 16:52:23] [Rank 0] step:701/10000 train_time:49125ms step_avg:70.08ms +[2025-09-02 16:52:25] [Rank 0] step:721/10000 train_time:50530ms step_avg:70.08ms +[2025-09-02 16:52:25] [Rank 0] step:721/10000 train_time:50530ms step_avg:70.08ms +[2025-09-02 16:52:26] [Rank 0] step:741/10000 train_time:51936ms step_avg:70.09ms +[2025-09-02 16:52:26] [Rank 0] step:741/10000 train_time:51936ms step_avg:70.09ms +[2025-09-02 16:52:27] [Rank 0] step:761/10000 train_time:53353ms step_avg:70.11ms +[2025-09-02 16:52:27] [Rank 0] step:761/10000 train_time:53353ms step_avg:70.11ms +[2025-09-02 16:52:29] [Rank 0] step:781/10000 train_time:54772ms step_avg:70.13ms +[2025-09-02 16:52:29] [Rank 0] step:781/10000 train_time:54772ms step_avg:70.13ms +[2025-09-02 16:52:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:52:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:52:42] [Rank 0] PRINT: step:800/10000 val_loss:5.4563 svd_entropy: attn_qk:H=0.5991,top10E=0.52,eRank=94.9,q75/q25=16.33 attn_vo:H=0.6117,top10E=0.40,eRank=114.0,q75/q25=inf mlp_w1:H=0.5239,top10E=0.60,eRank=55.3,q75/q25=3.82 mlp_w2:H=0.6872,top10E=0.38,eRank=96.9,q75/q25=7.79 vo_prod:H=0.4611,top10E=0.58,eRank=30.3,q75/q25=inf train_time:56333ms step_avg:70.42ms +[2025-09-02 16:52:42] [Rank 0] PRINT: step:800/10000 val_loss:5.4563 svd_entropy: attn_qk:H=0.5991,top10E=0.52,eRank=94.9,q75/q25=16.33 attn_vo:H=0.6117,top10E=0.40,eRank=114.0,q75/q25=inf mlp_w1:H=0.5239,top10E=0.60,eRank=55.3,q75/q25=3.82 mlp_w2:H=0.6872,top10E=0.38,eRank=96.9,q75/q25=7.79 vo_prod:H=0.4611,top10E=0.58,eRank=30.3,q75/q25=inf train_time:56333ms step_avg:70.42ms +[2025-09-02 16:52:42] [Rank 0] step:801/10000 train_time:56345ms step_avg:70.34ms +[2025-09-02 16:52:42] [Rank 0] step:801/10000 train_time:56345ms step_avg:70.34ms +[2025-09-02 16:52:43] [Rank 0] step:821/10000 train_time:57638ms step_avg:70.21ms +[2025-09-02 16:52:43] [Rank 0] step:821/10000 train_time:57638ms step_avg:70.21ms +[2025-09-02 16:52:45] [Rank 0] step:841/10000 train_time:59055ms step_avg:70.22ms +[2025-09-02 16:52:45] [Rank 0] step:841/10000 train_time:59055ms step_avg:70.22ms +[2025-09-02 16:52:46] [Rank 0] step:861/10000 train_time:60472ms step_avg:70.23ms +[2025-09-02 16:52:46] [Rank 0] step:861/10000 train_time:60472ms step_avg:70.23ms +[2025-09-02 16:52:48] [Rank 0] step:881/10000 train_time:61890ms step_avg:70.25ms +[2025-09-02 16:52:48] [Rank 0] step:881/10000 train_time:61890ms step_avg:70.25ms +[2025-09-02 16:52:49] [Rank 0] step:901/10000 train_time:63309ms step_avg:70.27ms +[2025-09-02 16:52:49] [Rank 0] step:901/10000 train_time:63309ms step_avg:70.27ms +[2025-09-02 16:52:50] [Rank 0] step:921/10000 train_time:64728ms step_avg:70.28ms +[2025-09-02 16:52:50] [Rank 0] step:921/10000 train_time:64728ms step_avg:70.28ms +[2025-09-02 16:52:52] [Rank 0] step:941/10000 train_time:66156ms 
step_avg:70.30ms +[2025-09-02 16:52:52] [Rank 0] step:941/10000 train_time:66156ms step_avg:70.30ms +[2025-09-02 16:52:53] [Rank 0] step:961/10000 train_time:67575ms step_avg:70.32ms +[2025-09-02 16:52:53] [Rank 0] step:961/10000 train_time:67575ms step_avg:70.32ms +[2025-09-02 16:52:55] [Rank 0] step:981/10000 train_time:68995ms step_avg:70.33ms +[2025-09-02 16:52:55] [Rank 0] step:981/10000 train_time:68995ms step_avg:70.33ms +[2025-09-02 16:52:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:52:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:53:08] [Rank 0] PRINT: step:1000/10000 val_loss:5.2819 svd_entropy: attn_qk:H=0.6184,top10E=0.48,eRank=100.4,q75/q25=18.58 attn_vo:H=0.6357,top10E=0.36,eRank=128.5,q75/q25=inf mlp_w1:H=0.5486,top10E=0.57,eRank=61.4,q75/q25=4.11 mlp_w2:H=0.7214,top10E=0.32,eRank=121.5,q75/q25=7.89 vo_prod:H=0.4874,top10E=0.53,eRank=36.0,q75/q25=inf train_time:70556ms step_avg:70.56ms +[2025-09-02 16:53:08] [Rank 0] PRINT: step:1000/10000 val_loss:5.2819 svd_entropy: attn_qk:H=0.6184,top10E=0.48,eRank=100.4,q75/q25=18.58 attn_vo:H=0.6357,top10E=0.36,eRank=128.5,q75/q25=inf mlp_w1:H=0.5486,top10E=0.57,eRank=61.4,q75/q25=4.11 mlp_w2:H=0.7214,top10E=0.32,eRank=121.5,q75/q25=7.89 vo_prod:H=0.4874,top10E=0.53,eRank=36.0,q75/q25=inf train_time:70556ms step_avg:70.56ms +[2025-09-02 16:53:08] [Rank 0] step:1001/10000 train_time:70569ms step_avg:70.50ms +[2025-09-02 16:53:08] [Rank 0] step:1001/10000 train_time:70569ms step_avg:70.50ms +[2025-09-02 16:53:09] [Rank 0] step:1021/10000 train_time:71860ms step_avg:70.38ms +[2025-09-02 16:53:09] [Rank 0] step:1021/10000 train_time:71860ms step_avg:70.38ms +[2025-09-02 16:53:11] [Rank 0] step:1041/10000 train_time:73277ms step_avg:70.39ms +[2025-09-02 16:53:11] [Rank 0] step:1041/10000 train_time:73277ms 
step_avg:70.39ms +[2025-09-02 16:53:12] [Rank 0] step:1061/10000 train_time:74700ms step_avg:70.41ms +[2025-09-02 16:53:12] [Rank 0] step:1061/10000 train_time:74700ms step_avg:70.41ms +[2025-09-02 16:53:14] [Rank 0] step:1081/10000 train_time:76119ms step_avg:70.42ms +[2025-09-02 16:53:14] [Rank 0] step:1081/10000 train_time:76119ms step_avg:70.42ms +[2025-09-02 16:53:15] [Rank 0] step:1101/10000 train_time:77536ms step_avg:70.42ms +[2025-09-02 16:53:15] [Rank 0] step:1101/10000 train_time:77536ms step_avg:70.42ms +[2025-09-02 16:53:16] [Rank 0] step:1121/10000 train_time:78956ms step_avg:70.43ms +[2025-09-02 16:53:16] [Rank 0] step:1121/10000 train_time:78956ms step_avg:70.43ms +[2025-09-02 16:53:18] [Rank 0] step:1141/10000 train_time:80375ms step_avg:70.44ms +[2025-09-02 16:53:18] [Rank 0] step:1141/10000 train_time:80375ms step_avg:70.44ms +[2025-09-02 16:53:19] [Rank 0] step:1161/10000 train_time:81795ms step_avg:70.45ms +[2025-09-02 16:53:19] [Rank 0] step:1161/10000 train_time:81795ms step_avg:70.45ms +[2025-09-02 16:53:21] [Rank 0] step:1181/10000 train_time:83216ms step_avg:70.46ms +[2025-09-02 16:53:21] [Rank 0] step:1181/10000 train_time:83216ms step_avg:70.46ms +[2025-09-02 16:53:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:53:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:53:34] [Rank 0] PRINT: step:1200/10000 val_loss:5.1096 svd_entropy: attn_qk:H=0.6336,top10E=0.45,eRank=105.6,q75/q25=21.61 attn_vo:H=0.6561,top10E=0.33,eRank=142.5,q75/q25=inf mlp_w1:H=0.5675,top10E=0.54,eRank=66.7,q75/q25=4.44 mlp_w2:H=0.7404,top10E=0.29,eRank=137.9,q75/q25=8.77 vo_prod:H=0.5079,top10E=0.49,eRank=41.0,q75/q25=inf train_time:84778ms step_avg:70.65ms +[2025-09-02 16:53:34] [Rank 0] PRINT: step:1200/10000 val_loss:5.1096 svd_entropy: attn_qk:H=0.6336,top10E=0.45,eRank=105.6,q75/q25=21.61 attn_vo:H=0.6561,top10E=0.33,eRank=142.5,q75/q25=inf mlp_w1:H=0.5675,top10E=0.54,eRank=66.7,q75/q25=4.44 mlp_w2:H=0.7404,top10E=0.29,eRank=137.9,q75/q25=8.77 vo_prod:H=0.5079,top10E=0.49,eRank=41.0,q75/q25=inf train_time:84778ms step_avg:70.65ms +[2025-09-02 16:53:34] [Rank 0] step:1201/10000 train_time:84790ms step_avg:70.60ms +[2025-09-02 16:53:34] [Rank 0] step:1201/10000 train_time:84790ms step_avg:70.60ms +[2025-09-02 16:53:35] [Rank 0] step:1221/10000 train_time:86069ms step_avg:70.49ms +[2025-09-02 16:53:35] [Rank 0] step:1221/10000 train_time:86069ms step_avg:70.49ms +[2025-09-02 16:53:37] [Rank 0] step:1241/10000 train_time:87486ms step_avg:70.50ms +[2025-09-02 16:53:37] [Rank 0] step:1241/10000 train_time:87486ms step_avg:70.50ms +[2025-09-02 16:53:38] [Rank 0] step:1261/10000 train_time:88904ms step_avg:70.50ms +[2025-09-02 16:53:38] [Rank 0] step:1261/10000 train_time:88904ms step_avg:70.50ms +[2025-09-02 16:53:39] [Rank 0] step:1281/10000 train_time:90321ms step_avg:70.51ms +[2025-09-02 16:53:39] [Rank 0] step:1281/10000 train_time:90321ms step_avg:70.51ms +[2025-09-02 16:53:41] [Rank 0] step:1301/10000 train_time:91739ms step_avg:70.51ms +[2025-09-02 16:53:41] [Rank 0] step:1301/10000 train_time:91739ms step_avg:70.51ms +[2025-09-02 16:53:42] [Rank 0] step:1321/10000 train_time:93157ms step_avg:70.52ms +[2025-09-02 16:53:42] [Rank 0] step:1321/10000 train_time:93157ms step_avg:70.52ms +[2025-09-02 16:53:44] [Rank 0] step:1341/10000 
train_time:94578ms step_avg:70.53ms +[2025-09-02 16:53:44] [Rank 0] step:1341/10000 train_time:94578ms step_avg:70.53ms +[2025-09-02 16:53:45] [Rank 0] step:1361/10000 train_time:95998ms step_avg:70.53ms +[2025-09-02 16:53:45] [Rank 0] step:1361/10000 train_time:95998ms step_avg:70.53ms +[2025-09-02 16:53:47] [Rank 0] step:1381/10000 train_time:97417ms step_avg:70.54ms +[2025-09-02 16:53:47] [Rank 0] step:1381/10000 train_time:97417ms step_avg:70.54ms +[2025-09-02 16:53:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:53:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:54:00] [Rank 0] PRINT: step:1400/10000 val_loss:4.9799 svd_entropy: attn_qk:H=0.6463,top10E=0.43,eRank=110.5,q75/q25=25.50 attn_vo:H=0.6725,top10E=0.31,eRank=154.0,q75/q25=inf mlp_w1:H=0.5885,top10E=0.52,eRank=72.9,q75/q25=4.80 mlp_w2:H=0.7580,top10E=0.26,eRank=155.2,q75/q25=9.59 vo_prod:H=0.5246,top10E=0.45,eRank=45.8,q75/q25=inf train_time:98979ms step_avg:70.70ms +[2025-09-02 16:54:00] [Rank 0] PRINT: step:1400/10000 val_loss:4.9799 svd_entropy: attn_qk:H=0.6463,top10E=0.43,eRank=110.5,q75/q25=25.50 attn_vo:H=0.6725,top10E=0.31,eRank=154.0,q75/q25=inf mlp_w1:H=0.5885,top10E=0.52,eRank=72.9,q75/q25=4.80 mlp_w2:H=0.7580,top10E=0.26,eRank=155.2,q75/q25=9.59 vo_prod:H=0.5246,top10E=0.45,eRank=45.8,q75/q25=inf train_time:98979ms step_avg:70.70ms +[2025-09-02 16:54:00] [Rank 0] step:1401/10000 train_time:98991ms step_avg:70.66ms +[2025-09-02 16:54:00] [Rank 0] step:1401/10000 train_time:98991ms step_avg:70.66ms +[2025-09-02 16:54:01] [Rank 0] step:1421/10000 train_time:100284ms step_avg:70.57ms +[2025-09-02 16:54:01] [Rank 0] step:1421/10000 train_time:100284ms step_avg:70.57ms +[2025-09-02 16:54:03] [Rank 0] step:1441/10000 train_time:101703ms step_avg:70.58ms +[2025-09-02 16:54:03] [Rank 0] step:1441/10000 
train_time:101703ms step_avg:70.58ms +[2025-09-02 16:54:04] [Rank 0] step:1461/10000 train_time:103124ms step_avg:70.58ms +[2025-09-02 16:54:04] [Rank 0] step:1461/10000 train_time:103124ms step_avg:70.58ms +[2025-09-02 16:54:06] [Rank 0] step:1481/10000 train_time:104543ms step_avg:70.59ms +[2025-09-02 16:54:06] [Rank 0] step:1481/10000 train_time:104543ms step_avg:70.59ms +[2025-09-02 16:54:07] [Rank 0] step:1501/10000 train_time:105971ms step_avg:70.60ms +[2025-09-02 16:54:07] [Rank 0] step:1501/10000 train_time:105971ms step_avg:70.60ms +[2025-09-02 16:54:08] [Rank 0] step:1521/10000 train_time:107403ms step_avg:70.61ms +[2025-09-02 16:54:08] [Rank 0] step:1521/10000 train_time:107403ms step_avg:70.61ms +[2025-09-02 16:54:10] [Rank 0] step:1541/10000 train_time:108833ms step_avg:70.63ms +[2025-09-02 16:54:10] [Rank 0] step:1541/10000 train_time:108833ms step_avg:70.63ms +[2025-09-02 16:54:11] [Rank 0] step:1561/10000 train_time:110264ms step_avg:70.64ms +[2025-09-02 16:54:11] [Rank 0] step:1561/10000 train_time:110264ms step_avg:70.64ms +[2025-09-02 16:54:13] [Rank 0] step:1581/10000 train_time:111695ms step_avg:70.65ms +[2025-09-02 16:54:13] [Rank 0] step:1581/10000 train_time:111695ms step_avg:70.65ms +[2025-09-02 16:54:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:54:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:54:26] [Rank 0] PRINT: step:1600/10000 val_loss:4.8392 svd_entropy: attn_qk:H=0.6569,top10E=0.41,eRank=114.5,q75/q25=30.17 attn_vo:H=0.6863,top10E=0.29,eRank=163.9,q75/q25=inf mlp_w1:H=0.6066,top10E=0.50,eRank=79.0,q75/q25=5.20 mlp_w2:H=0.7712,top10E=0.24,eRank=169.6,q75/q25=10.54 vo_prod:H=0.5394,top10E=0.43,eRank=50.7,q75/q25=inf train_time:113270ms step_avg:70.79ms +[2025-09-02 16:54:26] [Rank 0] PRINT: step:1600/10000 val_loss:4.8392 svd_entropy: attn_qk:H=0.6569,top10E=0.41,eRank=114.5,q75/q25=30.17 attn_vo:H=0.6863,top10E=0.29,eRank=163.9,q75/q25=inf mlp_w1:H=0.6066,top10E=0.50,eRank=79.0,q75/q25=5.20 mlp_w2:H=0.7712,top10E=0.24,eRank=169.6,q75/q25=10.54 vo_prod:H=0.5394,top10E=0.43,eRank=50.7,q75/q25=inf train_time:113270ms step_avg:70.79ms +[2025-09-02 16:54:26] [Rank 0] step:1601/10000 train_time:113281ms step_avg:70.76ms +[2025-09-02 16:54:26] [Rank 0] step:1601/10000 train_time:113281ms step_avg:70.76ms +[2025-09-02 16:54:28] [Rank 0] step:1621/10000 train_time:114598ms step_avg:70.70ms +[2025-09-02 16:54:28] [Rank 0] step:1621/10000 train_time:114598ms step_avg:70.70ms +[2025-09-02 16:54:29] [Rank 0] step:1641/10000 train_time:116029ms step_avg:70.71ms +[2025-09-02 16:54:29] [Rank 0] step:1641/10000 train_time:116029ms step_avg:70.71ms +[2025-09-02 16:54:30] [Rank 0] step:1661/10000 train_time:117461ms step_avg:70.72ms +[2025-09-02 16:54:30] [Rank 0] step:1661/10000 train_time:117461ms step_avg:70.72ms +[2025-09-02 16:54:32] [Rank 0] step:1681/10000 train_time:118893ms step_avg:70.73ms +[2025-09-02 16:54:32] [Rank 0] step:1681/10000 train_time:118893ms step_avg:70.73ms +[2025-09-02 16:54:33] [Rank 0] step:1701/10000 train_time:120324ms step_avg:70.74ms +[2025-09-02 16:54:33] [Rank 0] step:1701/10000 train_time:120324ms step_avg:70.74ms +[2025-09-02 16:54:35] [Rank 0] step:1721/10000 train_time:121755ms step_avg:70.75ms +[2025-09-02 16:54:35] [Rank 0] step:1721/10000 train_time:121755ms step_avg:70.75ms +[2025-09-02 16:54:36] [Rank 0] 
step:1741/10000 train_time:123188ms step_avg:70.76ms +[2025-09-02 16:54:36] [Rank 0] step:1741/10000 train_time:123188ms step_avg:70.76ms +[2025-09-02 16:54:38] [Rank 0] step:1761/10000 train_time:124621ms step_avg:70.77ms +[2025-09-02 16:54:38] [Rank 0] step:1761/10000 train_time:124621ms step_avg:70.77ms +[2025-09-02 16:54:39] [Rank 0] step:1781/10000 train_time:126053ms step_avg:70.78ms +[2025-09-02 16:54:39] [Rank 0] step:1781/10000 train_time:126053ms step_avg:70.78ms +[2025-09-02 16:54:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:54:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:54:52] [Rank 0] PRINT: step:1800/10000 val_loss:4.7300 svd_entropy: attn_qk:H=0.6665,top10E=0.40,eRank=118.6,q75/q25=34.95 attn_vo:H=0.6976,top10E=0.27,eRank=172.3,q75/q25=inf mlp_w1:H=0.6229,top10E=0.48,eRank=85.2,q75/q25=5.60 mlp_w2:H=0.7819,top10E=0.23,eRank=182.3,q75/q25=11.33 vo_prod:H=0.5520,top10E=0.40,eRank=55.2,q75/q25=inf train_time:127630ms step_avg:70.91ms +[2025-09-02 16:54:52] [Rank 0] PRINT: step:1800/10000 val_loss:4.7300 svd_entropy: attn_qk:H=0.6665,top10E=0.40,eRank=118.6,q75/q25=34.95 attn_vo:H=0.6976,top10E=0.27,eRank=172.3,q75/q25=inf mlp_w1:H=0.6229,top10E=0.48,eRank=85.2,q75/q25=5.60 mlp_w2:H=0.7819,top10E=0.23,eRank=182.3,q75/q25=11.33 vo_prod:H=0.5520,top10E=0.40,eRank=55.2,q75/q25=inf train_time:127630ms step_avg:70.91ms +[2025-09-02 16:54:52] [Rank 0] step:1801/10000 train_time:127641ms step_avg:70.87ms +[2025-09-02 16:54:52] [Rank 0] step:1801/10000 train_time:127641ms step_avg:70.87ms +[2025-09-02 16:54:54] [Rank 0] step:1821/10000 train_time:128960ms step_avg:70.82ms +[2025-09-02 16:54:54] [Rank 0] step:1821/10000 train_time:128960ms step_avg:70.82ms +[2025-09-02 16:54:55] [Rank 0] step:1841/10000 train_time:130390ms step_avg:70.83ms +[2025-09-02 16:54:55] 
[Rank 0] step:1841/10000 train_time:130390ms step_avg:70.83ms +[2025-09-02 16:54:56] [Rank 0] step:1861/10000 train_time:131821ms step_avg:70.83ms +[2025-09-02 16:54:56] [Rank 0] step:1861/10000 train_time:131821ms step_avg:70.83ms +[2025-09-02 16:54:58] [Rank 0] step:1881/10000 train_time:133250ms step_avg:70.84ms +[2025-09-02 16:54:58] [Rank 0] step:1881/10000 train_time:133250ms step_avg:70.84ms +[2025-09-02 16:54:59] [Rank 0] step:1901/10000 train_time:134680ms step_avg:70.85ms +[2025-09-02 16:54:59] [Rank 0] step:1901/10000 train_time:134680ms step_avg:70.85ms +[2025-09-02 16:55:01] [Rank 0] step:1921/10000 train_time:136109ms step_avg:70.85ms +[2025-09-02 16:55:01] [Rank 0] step:1921/10000 train_time:136109ms step_avg:70.85ms +[2025-09-02 16:55:02] [Rank 0] step:1941/10000 train_time:137541ms step_avg:70.86ms +[2025-09-02 16:55:02] [Rank 0] step:1941/10000 train_time:137541ms step_avg:70.86ms +[2025-09-02 16:55:04] [Rank 0] step:1961/10000 train_time:138972ms step_avg:70.87ms +[2025-09-02 16:55:04] [Rank 0] step:1961/10000 train_time:138972ms step_avg:70.87ms +[2025-09-02 16:55:05] [Rank 0] step:1981/10000 train_time:140403ms step_avg:70.87ms +[2025-09-02 16:55:05] [Rank 0] step:1981/10000 train_time:140403ms step_avg:70.87ms +[2025-09-02 16:55:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:55:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:55:18] [Rank 0] PRINT: step:2000/10000 val_loss:4.6585 svd_entropy: attn_qk:H=0.6748,top10E=0.38,eRank=122.4,q75/q25=39.95 attn_vo:H=0.7072,top10E=0.26,eRank=179.6,q75/q25=inf mlp_w1:H=0.6369,top10E=0.46,eRank=91.1,q75/q25=5.97 mlp_w2:H=0.7905,top10E=0.22,eRank=193.3,q75/q25=12.14 vo_prod:H=0.5629,top10E=0.38,eRank=59.5,q75/q25=inf train_time:142004ms step_avg:71.00ms +[2025-09-02 16:55:18] [Rank 0] PRINT: step:2000/10000 val_loss:4.6585 svd_entropy: attn_qk:H=0.6748,top10E=0.38,eRank=122.4,q75/q25=39.95 attn_vo:H=0.7072,top10E=0.26,eRank=179.6,q75/q25=inf mlp_w1:H=0.6369,top10E=0.46,eRank=91.1,q75/q25=5.97 mlp_w2:H=0.7905,top10E=0.22,eRank=193.3,q75/q25=12.14 vo_prod:H=0.5629,top10E=0.38,eRank=59.5,q75/q25=inf train_time:142004ms step_avg:71.00ms +[2025-09-02 16:55:18] [Rank 0] step:2001/10000 train_time:142015ms step_avg:70.97ms +[2025-09-02 16:55:18] [Rank 0] step:2001/10000 train_time:142015ms step_avg:70.97ms +[2025-09-02 16:55:20] [Rank 0] step:2021/10000 train_time:143303ms step_avg:70.91ms +[2025-09-02 16:55:20] [Rank 0] step:2021/10000 train_time:143303ms step_avg:70.91ms +[2025-09-02 16:55:21] [Rank 0] step:2041/10000 train_time:144855ms step_avg:70.97ms +[2025-09-02 16:55:21] [Rank 0] step:2041/10000 train_time:144855ms step_avg:70.97ms +[2025-09-02 16:55:22] [Rank 0] step:2061/10000 train_time:146284ms step_avg:70.98ms +[2025-09-02 16:55:22] [Rank 0] step:2061/10000 train_time:146284ms step_avg:70.98ms +[2025-09-02 16:55:24] [Rank 0] step:2081/10000 train_time:147714ms step_avg:70.98ms +[2025-09-02 16:55:24] [Rank 0] step:2081/10000 train_time:147714ms step_avg:70.98ms +[2025-09-02 16:55:25] [Rank 0] step:2101/10000 train_time:149146ms step_avg:70.99ms +[2025-09-02 16:55:25] [Rank 0] step:2101/10000 train_time:149146ms step_avg:70.99ms +[2025-09-02 16:55:27] [Rank 0] step:2121/10000 train_time:150577ms step_avg:70.99ms +[2025-09-02 16:55:27] [Rank 0] step:2121/10000 train_time:150577ms step_avg:70.99ms +[2025-09-02 16:55:28] [Rank 0] 
step:2141/10000 train_time:152008ms step_avg:71.00ms +[2025-09-02 16:55:28] [Rank 0] step:2141/10000 train_time:152008ms step_avg:71.00ms +[2025-09-02 16:55:30] [Rank 0] step:2161/10000 train_time:153439ms step_avg:71.00ms +[2025-09-02 16:55:30] [Rank 0] step:2161/10000 train_time:153439ms step_avg:71.00ms +[2025-09-02 16:55:31] [Rank 0] step:2181/10000 train_time:154871ms step_avg:71.01ms +[2025-09-02 16:55:31] [Rank 0] step:2181/10000 train_time:154871ms step_avg:71.01ms +[2025-09-02 16:55:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:55:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:55:44] [Rank 0] PRINT: step:2200/10000 val_loss:4.5830 svd_entropy: attn_qk:H=0.6820,top10E=0.37,eRank=125.9,q75/q25=44.49 attn_vo:H=0.7151,top10E=0.25,eRank=186.0,q75/q25=inf mlp_w1:H=0.6500,top10E=0.44,eRank=97.1,q75/q25=6.33 mlp_w2:H=0.7979,top10E=0.21,eRank=203.2,q75/q25=12.65 vo_prod:H=0.5720,top10E=0.36,eRank=63.4,q75/q25=inf train_time:156445ms step_avg:71.11ms +[2025-09-02 16:55:44] [Rank 0] PRINT: step:2200/10000 val_loss:4.5830 svd_entropy: attn_qk:H=0.6820,top10E=0.37,eRank=125.9,q75/q25=44.49 attn_vo:H=0.7151,top10E=0.25,eRank=186.0,q75/q25=inf mlp_w1:H=0.6500,top10E=0.44,eRank=97.1,q75/q25=6.33 mlp_w2:H=0.7979,top10E=0.21,eRank=203.2,q75/q25=12.65 vo_prod:H=0.5720,top10E=0.36,eRank=63.4,q75/q25=inf train_time:156445ms step_avg:71.11ms +[2025-09-02 16:55:44] [Rank 0] step:2201/10000 train_time:156456ms step_avg:71.08ms +[2025-09-02 16:55:44] [Rank 0] step:2201/10000 train_time:156456ms step_avg:71.08ms +[2025-09-02 16:55:46] [Rank 0] step:2221/10000 train_time:157764ms step_avg:71.03ms +[2025-09-02 16:55:46] [Rank 0] step:2221/10000 train_time:157764ms step_avg:71.03ms +[2025-09-02 16:55:47] [Rank 0] step:2241/10000 train_time:159226ms step_avg:71.05ms +[2025-09-02 16:55:47] 
[Rank 0] step:2241/10000 train_time:159226ms step_avg:71.05ms +[2025-09-02 16:55:49] [Rank 0] step:2261/10000 train_time:160700ms step_avg:71.07ms +[2025-09-02 16:55:49] [Rank 0] step:2261/10000 train_time:160700ms step_avg:71.07ms +[2025-09-02 16:55:50] [Rank 0] step:2281/10000 train_time:162180ms step_avg:71.10ms +[2025-09-02 16:55:50] [Rank 0] step:2281/10000 train_time:162180ms step_avg:71.10ms +[2025-09-02 16:55:52] [Rank 0] step:2301/10000 train_time:163655ms step_avg:71.12ms +[2025-09-02 16:55:52] [Rank 0] step:2301/10000 train_time:163655ms step_avg:71.12ms +[2025-09-02 16:55:53] [Rank 0] step:2321/10000 train_time:165130ms step_avg:71.15ms +[2025-09-02 16:55:53] [Rank 0] step:2321/10000 train_time:165130ms step_avg:71.15ms +[2025-09-02 16:55:54] [Rank 0] step:2341/10000 train_time:166607ms step_avg:71.17ms +[2025-09-02 16:55:54] [Rank 0] step:2341/10000 train_time:166607ms step_avg:71.17ms +[2025-09-02 16:55:56] [Rank 0] step:2361/10000 train_time:168081ms step_avg:71.19ms +[2025-09-02 16:55:56] [Rank 0] step:2361/10000 train_time:168081ms step_avg:71.19ms +[2025-09-02 16:55:57] [Rank 0] step:2381/10000 train_time:169555ms step_avg:71.21ms +[2025-09-02 16:55:57] [Rank 0] step:2381/10000 train_time:169555ms step_avg:71.21ms +[2025-09-02 16:55:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:55:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:56:11] [Rank 0] PRINT: step:2400/10000 val_loss:4.5053 svd_entropy: attn_qk:H=0.6879,top10E=0.36,eRank=129.0,q75/q25=49.39 attn_vo:H=0.7223,top10E=0.23,eRank=191.9,q75/q25=inf mlp_w1:H=0.6627,top10E=0.43,eRank=103.5,q75/q25=6.68 mlp_w2:H=0.8057,top10E=0.20,eRank=213.9,q75/q25=13.04 vo_prod:H=0.5808,top10E=0.35,eRank=67.4,q75/q25=inf train_time:171179ms step_avg:71.32ms +[2025-09-02 16:56:11] [Rank 0] PRINT: step:2400/10000 val_loss:4.5053 svd_entropy: attn_qk:H=0.6879,top10E=0.36,eRank=129.0,q75/q25=49.39 attn_vo:H=0.7223,top10E=0.23,eRank=191.9,q75/q25=inf mlp_w1:H=0.6627,top10E=0.43,eRank=103.5,q75/q25=6.68 mlp_w2:H=0.8057,top10E=0.20,eRank=213.9,q75/q25=13.04 vo_prod:H=0.5808,top10E=0.35,eRank=67.4,q75/q25=inf train_time:171179ms step_avg:71.32ms +[2025-09-02 16:56:11] [Rank 0] step:2401/10000 train_time:171190ms step_avg:71.30ms +[2025-09-02 16:56:11] [Rank 0] step:2401/10000 train_time:171190ms step_avg:71.30ms +[2025-09-02 16:56:12] [Rank 0] step:2421/10000 train_time:172540ms step_avg:71.27ms +[2025-09-02 16:56:12] [Rank 0] step:2421/10000 train_time:172540ms step_avg:71.27ms +[2025-09-02 16:56:14] [Rank 0] step:2441/10000 train_time:174075ms step_avg:71.31ms +[2025-09-02 16:56:14] [Rank 0] step:2441/10000 train_time:174075ms step_avg:71.31ms +[2025-09-02 16:56:15] [Rank 0] step:2461/10000 train_time:175550ms step_avg:71.33ms +[2025-09-02 16:56:15] [Rank 0] step:2461/10000 train_time:175550ms step_avg:71.33ms +[2025-09-02 16:56:17] [Rank 0] step:2481/10000 train_time:177026ms step_avg:71.35ms +[2025-09-02 16:56:17] [Rank 0] step:2481/10000 train_time:177026ms step_avg:71.35ms +[2025-09-02 16:56:18] [Rank 0] step:2501/10000 train_time:178500ms step_avg:71.37ms +[2025-09-02 16:56:18] [Rank 0] step:2501/10000 train_time:178500ms step_avg:71.37ms +[2025-09-02 16:56:20] [Rank 0] step:2521/10000 train_time:179975ms step_avg:71.39ms +[2025-09-02 16:56:20] [Rank 0] step:2521/10000 train_time:179975ms step_avg:71.39ms +[2025-09-02 16:56:21] [Rank 0] 
step:2541/10000 train_time:181452ms step_avg:71.41ms +[2025-09-02 16:56:21] [Rank 0] step:2541/10000 train_time:181452ms step_avg:71.41ms +[2025-09-02 16:56:23] [Rank 0] step:2561/10000 train_time:182931ms step_avg:71.43ms +[2025-09-02 16:56:23] [Rank 0] step:2561/10000 train_time:182931ms step_avg:71.43ms +[2025-09-02 16:56:24] [Rank 0] step:2581/10000 train_time:184410ms step_avg:71.45ms +[2025-09-02 16:56:24] [Rank 0] step:2581/10000 train_time:184410ms step_avg:71.45ms +[2025-09-02 16:56:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:56:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:56:37] [Rank 0] PRINT: step:2600/10000 val_loss:4.4462 svd_entropy: attn_qk:H=0.6938,top10E=0.35,eRank=132.2,q75/q25=54.07 attn_vo:H=0.7285,top10E=0.22,eRank=197.2,q75/q25=inf mlp_w1:H=0.6738,top10E=0.41,eRank=109.7,q75/q25=7.00 mlp_w2:H=0.8124,top10E=0.19,eRank=223.7,q75/q25=13.35 vo_prod:H=0.5886,top10E=0.33,eRank=71.0,q75/q25=inf train_time:186038ms step_avg:71.55ms +[2025-09-02 16:56:37] [Rank 0] PRINT: step:2600/10000 val_loss:4.4462 svd_entropy: attn_qk:H=0.6938,top10E=0.35,eRank=132.2,q75/q25=54.07 attn_vo:H=0.7285,top10E=0.22,eRank=197.2,q75/q25=inf mlp_w1:H=0.6738,top10E=0.41,eRank=109.7,q75/q25=7.00 mlp_w2:H=0.8124,top10E=0.19,eRank=223.7,q75/q25=13.35 vo_prod:H=0.5886,top10E=0.33,eRank=71.0,q75/q25=inf train_time:186038ms step_avg:71.55ms +[2025-09-02 16:56:37] [Rank 0] step:2601/10000 train_time:186049ms step_avg:71.53ms +[2025-09-02 16:56:37] [Rank 0] step:2601/10000 train_time:186049ms step_avg:71.53ms +[2025-09-02 16:56:39] [Rank 0] step:2621/10000 train_time:187393ms step_avg:71.50ms +[2025-09-02 16:56:39] [Rank 0] step:2621/10000 train_time:187393ms step_avg:71.50ms +[2025-09-02 16:56:40] [Rank 0] step:2641/10000 train_time:188867ms step_avg:71.51ms +[2025-09-02 
16:56:40] [Rank 0] step:2641/10000 train_time:188867ms step_avg:71.51ms +[2025-09-02 16:56:42] [Rank 0] step:2661/10000 train_time:190340ms step_avg:71.53ms +[2025-09-02 16:56:42] [Rank 0] step:2661/10000 train_time:190340ms step_avg:71.53ms +[2025-09-02 16:56:43] [Rank 0] step:2681/10000 train_time:191814ms step_avg:71.55ms +[2025-09-02 16:56:43] [Rank 0] step:2681/10000 train_time:191814ms step_avg:71.55ms +[2025-09-02 16:56:45] [Rank 0] step:2701/10000 train_time:193289ms step_avg:71.56ms +[2025-09-02 16:56:45] [Rank 0] step:2701/10000 train_time:193289ms step_avg:71.56ms +[2025-09-02 16:56:46] [Rank 0] step:2721/10000 train_time:194764ms step_avg:71.58ms +[2025-09-02 16:56:46] [Rank 0] step:2721/10000 train_time:194764ms step_avg:71.58ms +[2025-09-02 16:56:48] [Rank 0] step:2741/10000 train_time:196238ms step_avg:71.59ms +[2025-09-02 16:56:48] [Rank 0] step:2741/10000 train_time:196238ms step_avg:71.59ms +[2025-09-02 16:56:49] [Rank 0] step:2761/10000 train_time:197713ms step_avg:71.61ms +[2025-09-02 16:56:49] [Rank 0] step:2761/10000 train_time:197713ms step_avg:71.61ms +[2025-09-02 16:56:51] [Rank 0] step:2781/10000 train_time:199188ms step_avg:71.62ms +[2025-09-02 16:56:51] [Rank 0] step:2781/10000 train_time:199188ms step_avg:71.62ms +[2025-09-02 16:56:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:56:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:57:04] [Rank 0] PRINT: step:2800/10000 val_loss:4.4074 svd_entropy: attn_qk:H=0.6994,top10E=0.34,eRank=135.4,q75/q25=58.24 attn_vo:H=0.7341,top10E=0.22,eRank=202.3,q75/q25=inf mlp_w1:H=0.6844,top10E=0.40,eRank=116.0,q75/q25=7.31 mlp_w2:H=0.8183,top10E=0.18,eRank=232.6,q75/q25=13.58 vo_prod:H=0.5957,top10E=0.32,eRank=74.6,q75/q25=inf train_time:200811ms step_avg:71.72ms +[2025-09-02 16:57:04] [Rank 0] PRINT: step:2800/10000 val_loss:4.4074 svd_entropy: attn_qk:H=0.6994,top10E=0.34,eRank=135.4,q75/q25=58.24 attn_vo:H=0.7341,top10E=0.22,eRank=202.3,q75/q25=inf mlp_w1:H=0.6844,top10E=0.40,eRank=116.0,q75/q25=7.31 mlp_w2:H=0.8183,top10E=0.18,eRank=232.6,q75/q25=13.58 vo_prod:H=0.5957,top10E=0.32,eRank=74.6,q75/q25=inf train_time:200811ms step_avg:71.72ms +[2025-09-02 16:57:04] [Rank 0] step:2801/10000 train_time:200822ms step_avg:71.70ms +[2025-09-02 16:57:04] [Rank 0] step:2801/10000 train_time:200822ms step_avg:71.70ms +[2025-09-02 16:57:05] [Rank 0] step:2821/10000 train_time:202154ms step_avg:71.66ms +[2025-09-02 16:57:05] [Rank 0] step:2821/10000 train_time:202154ms step_avg:71.66ms +[2025-09-02 16:57:07] [Rank 0] step:2841/10000 train_time:203626ms step_avg:71.67ms +[2025-09-02 16:57:07] [Rank 0] step:2841/10000 train_time:203626ms step_avg:71.67ms +[2025-09-02 16:57:08] [Rank 0] step:2861/10000 train_time:205099ms step_avg:71.69ms +[2025-09-02 16:57:08] [Rank 0] step:2861/10000 train_time:205099ms step_avg:71.69ms +[2025-09-02 16:57:10] [Rank 0] step:2881/10000 train_time:206571ms step_avg:71.70ms +[2025-09-02 16:57:10] [Rank 0] step:2881/10000 train_time:206571ms step_avg:71.70ms +[2025-09-02 16:57:11] [Rank 0] step:2901/10000 train_time:208044ms step_avg:71.71ms +[2025-09-02 16:57:11] [Rank 0] step:2901/10000 train_time:208044ms step_avg:71.71ms +[2025-09-02 16:57:13] [Rank 0] step:2921/10000 train_time:209519ms step_avg:71.73ms +[2025-09-02 16:57:13] [Rank 0] step:2921/10000 train_time:209519ms step_avg:71.73ms +[2025-09-02 16:57:14] [Rank 0] 
step:2941/10000 train_time:210993ms step_avg:71.74ms +[2025-09-02 16:57:14] [Rank 0] step:2941/10000 train_time:210993ms step_avg:71.74ms +[2025-09-02 16:57:16] [Rank 0] step:2961/10000 train_time:212467ms step_avg:71.76ms +[2025-09-02 16:57:16] [Rank 0] step:2961/10000 train_time:212467ms step_avg:71.76ms +[2025-09-02 16:57:17] [Rank 0] step:2981/10000 train_time:213946ms step_avg:71.77ms +[2025-09-02 16:57:17] [Rank 0] step:2981/10000 train_time:213946ms step_avg:71.77ms +[2025-09-02 16:57:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:57:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:57:31] [Rank 0] PRINT: step:3000/10000 val_loss:4.3616 svd_entropy: attn_qk:H=0.7045,top10E=0.34,eRank=138.3,q75/q25=61.57 attn_vo:H=0.7392,top10E=0.21,eRank=206.8,q75/q25=inf mlp_w1:H=0.6935,top10E=0.39,eRank=121.8,q75/q25=7.64 mlp_w2:H=0.8230,top10E=0.17,eRank=240.0,q75/q25=13.87 vo_prod:H=0.6020,top10E=0.31,eRank=77.9,q75/q25=inf train_time:215575ms step_avg:71.86ms +[2025-09-02 16:57:31] [Rank 0] PRINT: step:3000/10000 val_loss:4.3616 svd_entropy: attn_qk:H=0.7045,top10E=0.34,eRank=138.3,q75/q25=61.57 attn_vo:H=0.7392,top10E=0.21,eRank=206.8,q75/q25=inf mlp_w1:H=0.6935,top10E=0.39,eRank=121.8,q75/q25=7.64 mlp_w2:H=0.8230,top10E=0.17,eRank=240.0,q75/q25=13.87 vo_prod:H=0.6020,top10E=0.31,eRank=77.9,q75/q25=inf train_time:215575ms step_avg:71.86ms +[2025-09-02 16:57:31] [Rank 0] step:3001/10000 train_time:215587ms step_avg:71.84ms +[2025-09-02 16:57:31] [Rank 0] step:3001/10000 train_time:215587ms step_avg:71.84ms +[2025-09-02 16:57:32] [Rank 0] step:3021/10000 train_time:216922ms step_avg:71.80ms +[2025-09-02 16:57:32] [Rank 0] step:3021/10000 train_time:216922ms step_avg:71.80ms +[2025-09-02 16:57:34] [Rank 0] step:3041/10000 train_time:218401ms step_avg:71.82ms +[2025-09-02 
16:57:34] [Rank 0] step:3041/10000 train_time:218401ms step_avg:71.82ms +[2025-09-02 16:57:35] [Rank 0] step:3061/10000 train_time:219881ms step_avg:71.83ms +[2025-09-02 16:57:35] [Rank 0] step:3061/10000 train_time:219881ms step_avg:71.83ms +[2025-09-02 16:57:37] [Rank 0] step:3081/10000 train_time:221361ms step_avg:71.85ms +[2025-09-02 16:57:37] [Rank 0] step:3081/10000 train_time:221361ms step_avg:71.85ms +[2025-09-02 16:57:38] [Rank 0] step:3101/10000 train_time:222842ms step_avg:71.86ms +[2025-09-02 16:57:38] [Rank 0] step:3101/10000 train_time:222842ms step_avg:71.86ms +[2025-09-02 16:57:40] [Rank 0] step:3121/10000 train_time:224322ms step_avg:71.88ms +[2025-09-02 16:57:40] [Rank 0] step:3121/10000 train_time:224322ms step_avg:71.88ms +[2025-09-02 16:57:41] [Rank 0] step:3141/10000 train_time:225802ms step_avg:71.89ms +[2025-09-02 16:57:41] [Rank 0] step:3141/10000 train_time:225802ms step_avg:71.89ms +[2025-09-02 16:57:42] [Rank 0] step:3161/10000 train_time:227284ms step_avg:71.90ms +[2025-09-02 16:57:42] [Rank 0] step:3161/10000 train_time:227284ms step_avg:71.90ms +[2025-09-02 16:57:44] [Rank 0] step:3181/10000 train_time:228767ms step_avg:71.92ms +[2025-09-02 16:57:44] [Rank 0] step:3181/10000 train_time:228767ms step_avg:71.92ms +[2025-09-02 16:57:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:57:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:57:57] [Rank 0] PRINT: step:3200/10000 val_loss:4.3266 svd_entropy: attn_qk:H=0.7091,top10E=0.33,eRank=141.2,q75/q25=64.88 attn_vo:H=0.7437,top10E=0.20,eRank=211.2,q75/q25=inf mlp_w1:H=0.7016,top10E=0.38,eRank=127.5,q75/q25=7.97 mlp_w2:H=0.8268,top10E=0.17,eRank=246.2,q75/q25=14.21 vo_prod:H=0.6078,top10E=0.30,eRank=81.2,q75/q25=inf train_time:230400ms step_avg:72.00ms +[2025-09-02 16:57:57] [Rank 0] PRINT: step:3200/10000 val_loss:4.3266 svd_entropy: attn_qk:H=0.7091,top10E=0.33,eRank=141.2,q75/q25=64.88 attn_vo:H=0.7437,top10E=0.20,eRank=211.2,q75/q25=inf mlp_w1:H=0.7016,top10E=0.38,eRank=127.5,q75/q25=7.97 mlp_w2:H=0.8268,top10E=0.17,eRank=246.2,q75/q25=14.21 vo_prod:H=0.6078,top10E=0.30,eRank=81.2,q75/q25=inf train_time:230400ms step_avg:72.00ms +[2025-09-02 16:57:57] [Rank 0] step:3201/10000 train_time:230411ms step_avg:71.98ms +[2025-09-02 16:57:57] [Rank 0] step:3201/10000 train_time:230411ms step_avg:71.98ms +[2025-09-02 16:57:59] [Rank 0] step:3221/10000 train_time:231753ms step_avg:71.95ms +[2025-09-02 16:57:59] [Rank 0] step:3221/10000 train_time:231753ms step_avg:71.95ms +[2025-09-02 16:58:00] [Rank 0] step:3241/10000 train_time:233235ms step_avg:71.96ms +[2025-09-02 16:58:00] [Rank 0] step:3241/10000 train_time:233235ms step_avg:71.96ms +[2025-09-02 16:58:02] [Rank 0] step:3261/10000 train_time:234718ms step_avg:71.98ms +[2025-09-02 16:58:02] [Rank 0] step:3261/10000 train_time:234718ms step_avg:71.98ms +[2025-09-02 16:58:03] [Rank 0] step:3281/10000 train_time:236201ms step_avg:71.99ms +[2025-09-02 16:58:03] [Rank 0] step:3281/10000 train_time:236201ms step_avg:71.99ms +[2025-09-02 16:58:05] [Rank 0] step:3301/10000 train_time:237685ms step_avg:72.00ms +[2025-09-02 16:58:05] [Rank 0] step:3301/10000 train_time:237685ms step_avg:72.00ms +[2025-09-02 16:58:06] [Rank 0] step:3321/10000 train_time:239170ms step_avg:72.02ms +[2025-09-02 16:58:06] [Rank 0] step:3321/10000 train_time:239170ms step_avg:72.02ms +[2025-09-02 16:58:08] [Rank 0] 
step:3341/10000 train_time:240653ms step_avg:72.03ms +[2025-09-02 16:58:08] [Rank 0] step:3341/10000 train_time:240653ms step_avg:72.03ms +[2025-09-02 16:58:09] [Rank 0] step:3361/10000 train_time:242138ms step_avg:72.04ms +[2025-09-02 16:58:09] [Rank 0] step:3361/10000 train_time:242138ms step_avg:72.04ms +[2025-09-02 16:58:11] [Rank 0] step:3381/10000 train_time:243624ms step_avg:72.06ms +[2025-09-02 16:58:11] [Rank 0] step:3381/10000 train_time:243624ms step_avg:72.06ms +[2025-09-02 16:58:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:58:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:58:24] [Rank 0] PRINT: step:3400/10000 val_loss:4.2844 svd_entropy: attn_qk:H=0.7137,top10E=0.32,eRank=144.0,q75/q25=68.16 attn_vo:H=0.7480,top10E=0.20,eRank=215.5,q75/q25=inf mlp_w1:H=0.7094,top10E=0.37,eRank=133.2,q75/q25=8.28 mlp_w2:H=0.8306,top10E=0.16,eRank=252.5,q75/q25=14.40 vo_prod:H=0.6133,top10E=0.29,eRank=84.3,q75/q25=inf train_time:245259ms step_avg:72.13ms +[2025-09-02 16:58:24] [Rank 0] PRINT: step:3400/10000 val_loss:4.2844 svd_entropy: attn_qk:H=0.7137,top10E=0.32,eRank=144.0,q75/q25=68.16 attn_vo:H=0.7480,top10E=0.20,eRank=215.5,q75/q25=inf mlp_w1:H=0.7094,top10E=0.37,eRank=133.2,q75/q25=8.28 mlp_w2:H=0.8306,top10E=0.16,eRank=252.5,q75/q25=14.40 vo_prod:H=0.6133,top10E=0.29,eRank=84.3,q75/q25=inf train_time:245259ms step_avg:72.13ms +[2025-09-02 16:58:24] [Rank 0] step:3401/10000 train_time:245270ms step_avg:72.12ms +[2025-09-02 16:58:24] [Rank 0] step:3401/10000 train_time:245270ms step_avg:72.12ms +[2025-09-02 16:58:25] [Rank 0] step:3421/10000 train_time:246634ms step_avg:72.09ms +[2025-09-02 16:58:25] [Rank 0] step:3421/10000 train_time:246634ms step_avg:72.09ms +[2025-09-02 16:58:27] [Rank 0] step:3441/10000 train_time:248114ms step_avg:72.11ms +[2025-09-02 
16:58:27] [Rank 0] step:3441/10000 train_time:248114ms step_avg:72.11ms +[2025-09-02 16:58:28] [Rank 0] step:3461/10000 train_time:249595ms step_avg:72.12ms +[2025-09-02 16:58:28] [Rank 0] step:3461/10000 train_time:249595ms step_avg:72.12ms +[2025-09-02 16:58:30] [Rank 0] step:3481/10000 train_time:251077ms step_avg:72.13ms +[2025-09-02 16:58:30] [Rank 0] step:3481/10000 train_time:251077ms step_avg:72.13ms +[2025-09-02 16:58:31] [Rank 0] step:3501/10000 train_time:252559ms step_avg:72.14ms +[2025-09-02 16:58:31] [Rank 0] step:3501/10000 train_time:252559ms step_avg:72.14ms +[2025-09-02 16:58:33] [Rank 0] step:3521/10000 train_time:254065ms step_avg:72.16ms +[2025-09-02 16:58:33] [Rank 0] step:3521/10000 train_time:254065ms step_avg:72.16ms +[2025-09-02 16:58:34] [Rank 0] step:3541/10000 train_time:255559ms step_avg:72.17ms +[2025-09-02 16:58:34] [Rank 0] step:3541/10000 train_time:255559ms step_avg:72.17ms +[2025-09-02 16:58:36] [Rank 0] step:3561/10000 train_time:257039ms step_avg:72.18ms +[2025-09-02 16:58:36] [Rank 0] step:3561/10000 train_time:257039ms step_avg:72.18ms +[2025-09-02 16:58:37] [Rank 0] step:3581/10000 train_time:258521ms step_avg:72.19ms +[2025-09-02 16:58:37] [Rank 0] step:3581/10000 train_time:258521ms step_avg:72.19ms +[2025-09-02 16:58:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:58:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:58:50] [Rank 0] PRINT: step:3600/10000 val_loss:4.2718 svd_entropy: attn_qk:H=0.7177,top10E=0.31,eRank=146.7,q75/q25=70.64 attn_vo:H=0.7518,top10E=0.19,eRank=219.3,q75/q25=inf mlp_w1:H=0.7166,top10E=0.36,eRank=138.7,q75/q25=8.66 mlp_w2:H=0.8335,top10E=0.16,eRank=257.5,q75/q25=14.71 vo_prod:H=0.6179,top10E=0.28,eRank=87.0,q75/q25=inf train_time:260153ms step_avg:72.26ms +[2025-09-02 16:58:50] [Rank 0] PRINT: step:3600/10000 val_loss:4.2718 svd_entropy: attn_qk:H=0.7177,top10E=0.31,eRank=146.7,q75/q25=70.64 attn_vo:H=0.7518,top10E=0.19,eRank=219.3,q75/q25=inf mlp_w1:H=0.7166,top10E=0.36,eRank=138.7,q75/q25=8.66 mlp_w2:H=0.8335,top10E=0.16,eRank=257.5,q75/q25=14.71 vo_prod:H=0.6179,top10E=0.28,eRank=87.0,q75/q25=inf train_time:260153ms step_avg:72.26ms +[2025-09-02 16:58:50] [Rank 0] step:3601/10000 train_time:260164ms step_avg:72.25ms +[2025-09-02 16:58:50] [Rank 0] step:3601/10000 train_time:260164ms step_avg:72.25ms +[2025-09-02 16:58:52] [Rank 0] step:3621/10000 train_time:261501ms step_avg:72.22ms +[2025-09-02 16:58:52] [Rank 0] step:3621/10000 train_time:261501ms step_avg:72.22ms +[2025-09-02 16:58:53] [Rank 0] step:3641/10000 train_time:262978ms step_avg:72.23ms +[2025-09-02 16:58:53] [Rank 0] step:3641/10000 train_time:262978ms step_avg:72.23ms +[2025-09-02 16:58:55] [Rank 0] step:3661/10000 train_time:264458ms step_avg:72.24ms +[2025-09-02 16:58:55] [Rank 0] step:3661/10000 train_time:264458ms step_avg:72.24ms +[2025-09-02 16:58:56] [Rank 0] step:3681/10000 train_time:265940ms step_avg:72.25ms +[2025-09-02 16:58:56] [Rank 0] step:3681/10000 train_time:265940ms step_avg:72.25ms +[2025-09-02 16:58:58] [Rank 0] step:3701/10000 train_time:267421ms step_avg:72.26ms +[2025-09-02 16:58:58] [Rank 0] step:3701/10000 train_time:267421ms step_avg:72.26ms +[2025-09-02 16:58:59] [Rank 0] step:3721/10000 train_time:268928ms step_avg:72.27ms +[2025-09-02 16:58:59] [Rank 0] step:3721/10000 train_time:268928ms step_avg:72.27ms +[2025-09-02 16:59:01] [Rank 0] 
step:3741/10000 train_time:270445ms step_avg:72.29ms +[2025-09-02 16:59:01] [Rank 0] step:3741/10000 train_time:270445ms step_avg:72.29ms +[2025-09-02 16:59:02] [Rank 0] step:3761/10000 train_time:271965ms step_avg:72.31ms +[2025-09-02 16:59:02] [Rank 0] step:3761/10000 train_time:271965ms step_avg:72.31ms +[2025-09-02 16:59:04] [Rank 0] step:3781/10000 train_time:273484ms step_avg:72.33ms +[2025-09-02 16:59:04] [Rank 0] step:3781/10000 train_time:273484ms step_avg:72.33ms +[2025-09-02 16:59:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:59:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:59:17] [Rank 0] PRINT: step:3800/10000 val_loss:4.2121 svd_entropy: attn_qk:H=0.7213,top10E=0.31,eRank=149.1,q75/q25=73.53 attn_vo:H=0.7553,top10E=0.19,eRank=223.0,q75/q25=inf mlp_w1:H=0.7233,top10E=0.35,eRank=144.0,q75/q25=8.98 mlp_w2:H=0.8361,top10E=0.16,eRank=262.1,q75/q25=15.01 vo_prod:H=0.6223,top10E=0.28,eRank=89.8,q75/q25=inf train_time:275154ms step_avg:72.41ms +[2025-09-02 16:59:17] [Rank 0] PRINT: step:3800/10000 val_loss:4.2121 svd_entropy: attn_qk:H=0.7213,top10E=0.31,eRank=149.1,q75/q25=73.53 attn_vo:H=0.7553,top10E=0.19,eRank=223.0,q75/q25=inf mlp_w1:H=0.7233,top10E=0.35,eRank=144.0,q75/q25=8.98 mlp_w2:H=0.8361,top10E=0.16,eRank=262.1,q75/q25=15.01 vo_prod:H=0.6223,top10E=0.28,eRank=89.8,q75/q25=inf train_time:275154ms step_avg:72.41ms +[2025-09-02 16:59:17] [Rank 0] step:3801/10000 train_time:275165ms step_avg:72.39ms +[2025-09-02 16:59:17] [Rank 0] step:3801/10000 train_time:275165ms step_avg:72.39ms +[2025-09-02 16:59:19] [Rank 0] step:3821/10000 train_time:276555ms step_avg:72.38ms +[2025-09-02 16:59:19] [Rank 0] step:3821/10000 train_time:276555ms step_avg:72.38ms +[2025-09-02 16:59:20] [Rank 0] step:3841/10000 train_time:278075ms step_avg:72.40ms +[2025-09-02 
16:59:20] [Rank 0] step:3841/10000 train_time:278075ms step_avg:72.40ms +[2025-09-02 16:59:22] [Rank 0] step:3861/10000 train_time:279591ms step_avg:72.41ms +[2025-09-02 16:59:22] [Rank 0] step:3861/10000 train_time:279591ms step_avg:72.41ms +[2025-09-02 16:59:23] [Rank 0] step:3881/10000 train_time:281164ms step_avg:72.45ms +[2025-09-02 16:59:23] [Rank 0] step:3881/10000 train_time:281164ms step_avg:72.45ms +[2025-09-02 16:59:25] [Rank 0] step:3901/10000 train_time:282682ms step_avg:72.46ms +[2025-09-02 16:59:25] [Rank 0] step:3901/10000 train_time:282682ms step_avg:72.46ms +[2025-09-02 16:59:26] [Rank 0] step:3921/10000 train_time:284199ms step_avg:72.48ms +[2025-09-02 16:59:26] [Rank 0] step:3921/10000 train_time:284199ms step_avg:72.48ms +[2025-09-02 16:59:28] [Rank 0] step:3941/10000 train_time:285718ms step_avg:72.50ms +[2025-09-02 16:59:28] [Rank 0] step:3941/10000 train_time:285718ms step_avg:72.50ms +[2025-09-02 16:59:29] [Rank 0] step:3961/10000 train_time:287234ms step_avg:72.52ms +[2025-09-02 16:59:29] [Rank 0] step:3961/10000 train_time:287234ms step_avg:72.52ms +[2025-09-02 16:59:31] [Rank 0] step:3981/10000 train_time:288752ms step_avg:72.53ms +[2025-09-02 16:59:31] [Rank 0] step:3981/10000 train_time:288752ms step_avg:72.53ms +[2025-09-02 16:59:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:59:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:59:44] [Rank 0] PRINT: step:4000/10000 val_loss:4.1844 svd_entropy: attn_qk:H=0.7248,top10E=0.30,eRank=151.5,q75/q25=75.52 attn_vo:H=0.7586,top10E=0.18,eRank=226.4,q75/q25=inf mlp_w1:H=0.7297,top10E=0.34,eRank=149.4,q75/q25=9.40 mlp_w2:H=0.8385,top10E=0.15,eRank=266.3,q75/q25=15.39 vo_prod:H=0.6264,top10E=0.27,eRank=92.5,q75/q25=inf train_time:290421ms step_avg:72.61ms +[2025-09-02 16:59:44] [Rank 0] PRINT: step:4000/10000 val_loss:4.1844 svd_entropy: attn_qk:H=0.7248,top10E=0.30,eRank=151.5,q75/q25=75.52 attn_vo:H=0.7586,top10E=0.18,eRank=226.4,q75/q25=inf mlp_w1:H=0.7297,top10E=0.34,eRank=149.4,q75/q25=9.40 mlp_w2:H=0.8385,top10E=0.15,eRank=266.3,q75/q25=15.39 vo_prod:H=0.6264,top10E=0.27,eRank=92.5,q75/q25=inf train_time:290421ms step_avg:72.61ms +[2025-09-02 16:59:44] [Rank 0] step:4001/10000 train_time:290432ms step_avg:72.59ms +[2025-09-02 16:59:44] [Rank 0] step:4001/10000 train_time:290432ms step_avg:72.59ms +[2025-09-02 16:59:46] [Rank 0] step:4021/10000 train_time:291802ms step_avg:72.57ms +[2025-09-02 16:59:46] [Rank 0] step:4021/10000 train_time:291802ms step_avg:72.57ms +[2025-09-02 16:59:47] [Rank 0] step:4041/10000 train_time:293320ms step_avg:72.59ms +[2025-09-02 16:59:47] [Rank 0] step:4041/10000 train_time:293320ms step_avg:72.59ms +[2025-09-02 16:59:49] [Rank 0] step:4061/10000 train_time:294839ms step_avg:72.60ms +[2025-09-02 16:59:49] [Rank 0] step:4061/10000 train_time:294839ms step_avg:72.60ms +[2025-09-02 16:59:50] [Rank 0] step:4081/10000 train_time:296462ms step_avg:72.64ms +[2025-09-02 16:59:50] [Rank 0] step:4081/10000 train_time:296462ms step_avg:72.64ms +[2025-09-02 16:59:52] [Rank 0] step:4101/10000 train_time:297980ms step_avg:72.66ms +[2025-09-02 16:59:52] [Rank 0] step:4101/10000 train_time:297980ms step_avg:72.66ms +[2025-09-02 16:59:53] [Rank 0] step:4121/10000 train_time:299500ms step_avg:72.68ms +[2025-09-02 16:59:53] [Rank 0] step:4121/10000 train_time:299500ms step_avg:72.68ms +[2025-09-02 16:59:55] [Rank 0] 
step:4141/10000 train_time:301019ms step_avg:72.69ms +[2025-09-02 16:59:55] [Rank 0] step:4141/10000 train_time:301019ms step_avg:72.69ms +[2025-09-02 16:59:56] [Rank 0] step:4161/10000 train_time:302538ms step_avg:72.71ms +[2025-09-02 16:59:56] [Rank 0] step:4161/10000 train_time:302538ms step_avg:72.71ms +[2025-09-02 16:59:58] [Rank 0] step:4181/10000 train_time:304059ms step_avg:72.72ms +[2025-09-02 16:59:58] [Rank 0] step:4181/10000 train_time:304059ms step_avg:72.72ms +[2025-09-02 16:59:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:59:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:00:11] [Rank 0] PRINT: step:4200/10000 val_loss:4.1680 svd_entropy: attn_qk:H=0.7282,top10E=0.30,eRank=153.8,q75/q25=77.19 attn_vo:H=0.7615,top10E=0.18,eRank=229.6,q75/q25=inf mlp_w1:H=0.7352,top10E=0.33,eRank=154.2,q75/q25=9.79 mlp_w2:H=0.8405,top10E=0.15,eRank=269.9,q75/q25=15.65 vo_prod:H=0.6300,top10E=0.27,eRank=94.9,q75/q25=inf train_time:305731ms step_avg:72.79ms +[2025-09-02 17:00:11] [Rank 0] PRINT: step:4200/10000 val_loss:4.1680 svd_entropy: attn_qk:H=0.7282,top10E=0.30,eRank=153.8,q75/q25=77.19 attn_vo:H=0.7615,top10E=0.18,eRank=229.6,q75/q25=inf mlp_w1:H=0.7352,top10E=0.33,eRank=154.2,q75/q25=9.79 mlp_w2:H=0.8405,top10E=0.15,eRank=269.9,q75/q25=15.65 vo_prod:H=0.6300,top10E=0.27,eRank=94.9,q75/q25=inf train_time:305731ms step_avg:72.79ms +[2025-09-02 17:00:11] [Rank 0] step:4201/10000 train_time:305743ms step_avg:72.78ms +[2025-09-02 17:00:11] [Rank 0] step:4201/10000 train_time:305743ms step_avg:72.78ms +[2025-09-02 17:00:13] [Rank 0] step:4221/10000 train_time:307111ms step_avg:72.76ms +[2025-09-02 17:00:13] [Rank 0] step:4221/10000 train_time:307111ms step_avg:72.76ms +[2025-09-02 17:00:14] [Rank 0] step:4241/10000 train_time:308632ms step_avg:72.77ms +[2025-09-02 
17:00:14] [Rank 0] step:4241/10000 train_time:308632ms step_avg:72.77ms +[2025-09-02 17:00:16] [Rank 0] step:4261/10000 train_time:310152ms step_avg:72.79ms +[2025-09-02 17:00:16] [Rank 0] step:4261/10000 train_time:310152ms step_avg:72.79ms +[2025-09-02 17:00:17] [Rank 0] step:4281/10000 train_time:311674ms step_avg:72.80ms +[2025-09-02 17:00:17] [Rank 0] step:4281/10000 train_time:311674ms step_avg:72.80ms +[2025-09-02 17:00:19] [Rank 0] step:4301/10000 train_time:313197ms step_avg:72.82ms +[2025-09-02 17:00:19] [Rank 0] step:4301/10000 train_time:313197ms step_avg:72.82ms +[2025-09-02 17:00:20] [Rank 0] step:4321/10000 train_time:314721ms step_avg:72.84ms +[2025-09-02 17:00:20] [Rank 0] step:4321/10000 train_time:314721ms step_avg:72.84ms +[2025-09-02 17:00:22] [Rank 0] step:4341/10000 train_time:316240ms step_avg:72.85ms +[2025-09-02 17:00:22] [Rank 0] step:4341/10000 train_time:316240ms step_avg:72.85ms +[2025-09-02 17:00:23] [Rank 0] step:4361/10000 train_time:317762ms step_avg:72.86ms +[2025-09-02 17:00:23] [Rank 0] step:4361/10000 train_time:317762ms step_avg:72.86ms +[2025-09-02 17:00:25] [Rank 0] step:4381/10000 train_time:319332ms step_avg:72.89ms +[2025-09-02 17:00:25] [Rank 0] step:4381/10000 train_time:319332ms step_avg:72.89ms +[2025-09-02 17:00:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:00:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:00:38] [Rank 0] PRINT: step:4400/10000 val_loss:4.1406 svd_entropy: attn_qk:H=0.7313,top10E=0.30,eRank=156.1,q75/q25=78.97 attn_vo:H=0.7643,top10E=0.17,eRank=232.7,q75/q25=inf mlp_w1:H=0.7407,top10E=0.33,eRank=159.1,q75/q25=10.15 mlp_w2:H=0.8425,top10E=0.15,eRank=273.5,q75/q25=16.00 vo_prod:H=0.6334,top10E=0.26,eRank=97.1,q75/q25=inf train_time:321007ms step_avg:72.96ms +[2025-09-02 17:00:38] [Rank 0] PRINT: step:4400/10000 val_loss:4.1406 svd_entropy: attn_qk:H=0.7313,top10E=0.30,eRank=156.1,q75/q25=78.97 attn_vo:H=0.7643,top10E=0.17,eRank=232.7,q75/q25=inf mlp_w1:H=0.7407,top10E=0.33,eRank=159.1,q75/q25=10.15 mlp_w2:H=0.8425,top10E=0.15,eRank=273.5,q75/q25=16.00 vo_prod:H=0.6334,top10E=0.26,eRank=97.1,q75/q25=inf train_time:321007ms step_avg:72.96ms +[2025-09-02 17:00:38] [Rank 0] step:4401/10000 train_time:321018ms step_avg:72.94ms +[2025-09-02 17:00:38] [Rank 0] step:4401/10000 train_time:321018ms step_avg:72.94ms +[2025-09-02 17:00:40] [Rank 0] step:4421/10000 train_time:322393ms step_avg:72.92ms +[2025-09-02 17:00:40] [Rank 0] step:4421/10000 train_time:322393ms step_avg:72.92ms +[2025-09-02 17:00:41] [Rank 0] step:4441/10000 train_time:323909ms step_avg:72.94ms +[2025-09-02 17:00:41] [Rank 0] step:4441/10000 train_time:323909ms step_avg:72.94ms +[2025-09-02 17:00:43] [Rank 0] step:4461/10000 train_time:325432ms step_avg:72.95ms +[2025-09-02 17:00:43] [Rank 0] step:4461/10000 train_time:325432ms step_avg:72.95ms +[2025-09-02 17:00:44] [Rank 0] step:4481/10000 train_time:326954ms step_avg:72.96ms +[2025-09-02 17:00:44] [Rank 0] step:4481/10000 train_time:326954ms step_avg:72.96ms +[2025-09-02 17:00:46] [Rank 0] step:4501/10000 train_time:328479ms step_avg:72.98ms +[2025-09-02 17:00:46] [Rank 0] step:4501/10000 train_time:328479ms step_avg:72.98ms +[2025-09-02 17:00:47] [Rank 0] step:4521/10000 train_time:330001ms step_avg:72.99ms +[2025-09-02 17:00:47] [Rank 0] step:4521/10000 train_time:330001ms step_avg:72.99ms +[2025-09-02 17:00:49] [Rank 0] 
step:4541/10000 train_time:331526ms step_avg:73.01ms +[2025-09-02 17:00:49] [Rank 0] step:4541/10000 train_time:331526ms step_avg:73.01ms +[2025-09-02 17:00:50] [Rank 0] step:4561/10000 train_time:333051ms step_avg:73.02ms +[2025-09-02 17:00:50] [Rank 0] step:4561/10000 train_time:333051ms step_avg:73.02ms +[2025-09-02 17:00:52] [Rank 0] step:4581/10000 train_time:334577ms step_avg:73.04ms +[2025-09-02 17:00:52] [Rank 0] step:4581/10000 train_time:334577ms step_avg:73.04ms +[2025-09-02 17:00:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:00:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:01:05] [Rank 0] PRINT: step:4600/10000 val_loss:4.1081 svd_entropy: attn_qk:H=0.7342,top10E=0.29,eRank=158.2,q75/q25=81.12 attn_vo:H=0.7670,top10E=0.17,eRank=235.8,q75/q25=inf mlp_w1:H=0.7457,top10E=0.32,eRank=163.9,q75/q25=10.56 mlp_w2:H=0.8443,top10E=0.15,eRank=276.9,q75/q25=16.26 vo_prod:H=0.6368,top10E=0.26,eRank=99.4,q75/q25=inf train_time:336258ms step_avg:73.10ms +[2025-09-02 17:01:05] [Rank 0] PRINT: step:4600/10000 val_loss:4.1081 svd_entropy: attn_qk:H=0.7342,top10E=0.29,eRank=158.2,q75/q25=81.12 attn_vo:H=0.7670,top10E=0.17,eRank=235.8,q75/q25=inf mlp_w1:H=0.7457,top10E=0.32,eRank=163.9,q75/q25=10.56 mlp_w2:H=0.8443,top10E=0.15,eRank=276.9,q75/q25=16.26 vo_prod:H=0.6368,top10E=0.26,eRank=99.4,q75/q25=inf train_time:336258ms step_avg:73.10ms +[2025-09-02 17:01:05] [Rank 0] step:4601/10000 train_time:336269ms step_avg:73.09ms +[2025-09-02 17:01:05] [Rank 0] step:4601/10000 train_time:336269ms step_avg:73.09ms +[2025-09-02 17:01:07] [Rank 0] step:4621/10000 train_time:337668ms step_avg:73.07ms +[2025-09-02 17:01:07] [Rank 0] step:4621/10000 train_time:337668ms step_avg:73.07ms +[2025-09-02 17:01:08] [Rank 0] step:4641/10000 train_time:339197ms step_avg:73.09ms +[2025-09-02 
17:01:08] [Rank 0] step:4641/10000 train_time:339197ms step_avg:73.09ms +[2025-09-02 17:01:10] [Rank 0] step:4661/10000 train_time:340722ms step_avg:73.10ms +[2025-09-02 17:01:10] [Rank 0] step:4661/10000 train_time:340722ms step_avg:73.10ms +[2025-09-02 17:01:11] [Rank 0] step:4681/10000 train_time:342247ms step_avg:73.11ms +[2025-09-02 17:01:11] [Rank 0] step:4681/10000 train_time:342247ms step_avg:73.11ms +[2025-09-02 17:01:13] [Rank 0] step:4701/10000 train_time:343774ms step_avg:73.13ms +[2025-09-02 17:01:13] [Rank 0] step:4701/10000 train_time:343774ms step_avg:73.13ms +[2025-09-02 17:01:14] [Rank 0] step:4721/10000 train_time:345299ms step_avg:73.14ms +[2025-09-02 17:01:14] [Rank 0] step:4721/10000 train_time:345299ms step_avg:73.14ms +[2025-09-02 17:01:16] [Rank 0] step:4741/10000 train_time:346825ms step_avg:73.15ms +[2025-09-02 17:01:16] [Rank 0] step:4741/10000 train_time:346825ms step_avg:73.15ms +[2025-09-02 17:01:17] [Rank 0] step:4761/10000 train_time:348349ms step_avg:73.17ms +[2025-09-02 17:01:17] [Rank 0] step:4761/10000 train_time:348349ms step_avg:73.17ms +[2025-09-02 17:01:19] [Rank 0] step:4781/10000 train_time:349873ms step_avg:73.18ms +[2025-09-02 17:01:19] [Rank 0] step:4781/10000 train_time:349873ms step_avg:73.18ms +[2025-09-02 17:01:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:01:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:01:32] [Rank 0] PRINT: step:4800/10000 val_loss:4.0942 svd_entropy: attn_qk:H=0.7369,top10E=0.29,eRank=160.4,q75/q25=81.96 attn_vo:H=0.7695,top10E=0.17,eRank=238.7,q75/q25=inf mlp_w1:H=0.7505,top10E=0.31,eRank=168.4,q75/q25=10.92 mlp_w2:H=0.8460,top10E=0.14,eRank=280.1,q75/q25=16.57 vo_prod:H=0.6399,top10E=0.25,eRank=101.6,q75/q25=inf train_time:351553ms step_avg:73.24ms +[2025-09-02 17:01:32] [Rank 0] PRINT: step:4800/10000 val_loss:4.0942 svd_entropy: attn_qk:H=0.7369,top10E=0.29,eRank=160.4,q75/q25=81.96 attn_vo:H=0.7695,top10E=0.17,eRank=238.7,q75/q25=inf mlp_w1:H=0.7505,top10E=0.31,eRank=168.4,q75/q25=10.92 mlp_w2:H=0.8460,top10E=0.14,eRank=280.1,q75/q25=16.57 vo_prod:H=0.6399,top10E=0.25,eRank=101.6,q75/q25=inf train_time:351553ms step_avg:73.24ms +[2025-09-02 17:01:32] [Rank 0] step:4801/10000 train_time:351564ms step_avg:73.23ms +[2025-09-02 17:01:32] [Rank 0] step:4801/10000 train_time:351564ms step_avg:73.23ms +[2025-09-02 17:01:34] [Rank 0] step:4821/10000 train_time:352968ms step_avg:73.21ms +[2025-09-02 17:01:34] [Rank 0] step:4821/10000 train_time:352968ms step_avg:73.21ms +[2025-09-02 17:01:35] [Rank 0] step:4841/10000 train_time:354491ms step_avg:73.23ms +[2025-09-02 17:01:35] [Rank 0] step:4841/10000 train_time:354491ms step_avg:73.23ms +[2025-09-02 17:01:37] [Rank 0] step:4861/10000 train_time:356018ms step_avg:73.24ms +[2025-09-02 17:01:37] [Rank 0] step:4861/10000 train_time:356018ms step_avg:73.24ms +[2025-09-02 17:01:38] [Rank 0] step:4881/10000 train_time:357541ms step_avg:73.25ms +[2025-09-02 17:01:38] [Rank 0] step:4881/10000 train_time:357541ms step_avg:73.25ms +[2025-09-02 17:01:40] [Rank 0] step:4901/10000 train_time:359065ms step_avg:73.26ms +[2025-09-02 17:01:40] [Rank 0] step:4901/10000 train_time:359065ms step_avg:73.26ms +[2025-09-02 17:01:41] [Rank 0] step:4921/10000 train_time:360592ms step_avg:73.28ms +[2025-09-02 17:01:41] [Rank 0] step:4921/10000 train_time:360592ms step_avg:73.28ms +[2025-09-02 17:01:43] [Rank 
0] step:4941/10000 train_time:362120ms step_avg:73.29ms +[2025-09-02 17:01:43] [Rank 0] step:4941/10000 train_time:362120ms step_avg:73.29ms +[2025-09-02 17:01:44] [Rank 0] step:4961/10000 train_time:363641ms step_avg:73.30ms +[2025-09-02 17:01:44] [Rank 0] step:4961/10000 train_time:363641ms step_avg:73.30ms +[2025-09-02 17:01:46] [Rank 0] step:4981/10000 train_time:365169ms step_avg:73.31ms +[2025-09-02 17:01:46] [Rank 0] step:4981/10000 train_time:365169ms step_avg:73.31ms +[2025-09-02 17:01:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:01:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:01:59] [Rank 0] PRINT: step:5000/10000 val_loss:4.0772 svd_entropy: attn_qk:H=0.7395,top10E=0.28,eRank=162.3,q75/q25=83.11 attn_vo:H=0.7719,top10E=0.16,eRank=241.5,q75/q25=inf mlp_w1:H=0.7548,top10E=0.31,eRank=172.6,q75/q25=11.35 mlp_w2:H=0.8475,top10E=0.14,eRank=283.0,q75/q25=16.86 vo_prod:H=0.6428,top10E=0.25,eRank=103.6,q75/q25=inf train_time:366851ms step_avg:73.37ms +[2025-09-02 17:01:59] [Rank 0] PRINT: step:5000/10000 val_loss:4.0772 svd_entropy: attn_qk:H=0.7395,top10E=0.28,eRank=162.3,q75/q25=83.11 attn_vo:H=0.7719,top10E=0.16,eRank=241.5,q75/q25=inf mlp_w1:H=0.7548,top10E=0.31,eRank=172.6,q75/q25=11.35 mlp_w2:H=0.8475,top10E=0.14,eRank=283.0,q75/q25=16.86 vo_prod:H=0.6428,top10E=0.25,eRank=103.6,q75/q25=inf train_time:366851ms step_avg:73.37ms +[2025-09-02 17:01:59] [Rank 0] step:5001/10000 train_time:366862ms step_avg:73.36ms +[2025-09-02 17:01:59] [Rank 0] step:5001/10000 train_time:366862ms step_avg:73.36ms +[2025-09-02 17:02:01] [Rank 0] step:5021/10000 train_time:368236ms step_avg:73.34ms +[2025-09-02 17:02:01] [Rank 0] step:5021/10000 train_time:368236ms step_avg:73.34ms +[2025-09-02 17:02:02] [Rank 0] step:5041/10000 train_time:369760ms step_avg:73.35ms +[2025-09-02 
17:02:02] [Rank 0] step:5041/10000 train_time:369760ms step_avg:73.35ms +[2025-09-02 17:02:04] [Rank 0] step:5061/10000 train_time:371282ms step_avg:73.36ms +[2025-09-02 17:02:04] [Rank 0] step:5061/10000 train_time:371282ms step_avg:73.36ms +[2025-09-02 17:02:05] [Rank 0] step:5081/10000 train_time:372807ms step_avg:73.37ms +[2025-09-02 17:02:05] [Rank 0] step:5081/10000 train_time:372807ms step_avg:73.37ms +[2025-09-02 17:02:07] [Rank 0] step:5101/10000 train_time:374334ms step_avg:73.38ms +[2025-09-02 17:02:07] [Rank 0] step:5101/10000 train_time:374334ms step_avg:73.38ms +[2025-09-02 17:02:08] [Rank 0] step:5121/10000 train_time:375861ms step_avg:73.40ms +[2025-09-02 17:02:08] [Rank 0] step:5121/10000 train_time:375861ms step_avg:73.40ms +[2025-09-02 17:02:10] [Rank 0] step:5141/10000 train_time:377391ms step_avg:73.41ms +[2025-09-02 17:02:10] [Rank 0] step:5141/10000 train_time:377391ms step_avg:73.41ms +[2025-09-02 17:02:11] [Rank 0] step:5161/10000 train_time:378928ms step_avg:73.42ms +[2025-09-02 17:02:11] [Rank 0] step:5161/10000 train_time:378928ms step_avg:73.42ms +[2025-09-02 17:02:13] [Rank 0] step:5181/10000 train_time:380457ms step_avg:73.43ms +[2025-09-02 17:02:13] [Rank 0] step:5181/10000 train_time:380457ms step_avg:73.43ms +[2025-09-02 17:02:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:02:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:02:26] [Rank 0] PRINT: step:5200/10000 val_loss:4.0538 svd_entropy: attn_qk:H=0.7420,top10E=0.28,eRank=164.3,q75/q25=83.96 attn_vo:H=0.7740,top10E=0.16,eRank=244.0,q75/q25=inf mlp_w1:H=0.7587,top10E=0.30,eRank=176.5,q75/q25=11.78 mlp_w2:H=0.8489,top10E=0.14,eRank=285.6,q75/q25=17.16 vo_prod:H=0.6453,top10E=0.25,eRank=105.5,q75/q25=inf train_time:382162ms step_avg:73.49ms +[2025-09-02 17:02:26] [Rank 0] PRINT: step:5200/10000 val_loss:4.0538 svd_entropy: attn_qk:H=0.7420,top10E=0.28,eRank=164.3,q75/q25=83.96 attn_vo:H=0.7740,top10E=0.16,eRank=244.0,q75/q25=inf mlp_w1:H=0.7587,top10E=0.30,eRank=176.5,q75/q25=11.78 mlp_w2:H=0.8489,top10E=0.14,eRank=285.6,q75/q25=17.16 vo_prod:H=0.6453,top10E=0.25,eRank=105.5,q75/q25=inf train_time:382162ms step_avg:73.49ms +[2025-09-02 17:02:26] [Rank 0] step:5201/10000 train_time:382173ms step_avg:73.48ms +[2025-09-02 17:02:26] [Rank 0] step:5201/10000 train_time:382173ms step_avg:73.48ms +[2025-09-02 17:02:28] [Rank 0] step:5221/10000 train_time:383586ms step_avg:73.47ms +[2025-09-02 17:02:28] [Rank 0] step:5221/10000 train_time:383586ms step_avg:73.47ms +[2025-09-02 17:02:29] [Rank 0] step:5241/10000 train_time:385140ms step_avg:73.49ms +[2025-09-02 17:02:29] [Rank 0] step:5241/10000 train_time:385140ms step_avg:73.49ms +[2025-09-02 17:02:31] [Rank 0] step:5261/10000 train_time:386769ms step_avg:73.52ms +[2025-09-02 17:02:31] [Rank 0] step:5261/10000 train_time:386769ms step_avg:73.52ms +[2025-09-02 17:02:32] [Rank 0] step:5281/10000 train_time:388328ms step_avg:73.53ms +[2025-09-02 17:02:32] [Rank 0] step:5281/10000 train_time:388328ms step_avg:73.53ms +[2025-09-02 17:02:34] [Rank 0] step:5301/10000 train_time:389897ms step_avg:73.55ms +[2025-09-02 17:02:34] [Rank 0] step:5301/10000 train_time:389897ms step_avg:73.55ms +[2025-09-02 17:02:35] [Rank 0] step:5321/10000 train_time:391455ms step_avg:73.57ms +[2025-09-02 17:02:35] [Rank 0] step:5321/10000 train_time:391455ms step_avg:73.57ms +[2025-09-02 17:02:37] [Rank 
0] step:5341/10000 train_time:393014ms step_avg:73.58ms +[2025-09-02 17:02:37] [Rank 0] step:5341/10000 train_time:393014ms step_avg:73.58ms +[2025-09-02 17:02:39] [Rank 0] step:5361/10000 train_time:394577ms step_avg:73.60ms +[2025-09-02 17:02:39] [Rank 0] step:5361/10000 train_time:394577ms step_avg:73.60ms +[2025-09-02 17:02:40] [Rank 0] step:5381/10000 train_time:396138ms step_avg:73.62ms +[2025-09-02 17:02:40] [Rank 0] step:5381/10000 train_time:396138ms step_avg:73.62ms +[2025-09-02 17:02:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:02:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:02:53] [Rank 0] PRINT: step:5400/10000 val_loss:4.0353 svd_entropy: attn_qk:H=0.7442,top10E=0.28,eRank=166.1,q75/q25=84.94 attn_vo:H=0.7761,top10E=0.16,eRank=246.5,q75/q25=inf mlp_w1:H=0.7624,top10E=0.29,eRank=180.3,q75/q25=12.16 mlp_w2:H=0.8502,top10E=0.14,eRank=288.2,q75/q25=17.46 vo_prod:H=0.6480,top10E=0.24,eRank=107.5,q75/q25=inf train_time:397851ms step_avg:73.68ms +[2025-09-02 17:02:53] [Rank 0] PRINT: step:5400/10000 val_loss:4.0353 svd_entropy: attn_qk:H=0.7442,top10E=0.28,eRank=166.1,q75/q25=84.94 attn_vo:H=0.7761,top10E=0.16,eRank=246.5,q75/q25=inf mlp_w1:H=0.7624,top10E=0.29,eRank=180.3,q75/q25=12.16 mlp_w2:H=0.8502,top10E=0.14,eRank=288.2,q75/q25=17.46 vo_prod:H=0.6480,top10E=0.24,eRank=107.5,q75/q25=inf train_time:397851ms step_avg:73.68ms +[2025-09-02 17:02:53] [Rank 0] step:5401/10000 train_time:397862ms step_avg:73.66ms +[2025-09-02 17:02:53] [Rank 0] step:5401/10000 train_time:397862ms step_avg:73.66ms +[2025-09-02 17:02:55] [Rank 0] step:5421/10000 train_time:399269ms step_avg:73.65ms +[2025-09-02 17:02:55] [Rank 0] step:5421/10000 train_time:399269ms step_avg:73.65ms +[2025-09-02 17:02:57] [Rank 0] step:5441/10000 train_time:400821ms step_avg:73.67ms +[2025-09-02 
17:02:57] [Rank 0] step:5441/10000 train_time:400821ms step_avg:73.67ms +[2025-09-02 17:02:58] [Rank 0] step:5461/10000 train_time:402379ms step_avg:73.68ms +[2025-09-02 17:02:58] [Rank 0] step:5461/10000 train_time:402379ms step_avg:73.68ms +[2025-09-02 17:03:00] [Rank 0] step:5481/10000 train_time:403939ms step_avg:73.70ms +[2025-09-02 17:03:00] [Rank 0] step:5481/10000 train_time:403939ms step_avg:73.70ms +[2025-09-02 17:03:01] [Rank 0] step:5501/10000 train_time:405501ms step_avg:73.71ms +[2025-09-02 17:03:01] [Rank 0] step:5501/10000 train_time:405501ms step_avg:73.71ms +[2025-09-02 17:03:03] [Rank 0] step:5521/10000 train_time:407066ms step_avg:73.73ms +[2025-09-02 17:03:03] [Rank 0] step:5521/10000 train_time:407066ms step_avg:73.73ms +[2025-09-02 17:03:04] [Rank 0] step:5541/10000 train_time:408622ms step_avg:73.75ms +[2025-09-02 17:03:04] [Rank 0] step:5541/10000 train_time:408622ms step_avg:73.75ms +[2025-09-02 17:03:06] [Rank 0] step:5561/10000 train_time:410180ms step_avg:73.76ms +[2025-09-02 17:03:06] [Rank 0] step:5561/10000 train_time:410180ms step_avg:73.76ms +[2025-09-02 17:03:07] [Rank 0] step:5581/10000 train_time:411741ms step_avg:73.78ms +[2025-09-02 17:03:07] [Rank 0] step:5581/10000 train_time:411741ms step_avg:73.78ms +[2025-09-02 17:03:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:03:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:03:21] [Rank 0] PRINT: step:5600/10000 val_loss:4.0209 svd_entropy: attn_qk:H=0.7464,top10E=0.27,eRank=167.9,q75/q25=85.29 attn_vo:H=0.7780,top10E=0.16,eRank=248.8,q75/q25=inf mlp_w1:H=0.7661,top10E=0.29,eRank=184.1,q75/q25=12.51 mlp_w2:H=0.8515,top10E=0.14,eRank=290.7,q75/q25=17.69 vo_prod:H=0.6502,top10E=0.24,eRank=109.2,q75/q25=inf train_time:413458ms step_avg:73.83ms +[2025-09-02 17:03:21] [Rank 0] PRINT: step:5600/10000 val_loss:4.0209 svd_entropy: attn_qk:H=0.7464,top10E=0.27,eRank=167.9,q75/q25=85.29 attn_vo:H=0.7780,top10E=0.16,eRank=248.8,q75/q25=inf mlp_w1:H=0.7661,top10E=0.29,eRank=184.1,q75/q25=12.51 mlp_w2:H=0.8515,top10E=0.14,eRank=290.7,q75/q25=17.69 vo_prod:H=0.6502,top10E=0.24,eRank=109.2,q75/q25=inf train_time:413458ms step_avg:73.83ms +[2025-09-02 17:03:21] [Rank 0] step:5601/10000 train_time:413469ms step_avg:73.82ms +[2025-09-02 17:03:21] [Rank 0] step:5601/10000 train_time:413469ms step_avg:73.82ms +[2025-09-02 17:03:23] [Rank 0] step:5621/10000 train_time:414899ms step_avg:73.81ms +[2025-09-02 17:03:23] [Rank 0] step:5621/10000 train_time:414899ms step_avg:73.81ms +[2025-09-02 17:03:24] [Rank 0] step:5641/10000 train_time:416455ms step_avg:73.83ms +[2025-09-02 17:03:24] [Rank 0] step:5641/10000 train_time:416455ms step_avg:73.83ms +[2025-09-02 17:03:26] [Rank 0] step:5661/10000 train_time:418008ms step_avg:73.84ms +[2025-09-02 17:03:26] [Rank 0] step:5661/10000 train_time:418008ms step_avg:73.84ms +[2025-09-02 17:03:27] [Rank 0] step:5681/10000 train_time:419569ms step_avg:73.85ms +[2025-09-02 17:03:27] [Rank 0] step:5681/10000 train_time:419569ms step_avg:73.85ms +[2025-09-02 17:03:29] [Rank 0] step:5701/10000 train_time:421122ms step_avg:73.87ms +[2025-09-02 17:03:29] [Rank 0] step:5701/10000 train_time:421122ms step_avg:73.87ms +[2025-09-02 17:03:30] [Rank 0] step:5721/10000 train_time:422682ms step_avg:73.88ms +[2025-09-02 17:03:30] [Rank 0] step:5721/10000 train_time:422682ms step_avg:73.88ms +[2025-09-02 17:03:32] [Rank 
0] step:5741/10000 train_time:424240ms step_avg:73.90ms +[2025-09-02 17:03:32] [Rank 0] step:5741/10000 train_time:424240ms step_avg:73.90ms +[2025-09-02 17:03:34] [Rank 0] step:5761/10000 train_time:425857ms step_avg:73.92ms +[2025-09-02 17:03:34] [Rank 0] step:5761/10000 train_time:425857ms step_avg:73.92ms +[2025-09-02 17:03:35] [Rank 0] step:5781/10000 train_time:427413ms step_avg:73.93ms +[2025-09-02 17:03:35] [Rank 0] step:5781/10000 train_time:427413ms step_avg:73.93ms +[2025-09-02 17:03:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:03:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:03:48] [Rank 0] PRINT: step:5800/10000 val_loss:4.0093 svd_entropy: attn_qk:H=0.7486,top10E=0.27,eRank=169.6,q75/q25=85.64 attn_vo:H=0.7798,top10E=0.15,eRank=251.1,q75/q25=inf mlp_w1:H=0.7694,top10E=0.28,eRank=187.6,q75/q25=12.91 mlp_w2:H=0.8527,top10E=0.14,eRank=293.0,q75/q25=17.96 vo_prod:H=0.6525,top10E=0.24,eRank=110.9,q75/q25=inf train_time:429129ms step_avg:73.99ms +[2025-09-02 17:03:48] [Rank 0] PRINT: step:5800/10000 val_loss:4.0093 svd_entropy: attn_qk:H=0.7486,top10E=0.27,eRank=169.6,q75/q25=85.64 attn_vo:H=0.7798,top10E=0.15,eRank=251.1,q75/q25=inf mlp_w1:H=0.7694,top10E=0.28,eRank=187.6,q75/q25=12.91 mlp_w2:H=0.8527,top10E=0.14,eRank=293.0,q75/q25=17.96 vo_prod:H=0.6525,top10E=0.24,eRank=110.9,q75/q25=inf train_time:429129ms step_avg:73.99ms +[2025-09-02 17:03:49] [Rank 0] step:5801/10000 train_time:429140ms step_avg:73.98ms +[2025-09-02 17:03:49] [Rank 0] step:5801/10000 train_time:429140ms step_avg:73.98ms +[2025-09-02 17:03:50] [Rank 0] step:5821/10000 train_time:430573ms step_avg:73.97ms +[2025-09-02 17:03:50] [Rank 0] step:5821/10000 train_time:430573ms step_avg:73.97ms +[2025-09-02 17:03:52] [Rank 0] step:5841/10000 train_time:432128ms step_avg:73.98ms +[2025-09-02 
17:03:52] [Rank 0] step:5841/10000 train_time:432128ms step_avg:73.98ms +[2025-09-02 17:03:53] [Rank 0] step:5861/10000 train_time:433688ms step_avg:74.00ms +[2025-09-02 17:03:53] [Rank 0] step:5861/10000 train_time:433688ms step_avg:74.00ms +[2025-09-02 17:03:55] [Rank 0] step:5881/10000 train_time:435248ms step_avg:74.01ms +[2025-09-02 17:03:55] [Rank 0] step:5881/10000 train_time:435248ms step_avg:74.01ms +[2025-09-02 17:03:56] [Rank 0] step:5901/10000 train_time:436806ms step_avg:74.02ms +[2025-09-02 17:03:56] [Rank 0] step:5901/10000 train_time:436806ms step_avg:74.02ms +[2025-09-02 17:03:58] [Rank 0] step:5921/10000 train_time:438365ms step_avg:74.04ms +[2025-09-02 17:03:58] [Rank 0] step:5921/10000 train_time:438365ms step_avg:74.04ms +[2025-09-02 17:04:00] [Rank 0] step:5941/10000 train_time:439927ms step_avg:74.05ms +[2025-09-02 17:04:00] [Rank 0] step:5941/10000 train_time:439927ms step_avg:74.05ms +[2025-09-02 17:04:01] [Rank 0] step:5961/10000 train_time:441491ms step_avg:74.06ms +[2025-09-02 17:04:01] [Rank 0] step:5961/10000 train_time:441491ms step_avg:74.06ms +[2025-09-02 17:04:03] [Rank 0] step:5981/10000 train_time:443055ms step_avg:74.08ms +[2025-09-02 17:04:03] [Rank 0] step:5981/10000 train_time:443055ms step_avg:74.08ms +[2025-09-02 17:04:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:04:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:04:16] [Rank 0] PRINT: step:6000/10000 val_loss:3.9925 svd_entropy: attn_qk:H=0.7506,top10E=0.27,eRank=171.3,q75/q25=85.99 attn_vo:H=0.7816,top10E=0.15,eRank=253.3,q75/q25=inf mlp_w1:H=0.7726,top10E=0.28,eRank=191.1,q75/q25=13.35 mlp_w2:H=0.8538,top10E=0.14,eRank=295.1,q75/q25=18.22 vo_prod:H=0.6546,top10E=0.24,eRank=112.5,q75/q25=inf train_time:444770ms step_avg:74.13ms +[2025-09-02 17:04:16] [Rank 0] PRINT: step:6000/10000 val_loss:3.9925 svd_entropy: attn_qk:H=0.7506,top10E=0.27,eRank=171.3,q75/q25=85.99 attn_vo:H=0.7816,top10E=0.15,eRank=253.3,q75/q25=inf mlp_w1:H=0.7726,top10E=0.28,eRank=191.1,q75/q25=13.35 mlp_w2:H=0.8538,top10E=0.14,eRank=295.1,q75/q25=18.22 vo_prod:H=0.6546,top10E=0.24,eRank=112.5,q75/q25=inf train_time:444770ms step_avg:74.13ms +[2025-09-02 17:04:16] [Rank 0] step:6001/10000 train_time:444782ms step_avg:74.12ms +[2025-09-02 17:04:16] [Rank 0] step:6001/10000 train_time:444782ms step_avg:74.12ms +[2025-09-02 17:04:18] [Rank 0] step:6021/10000 train_time:446197ms step_avg:74.11ms +[2025-09-02 17:04:18] [Rank 0] step:6021/10000 train_time:446197ms step_avg:74.11ms +[2025-09-02 17:04:19] [Rank 0] step:6041/10000 train_time:447757ms step_avg:74.12ms +[2025-09-02 17:04:19] [Rank 0] step:6041/10000 train_time:447757ms step_avg:74.12ms +[2025-09-02 17:04:21] [Rank 0] step:6061/10000 train_time:449325ms step_avg:74.13ms +[2025-09-02 17:04:21] [Rank 0] step:6061/10000 train_time:449325ms step_avg:74.13ms +[2025-09-02 17:04:22] [Rank 0] step:6081/10000 train_time:450889ms step_avg:74.15ms +[2025-09-02 17:04:22] [Rank 0] step:6081/10000 train_time:450889ms step_avg:74.15ms +[2025-09-02 17:04:24] [Rank 0] step:6101/10000 train_time:452455ms step_avg:74.16ms +[2025-09-02 17:04:24] [Rank 0] step:6101/10000 train_time:452455ms step_avg:74.16ms +[2025-09-02 17:04:25] [Rank 0] step:6121/10000 train_time:454096ms step_avg:74.19ms +[2025-09-02 17:04:25] [Rank 0] step:6121/10000 train_time:454096ms step_avg:74.19ms +[2025-09-02 17:04:27] [Rank 
0] step:6141/10000 train_time:455668ms step_avg:74.20ms +[2025-09-02 17:04:27] [Rank 0] step:6141/10000 train_time:455668ms step_avg:74.20ms +[2025-09-02 17:04:29] [Rank 0] step:6161/10000 train_time:457235ms step_avg:74.21ms +[2025-09-02 17:04:29] [Rank 0] step:6161/10000 train_time:457235ms step_avg:74.21ms +[2025-09-02 17:04:30] [Rank 0] step:6181/10000 train_time:458800ms step_avg:74.23ms +[2025-09-02 17:04:30] [Rank 0] step:6181/10000 train_time:458800ms step_avg:74.23ms +[2025-09-02 17:04:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:04:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:04:43] [Rank 0] PRINT: step:6200/10000 val_loss:3.9717 svd_entropy: attn_qk:H=0.7524,top10E=0.27,eRank=172.9,q75/q25=86.40 attn_vo:H=0.7833,top10E=0.15,eRank=255.5,q75/q25=inf mlp_w1:H=0.7755,top10E=0.28,eRank=194.3,q75/q25=13.68 mlp_w2:H=0.8549,top10E=0.13,eRank=297.5,q75/q25=18.44 vo_prod:H=0.6567,top10E=0.23,eRank=114.2,q75/q25=inf train_time:460522ms step_avg:74.28ms +[2025-09-02 17:04:43] [Rank 0] PRINT: step:6200/10000 val_loss:3.9717 svd_entropy: attn_qk:H=0.7524,top10E=0.27,eRank=172.9,q75/q25=86.40 attn_vo:H=0.7833,top10E=0.15,eRank=255.5,q75/q25=inf mlp_w1:H=0.7755,top10E=0.28,eRank=194.3,q75/q25=13.68 mlp_w2:H=0.8549,top10E=0.13,eRank=297.5,q75/q25=18.44 vo_prod:H=0.6567,top10E=0.23,eRank=114.2,q75/q25=inf train_time:460522ms step_avg:74.28ms +[2025-09-02 17:04:43] [Rank 0] step:6201/10000 train_time:460533ms step_avg:74.27ms +[2025-09-02 17:04:43] [Rank 0] step:6201/10000 train_time:460533ms step_avg:74.27ms +[2025-09-02 17:04:45] [Rank 0] step:6221/10000 train_time:461966ms step_avg:74.26ms +[2025-09-02 17:04:45] [Rank 0] step:6221/10000 train_time:461966ms step_avg:74.26ms +[2025-09-02 17:04:47] [Rank 0] step:6241/10000 train_time:463527ms step_avg:74.27ms +[2025-09-02 
17:04:47] [Rank 0] step:6241/10000 train_time:463527ms step_avg:74.27ms +[2025-09-02 17:04:48] [Rank 0] step:6261/10000 train_time:465091ms step_avg:74.28ms +[2025-09-02 17:04:48] [Rank 0] step:6261/10000 train_time:465091ms step_avg:74.28ms +[2025-09-02 17:04:50] [Rank 0] step:6281/10000 train_time:466659ms step_avg:74.30ms +[2025-09-02 17:04:50] [Rank 0] step:6281/10000 train_time:466659ms step_avg:74.30ms +[2025-09-02 17:04:51] [Rank 0] step:6301/10000 train_time:468227ms step_avg:74.31ms +[2025-09-02 17:04:51] [Rank 0] step:6301/10000 train_time:468227ms step_avg:74.31ms +[2025-09-02 17:04:53] [Rank 0] step:6321/10000 train_time:469790ms step_avg:74.32ms +[2025-09-02 17:04:53] [Rank 0] step:6321/10000 train_time:469790ms step_avg:74.32ms +[2025-09-02 17:04:54] [Rank 0] step:6341/10000 train_time:471360ms step_avg:74.34ms +[2025-09-02 17:04:54] [Rank 0] step:6341/10000 train_time:471360ms step_avg:74.34ms +[2025-09-02 17:04:56] [Rank 0] step:6361/10000 train_time:472930ms step_avg:74.35ms +[2025-09-02 17:04:56] [Rank 0] step:6361/10000 train_time:472930ms step_avg:74.35ms +[2025-09-02 17:04:58] [Rank 0] step:6381/10000 train_time:474508ms step_avg:74.36ms +[2025-09-02 17:04:58] [Rank 0] step:6381/10000 train_time:474508ms step_avg:74.36ms +[2025-09-02 17:04:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:04:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:05:11] [Rank 0] PRINT: step:6400/10000 val_loss:3.9566 svd_entropy: attn_qk:H=0.7542,top10E=0.26,eRank=174.4,q75/q25=86.81 attn_vo:H=0.7848,top10E=0.15,eRank=257.4,q75/q25=inf mlp_w1:H=0.7781,top10E=0.27,eRank=197.0,q75/q25=14.05 mlp_w2:H=0.8559,top10E=0.13,eRank=299.4,q75/q25=18.54 vo_prod:H=0.6586,top10E=0.23,eRank=115.7,q75/q25=inf train_time:476235ms step_avg:74.41ms +[2025-09-02 17:05:11] [Rank 0] PRINT: step:6400/10000 val_loss:3.9566 svd_entropy: attn_qk:H=0.7542,top10E=0.26,eRank=174.4,q75/q25=86.81 attn_vo:H=0.7848,top10E=0.15,eRank=257.4,q75/q25=inf mlp_w1:H=0.7781,top10E=0.27,eRank=197.0,q75/q25=14.05 mlp_w2:H=0.8559,top10E=0.13,eRank=299.4,q75/q25=18.54 vo_prod:H=0.6586,top10E=0.23,eRank=115.7,q75/q25=inf train_time:476235ms step_avg:74.41ms +[2025-09-02 17:05:11] [Rank 0] step:6401/10000 train_time:476246ms step_avg:74.40ms +[2025-09-02 17:05:11] [Rank 0] step:6401/10000 train_time:476246ms step_avg:74.40ms +[2025-09-02 17:05:12] [Rank 0] step:6421/10000 train_time:477673ms step_avg:74.39ms +[2025-09-02 17:05:12] [Rank 0] step:6421/10000 train_time:477673ms step_avg:74.39ms +[2025-09-02 17:05:14] [Rank 0] step:6441/10000 train_time:479235ms step_avg:74.40ms +[2025-09-02 17:05:14] [Rank 0] step:6441/10000 train_time:479235ms step_avg:74.40ms +[2025-09-02 17:05:15] [Rank 0] step:6461/10000 train_time:480800ms step_avg:74.42ms +[2025-09-02 17:05:15] [Rank 0] step:6461/10000 train_time:480800ms step_avg:74.42ms +[2025-09-02 17:05:17] [Rank 0] step:6481/10000 train_time:482370ms step_avg:74.43ms +[2025-09-02 17:05:17] [Rank 0] step:6481/10000 train_time:482370ms step_avg:74.43ms +[2025-09-02 17:05:19] [Rank 0] step:6501/10000 train_time:483929ms step_avg:74.44ms +[2025-09-02 17:05:19] [Rank 0] step:6501/10000 train_time:483929ms step_avg:74.44ms +[2025-09-02 17:05:20] [Rank 0] step:6521/10000 train_time:485488ms step_avg:74.45ms +[2025-09-02 17:05:20] [Rank 0] step:6521/10000 train_time:485488ms step_avg:74.45ms +[2025-09-02 17:05:22] [Rank 
0] step:6541/10000 train_time:487053ms step_avg:74.46ms +[2025-09-02 17:05:22] [Rank 0] step:6541/10000 train_time:487053ms step_avg:74.46ms +[2025-09-02 17:05:23] [Rank 0] step:6561/10000 train_time:488619ms step_avg:74.47ms +[2025-09-02 17:05:23] [Rank 0] step:6561/10000 train_time:488619ms step_avg:74.47ms +[2025-09-02 17:05:25] [Rank 0] step:6581/10000 train_time:490179ms step_avg:74.48ms +[2025-09-02 17:05:25] [Rank 0] step:6581/10000 train_time:490179ms step_avg:74.48ms +[2025-09-02 17:05:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:05:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:05:38] [Rank 0] PRINT: step:6600/10000 val_loss:3.9446 svd_entropy: attn_qk:H=0.7557,top10E=0.26,eRank=175.7,q75/q25=86.99 attn_vo:H=0.7861,top10E=0.15,eRank=259.2,q75/q25=inf mlp_w1:H=0.7804,top10E=0.27,eRank=199.6,q75/q25=14.31 mlp_w2:H=0.8569,top10E=0.13,eRank=301.5,q75/q25=18.66 vo_prod:H=0.6604,top10E=0.23,eRank=117.2,q75/q25=inf train_time:491900ms step_avg:74.53ms +[2025-09-02 17:05:38] [Rank 0] PRINT: step:6600/10000 val_loss:3.9446 svd_entropy: attn_qk:H=0.7557,top10E=0.26,eRank=175.7,q75/q25=86.99 attn_vo:H=0.7861,top10E=0.15,eRank=259.2,q75/q25=inf mlp_w1:H=0.7804,top10E=0.27,eRank=199.6,q75/q25=14.31 mlp_w2:H=0.8569,top10E=0.13,eRank=301.5,q75/q25=18.66 vo_prod:H=0.6604,top10E=0.23,eRank=117.2,q75/q25=inf train_time:491900ms step_avg:74.53ms +[2025-09-02 17:05:38] [Rank 0] step:6601/10000 train_time:491911ms step_avg:74.52ms +[2025-09-02 17:05:38] [Rank 0] step:6601/10000 train_time:491911ms step_avg:74.52ms +[2025-09-02 17:05:40] [Rank 0] step:6621/10000 train_time:493347ms step_avg:74.51ms +[2025-09-02 17:05:40] [Rank 0] step:6621/10000 train_time:493347ms step_avg:74.51ms +[2025-09-02 17:05:41] [Rank 0] step:6641/10000 train_time:494910ms step_avg:74.52ms +[2025-09-02 
17:05:41] [Rank 0] step:6641/10000 train_time:494910ms step_avg:74.52ms +[2025-09-02 17:05:43] [Rank 0] step:6661/10000 train_time:496475ms step_avg:74.53ms +[2025-09-02 17:05:43] [Rank 0] step:6661/10000 train_time:496475ms step_avg:74.53ms +[2025-09-02 17:05:44] [Rank 0] step:6681/10000 train_time:498055ms step_avg:74.55ms +[2025-09-02 17:05:44] [Rank 0] step:6681/10000 train_time:498055ms step_avg:74.55ms +[2025-09-02 17:05:46] [Rank 0] step:6701/10000 train_time:499653ms step_avg:74.56ms +[2025-09-02 17:05:46] [Rank 0] step:6701/10000 train_time:499653ms step_avg:74.56ms +[2025-09-02 17:05:48] [Rank 0] step:6721/10000 train_time:501246ms step_avg:74.58ms +[2025-09-02 17:05:48] [Rank 0] step:6721/10000 train_time:501246ms step_avg:74.58ms +[2025-09-02 17:05:49] [Rank 0] step:6741/10000 train_time:502831ms step_avg:74.59ms +[2025-09-02 17:05:49] [Rank 0] step:6741/10000 train_time:502831ms step_avg:74.59ms +[2025-09-02 17:05:51] [Rank 0] step:6761/10000 train_time:504421ms step_avg:74.61ms +[2025-09-02 17:05:51] [Rank 0] step:6761/10000 train_time:504421ms step_avg:74.61ms +[2025-09-02 17:05:52] [Rank 0] step:6781/10000 train_time:506018ms step_avg:74.62ms +[2025-09-02 17:05:52] [Rank 0] step:6781/10000 train_time:506018ms step_avg:74.62ms +[2025-09-02 17:05:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:05:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:06:06] [Rank 0] PRINT: step:6800/10000 val_loss:3.9290 svd_entropy: attn_qk:H=0.7571,top10E=0.26,eRank=177.0,q75/q25=87.49 attn_vo:H=0.7874,top10E=0.15,eRank=260.9,q75/q25=inf mlp_w1:H=0.7825,top10E=0.27,eRank=202.0,q75/q25=14.59 mlp_w2:H=0.8579,top10E=0.13,eRank=303.4,q75/q25=18.86 vo_prod:H=0.6622,top10E=0.23,eRank=118.6,q75/q25=inf train_time:507775ms step_avg:74.67ms +[2025-09-02 17:06:06] [Rank 0] PRINT: step:6800/10000 val_loss:3.9290 svd_entropy: attn_qk:H=0.7571,top10E=0.26,eRank=177.0,q75/q25=87.49 attn_vo:H=0.7874,top10E=0.15,eRank=260.9,q75/q25=inf mlp_w1:H=0.7825,top10E=0.27,eRank=202.0,q75/q25=14.59 mlp_w2:H=0.8579,top10E=0.13,eRank=303.4,q75/q25=18.86 vo_prod:H=0.6622,top10E=0.23,eRank=118.6,q75/q25=inf train_time:507775ms step_avg:74.67ms +[2025-09-02 17:06:06] [Rank 0] step:6801/10000 train_time:507787ms step_avg:74.66ms +[2025-09-02 17:06:06] [Rank 0] step:6801/10000 train_time:507787ms step_avg:74.66ms +[2025-09-02 17:06:07] [Rank 0] step:6821/10000 train_time:509241ms step_avg:74.66ms +[2025-09-02 17:06:07] [Rank 0] step:6821/10000 train_time:509241ms step_avg:74.66ms +[2025-09-02 17:06:09] [Rank 0] step:6841/10000 train_time:510826ms step_avg:74.67ms +[2025-09-02 17:06:09] [Rank 0] step:6841/10000 train_time:510826ms step_avg:74.67ms +[2025-09-02 17:06:11] [Rank 0] step:6861/10000 train_time:512417ms step_avg:74.69ms +[2025-09-02 17:06:11] [Rank 0] step:6861/10000 train_time:512417ms step_avg:74.69ms +[2025-09-02 17:06:12] [Rank 0] step:6881/10000 train_time:514008ms step_avg:74.70ms +[2025-09-02 17:06:12] [Rank 0] step:6881/10000 train_time:514008ms step_avg:74.70ms +[2025-09-02 17:06:14] [Rank 0] step:6901/10000 train_time:515601ms step_avg:74.71ms +[2025-09-02 17:06:14] [Rank 0] step:6901/10000 train_time:515601ms step_avg:74.71ms +[2025-09-02 17:06:15] [Rank 0] step:6921/10000 train_time:517188ms step_avg:74.73ms +[2025-09-02 17:06:15] [Rank 0] step:6921/10000 train_time:517188ms step_avg:74.73ms +[2025-09-02 17:06:17] [Rank 
0] step:6941/10000 train_time:518784ms step_avg:74.74ms +[2025-09-02 17:06:17] [Rank 0] step:6941/10000 train_time:518784ms step_avg:74.74ms +[2025-09-02 17:06:19] [Rank 0] step:6961/10000 train_time:520391ms step_avg:74.76ms +[2025-09-02 17:06:19] [Rank 0] step:6961/10000 train_time:520391ms step_avg:74.76ms +[2025-09-02 17:06:20] [Rank 0] step:6981/10000 train_time:521987ms step_avg:74.77ms +[2025-09-02 17:06:20] [Rank 0] step:6981/10000 train_time:521987ms step_avg:74.77ms +[2025-09-02 17:06:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:06:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:06:34] [Rank 0] PRINT: step:7000/10000 val_loss:3.9152 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=178.2,q75/q25=87.77 attn_vo:H=0.7886,top10E=0.14,eRank=262.4,q75/q25=inf mlp_w1:H=0.7843,top10E=0.26,eRank=204.2,q75/q25=14.85 mlp_w2:H=0.8587,top10E=0.13,eRank=305.1,q75/q25=18.90 vo_prod:H=0.6637,top10E=0.22,eRank=119.9,q75/q25=inf train_time:523742ms step_avg:74.82ms +[2025-09-02 17:06:34] [Rank 0] PRINT: step:7000/10000 val_loss:3.9152 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=178.2,q75/q25=87.77 attn_vo:H=0.7886,top10E=0.14,eRank=262.4,q75/q25=inf mlp_w1:H=0.7843,top10E=0.26,eRank=204.2,q75/q25=14.85 mlp_w2:H=0.8587,top10E=0.13,eRank=305.1,q75/q25=18.90 vo_prod:H=0.6637,top10E=0.22,eRank=119.9,q75/q25=inf train_time:523742ms step_avg:74.82ms +[2025-09-02 17:06:34] [Rank 0] step:7001/10000 train_time:523754ms step_avg:74.81ms +[2025-09-02 17:06:34] [Rank 0] step:7001/10000 train_time:523754ms step_avg:74.81ms +[2025-09-02 17:06:35] [Rank 0] step:7021/10000 train_time:525189ms step_avg:74.80ms +[2025-09-02 17:06:35] [Rank 0] step:7021/10000 train_time:525189ms step_avg:74.80ms +[2025-09-02 17:06:37] [Rank 0] step:7041/10000 train_time:526781ms step_avg:74.82ms +[2025-09-02 
17:06:37] [Rank 0] step:7041/10000 train_time:526781ms step_avg:74.82ms +[2025-09-02 17:06:39] [Rank 0] step:7061/10000 train_time:528369ms step_avg:74.83ms +[2025-09-02 17:06:39] [Rank 0] step:7061/10000 train_time:528369ms step_avg:74.83ms +[2025-09-02 17:06:40] [Rank 0] step:7081/10000 train_time:529963ms step_avg:74.84ms +[2025-09-02 17:06:40] [Rank 0] step:7081/10000 train_time:529963ms step_avg:74.84ms +[2025-09-02 17:06:42] [Rank 0] step:7101/10000 train_time:531556ms step_avg:74.86ms +[2025-09-02 17:06:42] [Rank 0] step:7101/10000 train_time:531556ms step_avg:74.86ms +[2025-09-02 17:06:43] [Rank 0] step:7121/10000 train_time:533148ms step_avg:74.87ms +[2025-09-02 17:06:43] [Rank 0] step:7121/10000 train_time:533148ms step_avg:74.87ms +[2025-09-02 17:06:45] [Rank 0] step:7141/10000 train_time:534741ms step_avg:74.88ms +[2025-09-02 17:06:45] [Rank 0] step:7141/10000 train_time:534741ms step_avg:74.88ms +[2025-09-02 17:06:47] [Rank 0] step:7161/10000 train_time:536337ms step_avg:74.90ms +[2025-09-02 17:06:47] [Rank 0] step:7161/10000 train_time:536337ms step_avg:74.90ms +[2025-09-02 17:06:48] [Rank 0] step:7181/10000 train_time:537934ms step_avg:74.91ms +[2025-09-02 17:06:48] [Rank 0] step:7181/10000 train_time:537934ms step_avg:74.91ms +[2025-09-02 17:06:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:06:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:07:01] [Rank 0] PRINT: step:7200/10000 val_loss:3.9023 svd_entropy: attn_qk:H=0.7596,top10E=0.26,eRank=179.2,q75/q25=87.71 attn_vo:H=0.7897,top10E=0.14,eRank=263.9,q75/q25=inf mlp_w1:H=0.7860,top10E=0.26,eRank=206.2,q75/q25=15.08 mlp_w2:H=0.8595,top10E=0.13,eRank=306.7,q75/q25=19.04 vo_prod:H=0.6653,top10E=0.22,eRank=121.2,q75/q25=inf train_time:539693ms step_avg:74.96ms +[2025-09-02 17:07:01] [Rank 0] PRINT: step:7200/10000 val_loss:3.9023 svd_entropy: attn_qk:H=0.7596,top10E=0.26,eRank=179.2,q75/q25=87.71 attn_vo:H=0.7897,top10E=0.14,eRank=263.9,q75/q25=inf mlp_w1:H=0.7860,top10E=0.26,eRank=206.2,q75/q25=15.08 mlp_w2:H=0.8595,top10E=0.13,eRank=306.7,q75/q25=19.04 vo_prod:H=0.6653,top10E=0.22,eRank=121.2,q75/q25=inf train_time:539693ms step_avg:74.96ms +[2025-09-02 17:07:02] [Rank 0] step:7201/10000 train_time:539706ms step_avg:74.95ms +[2025-09-02 17:07:02] [Rank 0] step:7201/10000 train_time:539706ms step_avg:74.95ms +[2025-09-02 17:07:03] [Rank 0] step:7221/10000 train_time:541161ms step_avg:74.94ms +[2025-09-02 17:07:03] [Rank 0] step:7221/10000 train_time:541161ms step_avg:74.94ms +[2025-09-02 17:07:05] [Rank 0] step:7241/10000 train_time:542750ms step_avg:74.96ms +[2025-09-02 17:07:05] [Rank 0] step:7241/10000 train_time:542750ms step_avg:74.96ms +[2025-09-02 17:07:06] [Rank 0] step:7261/10000 train_time:544341ms step_avg:74.97ms +[2025-09-02 17:07:06] [Rank 0] step:7261/10000 train_time:544341ms step_avg:74.97ms +[2025-09-02 17:07:08] [Rank 0] step:7281/10000 train_time:545944ms step_avg:74.98ms +[2025-09-02 17:07:08] [Rank 0] step:7281/10000 train_time:545944ms step_avg:74.98ms +[2025-09-02 17:07:10] [Rank 0] step:7301/10000 train_time:547539ms step_avg:75.00ms +[2025-09-02 17:07:10] [Rank 0] step:7301/10000 train_time:547539ms step_avg:75.00ms +[2025-09-02 17:07:11] [Rank 0] step:7321/10000 train_time:549140ms step_avg:75.01ms +[2025-09-02 17:07:11] [Rank 0] step:7321/10000 train_time:549140ms step_avg:75.01ms +[2025-09-02 17:07:13] [Rank 
0] step:7341/10000 train_time:550738ms step_avg:75.02ms +[2025-09-02 17:07:13] [Rank 0] step:7341/10000 train_time:550738ms step_avg:75.02ms +[2025-09-02 17:07:14] [Rank 0] step:7361/10000 train_time:552338ms step_avg:75.04ms +[2025-09-02 17:07:14] [Rank 0] step:7361/10000 train_time:552338ms step_avg:75.04ms +[2025-09-02 17:07:16] [Rank 0] step:7381/10000 train_time:553940ms step_avg:75.05ms +[2025-09-02 17:07:16] [Rank 0] step:7381/10000 train_time:553940ms step_avg:75.05ms +[2025-09-02 17:07:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:07:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:07:29] [Rank 0] PRINT: step:7400/10000 val_loss:3.8839 svd_entropy: attn_qk:H=0.7607,top10E=0.26,eRank=180.2,q75/q25=87.37 attn_vo:H=0.7906,top10E=0.14,eRank=265.2,q75/q25=inf mlp_w1:H=0.7875,top10E=0.26,eRank=207.9,q75/q25=15.26 mlp_w2:H=0.8602,top10E=0.13,eRank=308.2,q75/q25=19.12 vo_prod:H=0.6667,top10E=0.22,eRank=122.4,q75/q25=inf train_time:555680ms step_avg:75.09ms +[2025-09-02 17:07:29] [Rank 0] PRINT: step:7400/10000 val_loss:3.8839 svd_entropy: attn_qk:H=0.7607,top10E=0.26,eRank=180.2,q75/q25=87.37 attn_vo:H=0.7906,top10E=0.14,eRank=265.2,q75/q25=inf mlp_w1:H=0.7875,top10E=0.26,eRank=207.9,q75/q25=15.26 mlp_w2:H=0.8602,top10E=0.13,eRank=308.2,q75/q25=19.12 vo_prod:H=0.6667,top10E=0.22,eRank=122.4,q75/q25=inf train_time:555680ms step_avg:75.09ms +[2025-09-02 17:07:29] [Rank 0] step:7401/10000 train_time:555693ms step_avg:75.08ms +[2025-09-02 17:07:29] [Rank 0] step:7401/10000 train_time:555693ms step_avg:75.08ms +[2025-09-02 17:07:31] [Rank 0] step:7421/10000 train_time:557153ms step_avg:75.08ms +[2025-09-02 17:07:31] [Rank 0] step:7421/10000 train_time:557153ms step_avg:75.08ms +[2025-09-02 17:07:33] [Rank 0] step:7441/10000 train_time:558742ms step_avg:75.09ms +[2025-09-02 
17:07:33] [Rank 0] step:7441/10000 train_time:558742ms step_avg:75.09ms +[2025-09-02 17:07:34] [Rank 0] step:7461/10000 train_time:560336ms step_avg:75.10ms +[2025-09-02 17:07:34] [Rank 0] step:7461/10000 train_time:560336ms step_avg:75.10ms +[2025-09-02 17:07:36] [Rank 0] step:7481/10000 train_time:561936ms step_avg:75.12ms +[2025-09-02 17:07:36] [Rank 0] step:7481/10000 train_time:561936ms step_avg:75.12ms +[2025-09-02 17:07:37] [Rank 0] step:7501/10000 train_time:563535ms step_avg:75.13ms +[2025-09-02 17:07:37] [Rank 0] step:7501/10000 train_time:563535ms step_avg:75.13ms +[2025-09-02 17:07:39] [Rank 0] step:7521/10000 train_time:565130ms step_avg:75.14ms +[2025-09-02 17:07:39] [Rank 0] step:7521/10000 train_time:565130ms step_avg:75.14ms +[2025-09-02 17:07:41] [Rank 0] step:7541/10000 train_time:566741ms step_avg:75.15ms +[2025-09-02 17:07:41] [Rank 0] step:7541/10000 train_time:566741ms step_avg:75.15ms +[2025-09-02 17:07:42] [Rank 0] step:7561/10000 train_time:568326ms step_avg:75.17ms +[2025-09-02 17:07:42] [Rank 0] step:7561/10000 train_time:568326ms step_avg:75.17ms +[2025-09-02 17:07:44] [Rank 0] step:7581/10000 train_time:569931ms step_avg:75.18ms +[2025-09-02 17:07:44] [Rank 0] step:7581/10000 train_time:569931ms step_avg:75.18ms +[2025-09-02 17:07:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:07:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:07:57] [Rank 0] PRINT: step:7600/10000 val_loss:3.8823 svd_entropy: attn_qk:H=0.7617,top10E=0.25,eRank=181.2,q75/q25=87.15 attn_vo:H=0.7915,top10E=0.14,eRank=266.4,q75/q25=inf mlp_w1:H=0.7889,top10E=0.26,eRank=209.5,q75/q25=15.46 mlp_w2:H=0.8609,top10E=0.13,eRank=309.7,q75/q25=19.17 vo_prod:H=0.6681,top10E=0.22,eRank=123.6,q75/q25=inf train_time:571696ms step_avg:75.22ms +[2025-09-02 17:07:57] [Rank 0] PRINT: step:7600/10000 val_loss:3.8823 svd_entropy: attn_qk:H=0.7617,top10E=0.25,eRank=181.2,q75/q25=87.15 attn_vo:H=0.7915,top10E=0.14,eRank=266.4,q75/q25=inf mlp_w1:H=0.7889,top10E=0.26,eRank=209.5,q75/q25=15.46 mlp_w2:H=0.8609,top10E=0.13,eRank=309.7,q75/q25=19.17 vo_prod:H=0.6681,top10E=0.22,eRank=123.6,q75/q25=inf train_time:571696ms step_avg:75.22ms +[2025-09-02 17:07:57] [Rank 0] step:7601/10000 train_time:571709ms step_avg:75.21ms +[2025-09-02 17:07:57] [Rank 0] step:7601/10000 train_time:571709ms step_avg:75.21ms +[2025-09-02 17:07:58] [Rank 0] step:7621/10000 train_time:573147ms step_avg:75.21ms +[2025-09-02 17:07:58] [Rank 0] step:7621/10000 train_time:573147ms step_avg:75.21ms +[2025-09-02 17:08:00] [Rank 0] step:7641/10000 train_time:574740ms step_avg:75.22ms +[2025-09-02 17:08:00] [Rank 0] step:7641/10000 train_time:574740ms step_avg:75.22ms +[2025-09-02 17:08:02] [Rank 0] step:7661/10000 train_time:576335ms step_avg:75.23ms +[2025-09-02 17:08:02] [Rank 0] step:7661/10000 train_time:576335ms step_avg:75.23ms +[2025-09-02 17:08:03] [Rank 0] step:7681/10000 train_time:577923ms step_avg:75.24ms +[2025-09-02 17:08:03] [Rank 0] step:7681/10000 train_time:577923ms step_avg:75.24ms +[2025-09-02 17:08:05] [Rank 0] step:7701/10000 train_time:579514ms step_avg:75.25ms +[2025-09-02 17:08:05] [Rank 0] step:7701/10000 train_time:579514ms step_avg:75.25ms +[2025-09-02 17:08:06] [Rank 0] step:7721/10000 train_time:581123ms step_avg:75.27ms +[2025-09-02 17:08:06] [Rank 0] step:7721/10000 train_time:581123ms step_avg:75.27ms +[2025-09-02 17:08:08] [Rank 
0] step:7741/10000 train_time:582719ms step_avg:75.28ms +[2025-09-02 17:08:08] [Rank 0] step:7741/10000 train_time:582719ms step_avg:75.28ms +[2025-09-02 17:08:10] [Rank 0] step:7761/10000 train_time:584319ms step_avg:75.29ms +[2025-09-02 17:08:10] [Rank 0] step:7761/10000 train_time:584319ms step_avg:75.29ms +[2025-09-02 17:08:11] [Rank 0] step:7781/10000 train_time:585924ms step_avg:75.30ms +[2025-09-02 17:08:11] [Rank 0] step:7781/10000 train_time:585924ms step_avg:75.30ms +[2025-09-02 17:08:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:08:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:08:25] [Rank 0] PRINT: step:7800/10000 val_loss:3.8672 svd_entropy: attn_qk:H=0.7626,top10E=0.25,eRank=182.0,q75/q25=87.36 attn_vo:H=0.7923,top10E=0.14,eRank=267.4,q75/q25=inf mlp_w1:H=0.7903,top10E=0.26,eRank=211.1,q75/q25=15.60 mlp_w2:H=0.8616,top10E=0.13,eRank=311.1,q75/q25=19.23 vo_prod:H=0.6692,top10E=0.22,eRank=124.5,q75/q25=inf train_time:587695ms step_avg:75.35ms +[2025-09-02 17:08:25] [Rank 0] PRINT: step:7800/10000 val_loss:3.8672 svd_entropy: attn_qk:H=0.7626,top10E=0.25,eRank=182.0,q75/q25=87.36 attn_vo:H=0.7923,top10E=0.14,eRank=267.4,q75/q25=inf mlp_w1:H=0.7903,top10E=0.26,eRank=211.1,q75/q25=15.60 mlp_w2:H=0.8616,top10E=0.13,eRank=311.1,q75/q25=19.23 vo_prod:H=0.6692,top10E=0.22,eRank=124.5,q75/q25=inf train_time:587695ms step_avg:75.35ms +[2025-09-02 17:08:25] [Rank 0] step:7801/10000 train_time:587708ms step_avg:75.34ms +[2025-09-02 17:08:25] [Rank 0] step:7801/10000 train_time:587708ms step_avg:75.34ms +[2025-09-02 17:08:26] [Rank 0] step:7821/10000 train_time:589143ms step_avg:75.33ms +[2025-09-02 17:08:26] [Rank 0] step:7821/10000 train_time:589143ms step_avg:75.33ms +[2025-09-02 17:08:28] [Rank 0] step:7841/10000 train_time:590736ms step_avg:75.34ms +[2025-09-02 
17:08:28] [Rank 0] step:7841/10000 train_time:590736ms step_avg:75.34ms +[2025-09-02 17:08:29] [Rank 0] step:7861/10000 train_time:592337ms step_avg:75.35ms +[2025-09-02 17:08:29] [Rank 0] step:7861/10000 train_time:592337ms step_avg:75.35ms +[2025-09-02 17:08:31] [Rank 0] step:7881/10000 train_time:593939ms step_avg:75.36ms +[2025-09-02 17:08:31] [Rank 0] step:7881/10000 train_time:593939ms step_avg:75.36ms +[2025-09-02 17:08:33] [Rank 0] step:7901/10000 train_time:595535ms step_avg:75.37ms +[2025-09-02 17:08:33] [Rank 0] step:7901/10000 train_time:595535ms step_avg:75.37ms +[2025-09-02 17:08:34] [Rank 0] step:7921/10000 train_time:597133ms step_avg:75.39ms +[2025-09-02 17:08:34] [Rank 0] step:7921/10000 train_time:597133ms step_avg:75.39ms +[2025-09-02 17:08:36] [Rank 0] step:7941/10000 train_time:598740ms step_avg:75.40ms +[2025-09-02 17:08:36] [Rank 0] step:7941/10000 train_time:598740ms step_avg:75.40ms +[2025-09-02 17:08:38] [Rank 0] step:7961/10000 train_time:600343ms step_avg:75.41ms +[2025-09-02 17:08:38] [Rank 0] step:7961/10000 train_time:600343ms step_avg:75.41ms +[2025-09-02 17:08:39] [Rank 0] step:7981/10000 train_time:601937ms step_avg:75.42ms +[2025-09-02 17:08:39] [Rank 0] step:7981/10000 train_time:601937ms step_avg:75.42ms +[2025-09-02 17:08:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:08:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:08:52] [Rank 0] PRINT: step:8000/10000 val_loss:3.8519 svd_entropy: attn_qk:H=0.7634,top10E=0.25,eRank=182.8,q75/q25=86.55 attn_vo:H=0.7931,top10E=0.14,eRank=268.5,q75/q25=inf mlp_w1:H=0.7914,top10E=0.25,eRank=212.5,q75/q25=15.67 mlp_w2:H=0.8622,top10E=0.13,eRank=312.4,q75/q25=19.28 vo_prod:H=0.6703,top10E=0.22,eRank=125.5,q75/q25=inf train_time:603698ms step_avg:75.46ms +[2025-09-02 17:08:52] [Rank 0] PRINT: step:8000/10000 val_loss:3.8519 svd_entropy: attn_qk:H=0.7634,top10E=0.25,eRank=182.8,q75/q25=86.55 attn_vo:H=0.7931,top10E=0.14,eRank=268.5,q75/q25=inf mlp_w1:H=0.7914,top10E=0.25,eRank=212.5,q75/q25=15.67 mlp_w2:H=0.8622,top10E=0.13,eRank=312.4,q75/q25=19.28 vo_prod:H=0.6703,top10E=0.22,eRank=125.5,q75/q25=inf train_time:603698ms step_avg:75.46ms +[2025-09-02 17:08:53] [Rank 0] step:8001/10000 train_time:603710ms step_avg:75.45ms +[2025-09-02 17:08:53] [Rank 0] step:8001/10000 train_time:603710ms step_avg:75.45ms +[2025-09-02 17:08:54] [Rank 0] step:8021/10000 train_time:605152ms step_avg:75.45ms +[2025-09-02 17:08:54] [Rank 0] step:8021/10000 train_time:605152ms step_avg:75.45ms +[2025-09-02 17:08:56] [Rank 0] step:8041/10000 train_time:606759ms step_avg:75.46ms +[2025-09-02 17:08:56] [Rank 0] step:8041/10000 train_time:606759ms step_avg:75.46ms +[2025-09-02 17:08:57] [Rank 0] step:8061/10000 train_time:608356ms step_avg:75.47ms +[2025-09-02 17:08:57] [Rank 0] step:8061/10000 train_time:608356ms step_avg:75.47ms +[2025-09-02 17:08:59] [Rank 0] step:8081/10000 train_time:609946ms step_avg:75.48ms +[2025-09-02 17:08:59] [Rank 0] step:8081/10000 train_time:609946ms step_avg:75.48ms +[2025-09-02 17:09:01] [Rank 0] step:8101/10000 train_time:611550ms step_avg:75.49ms +[2025-09-02 17:09:01] [Rank 0] step:8101/10000 train_time:611550ms step_avg:75.49ms +[2025-09-02 17:09:02] [Rank 0] step:8121/10000 train_time:613148ms step_avg:75.50ms +[2025-09-02 17:09:02] [Rank 0] step:8121/10000 train_time:613148ms step_avg:75.50ms +[2025-09-02 17:09:04] [Rank 
0] step:8141/10000 train_time:614857ms step_avg:75.53ms +[2025-09-02 17:09:04] [Rank 0] step:8141/10000 train_time:614857ms step_avg:75.53ms +[2025-09-02 17:09:05] [Rank 0] step:8161/10000 train_time:616466ms step_avg:75.54ms +[2025-09-02 17:09:05] [Rank 0] step:8161/10000 train_time:616466ms step_avg:75.54ms +[2025-09-02 17:09:07] [Rank 0] step:8181/10000 train_time:618098ms step_avg:75.55ms +[2025-09-02 17:09:07] [Rank 0] step:8181/10000 train_time:618098ms step_avg:75.55ms +[2025-09-02 17:09:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:09:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:09:20] [Rank 0] PRINT: step:8200/10000 val_loss:3.8433 svd_entropy: attn_qk:H=0.7642,top10E=0.25,eRank=183.5,q75/q25=86.78 attn_vo:H=0.7938,top10E=0.14,eRank=269.4,q75/q25=inf mlp_w1:H=0.7924,top10E=0.25,eRank=213.8,q75/q25=15.74 mlp_w2:H=0.8628,top10E=0.12,eRank=313.7,q75/q25=19.28 vo_prod:H=0.6712,top10E=0.22,eRank=126.3,q75/q25=inf train_time:619910ms step_avg:75.60ms +[2025-09-02 17:09:20] [Rank 0] PRINT: step:8200/10000 val_loss:3.8433 svd_entropy: attn_qk:H=0.7642,top10E=0.25,eRank=183.5,q75/q25=86.78 attn_vo:H=0.7938,top10E=0.14,eRank=269.4,q75/q25=inf mlp_w1:H=0.7924,top10E=0.25,eRank=213.8,q75/q25=15.74 mlp_w2:H=0.8628,top10E=0.12,eRank=313.7,q75/q25=19.28 vo_prod:H=0.6712,top10E=0.22,eRank=126.3,q75/q25=inf train_time:619910ms step_avg:75.60ms +[2025-09-02 17:09:21] [Rank 0] step:8201/10000 train_time:619923ms step_avg:75.59ms +[2025-09-02 17:09:21] [Rank 0] step:8201/10000 train_time:619923ms step_avg:75.59ms +[2025-09-02 17:09:22] [Rank 0] step:8221/10000 train_time:621396ms step_avg:75.59ms +[2025-09-02 17:09:22] [Rank 0] step:8221/10000 train_time:621396ms step_avg:75.59ms +[2025-09-02 17:09:24] [Rank 0] step:8241/10000 train_time:623034ms step_avg:75.60ms +[2025-09-02 
17:09:24] [Rank 0] step:8241/10000 train_time:623034ms step_avg:75.60ms +[2025-09-02 17:09:26] [Rank 0] step:8261/10000 train_time:624657ms step_avg:75.62ms +[2025-09-02 17:09:26] [Rank 0] step:8261/10000 train_time:624657ms step_avg:75.62ms +[2025-09-02 17:09:27] [Rank 0] step:8281/10000 train_time:626285ms step_avg:75.63ms +[2025-09-02 17:09:27] [Rank 0] step:8281/10000 train_time:626285ms step_avg:75.63ms +[2025-09-02 17:09:29] [Rank 0] step:8301/10000 train_time:627911ms step_avg:75.64ms +[2025-09-02 17:09:29] [Rank 0] step:8301/10000 train_time:627911ms step_avg:75.64ms +[2025-09-02 17:09:30] [Rank 0] step:8321/10000 train_time:629531ms step_avg:75.66ms +[2025-09-02 17:09:30] [Rank 0] step:8321/10000 train_time:629531ms step_avg:75.66ms +[2025-09-02 17:09:32] [Rank 0] step:8341/10000 train_time:631161ms step_avg:75.67ms +[2025-09-02 17:09:32] [Rank 0] step:8341/10000 train_time:631161ms step_avg:75.67ms +[2025-09-02 17:09:34] [Rank 0] step:8361/10000 train_time:632782ms step_avg:75.68ms +[2025-09-02 17:09:34] [Rank 0] step:8361/10000 train_time:632782ms step_avg:75.68ms +[2025-09-02 17:09:35] [Rank 0] step:8381/10000 train_time:634412ms step_avg:75.70ms +[2025-09-02 17:09:35] [Rank 0] step:8381/10000 train_time:634412ms step_avg:75.70ms +[2025-09-02 17:09:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:09:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:09:49] [Rank 0] PRINT: step:8400/10000 val_loss:3.8333 svd_entropy: attn_qk:H=0.7648,top10E=0.25,eRank=184.1,q75/q25=86.77 attn_vo:H=0.7944,top10E=0.14,eRank=270.2,q75/q25=inf mlp_w1:H=0.7933,top10E=0.25,eRank=214.8,q75/q25=15.80 mlp_w2:H=0.8633,top10E=0.12,eRank=314.8,q75/q25=19.29 vo_prod:H=0.6722,top10E=0.22,eRank=127.2,q75/q25=inf train_time:636193ms step_avg:75.74ms +[2025-09-02 17:09:49] [Rank 0] PRINT: step:8400/10000 val_loss:3.8333 svd_entropy: attn_qk:H=0.7648,top10E=0.25,eRank=184.1,q75/q25=86.77 attn_vo:H=0.7944,top10E=0.14,eRank=270.2,q75/q25=inf mlp_w1:H=0.7933,top10E=0.25,eRank=214.8,q75/q25=15.80 mlp_w2:H=0.8633,top10E=0.12,eRank=314.8,q75/q25=19.29 vo_prod:H=0.6722,top10E=0.22,eRank=127.2,q75/q25=inf train_time:636193ms step_avg:75.74ms +[2025-09-02 17:09:49] [Rank 0] step:8401/10000 train_time:636206ms step_avg:75.73ms +[2025-09-02 17:09:49] [Rank 0] step:8401/10000 train_time:636206ms step_avg:75.73ms +[2025-09-02 17:09:50] [Rank 0] step:8421/10000 train_time:637687ms step_avg:75.73ms +[2025-09-02 17:09:50] [Rank 0] step:8421/10000 train_time:637687ms step_avg:75.73ms +[2025-09-02 17:09:52] [Rank 0] step:8441/10000 train_time:639316ms step_avg:75.74ms +[2025-09-02 17:09:52] [Rank 0] step:8441/10000 train_time:639316ms step_avg:75.74ms +[2025-09-02 17:09:54] [Rank 0] step:8461/10000 train_time:640979ms step_avg:75.76ms +[2025-09-02 17:09:54] [Rank 0] step:8461/10000 train_time:640979ms step_avg:75.76ms +[2025-09-02 17:09:55] [Rank 0] step:8481/10000 train_time:642637ms step_avg:75.77ms +[2025-09-02 17:09:55] [Rank 0] step:8481/10000 train_time:642637ms step_avg:75.77ms +[2025-09-02 17:09:57] [Rank 0] step:8501/10000 train_time:644284ms step_avg:75.79ms +[2025-09-02 17:09:57] [Rank 0] step:8501/10000 train_time:644284ms step_avg:75.79ms +[2025-09-02 17:09:59] [Rank 0] step:8521/10000 train_time:645915ms step_avg:75.80ms +[2025-09-02 17:09:59] [Rank 0] step:8521/10000 train_time:645915ms step_avg:75.80ms +[2025-09-02 17:10:00] [Rank 
0] step:8541/10000 train_time:647556ms step_avg:75.82ms +[2025-09-02 17:10:00] [Rank 0] step:8541/10000 train_time:647556ms step_avg:75.82ms +[2025-09-02 17:10:02] [Rank 0] step:8561/10000 train_time:649183ms step_avg:75.83ms +[2025-09-02 17:10:02] [Rank 0] step:8561/10000 train_time:649183ms step_avg:75.83ms +[2025-09-02 17:10:03] [Rank 0] step:8581/10000 train_time:650813ms step_avg:75.84ms +[2025-09-02 17:10:03] [Rank 0] step:8581/10000 train_time:650813ms step_avg:75.84ms +[2025-09-02 17:10:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:10:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:10:17] [Rank 0] PRINT: step:8600/10000 val_loss:3.8240 svd_entropy: attn_qk:H=0.7654,top10E=0.25,eRank=184.6,q75/q25=86.59 attn_vo:H=0.7949,top10E=0.14,eRank=270.9,q75/q25=inf mlp_w1:H=0.7941,top10E=0.25,eRank=215.8,q75/q25=15.86 mlp_w2:H=0.8638,top10E=0.12,eRank=315.8,q75/q25=19.33 vo_prod:H=0.6729,top10E=0.21,eRank=127.8,q75/q25=inf train_time:652595ms step_avg:75.88ms +[2025-09-02 17:10:17] [Rank 0] PRINT: step:8600/10000 val_loss:3.8240 svd_entropy: attn_qk:H=0.7654,top10E=0.25,eRank=184.6,q75/q25=86.59 attn_vo:H=0.7949,top10E=0.14,eRank=270.9,q75/q25=inf mlp_w1:H=0.7941,top10E=0.25,eRank=215.8,q75/q25=15.86 mlp_w2:H=0.8638,top10E=0.12,eRank=315.8,q75/q25=19.33 vo_prod:H=0.6729,top10E=0.21,eRank=127.8,q75/q25=inf train_time:652595ms step_avg:75.88ms +[2025-09-02 17:10:17] [Rank 0] step:8601/10000 train_time:652608ms step_avg:75.88ms +[2025-09-02 17:10:17] [Rank 0] step:8601/10000 train_time:652608ms step_avg:75.88ms +[2025-09-02 17:10:19] [Rank 0] step:8621/10000 train_time:654075ms step_avg:75.87ms +[2025-09-02 17:10:19] [Rank 0] step:8621/10000 train_time:654075ms step_avg:75.87ms +[2025-09-02 17:10:20] [Rank 0] step:8641/10000 train_time:655697ms step_avg:75.88ms +[2025-09-02 
17:10:20] [Rank 0] step:8641/10000 train_time:655697ms step_avg:75.88ms +[2025-09-02 17:10:22] [Rank 0] step:8661/10000 train_time:657324ms step_avg:75.89ms +[2025-09-02 17:10:22] [Rank 0] step:8661/10000 train_time:657324ms step_avg:75.89ms +[2025-09-02 17:10:23] [Rank 0] step:8681/10000 train_time:658949ms step_avg:75.91ms +[2025-09-02 17:10:23] [Rank 0] step:8681/10000 train_time:658949ms step_avg:75.91ms +[2025-09-02 17:10:25] [Rank 0] step:8701/10000 train_time:660566ms step_avg:75.92ms +[2025-09-02 17:10:25] [Rank 0] step:8701/10000 train_time:660566ms step_avg:75.92ms +[2025-09-02 17:10:27] [Rank 0] step:8721/10000 train_time:662192ms step_avg:75.93ms +[2025-09-02 17:10:27] [Rank 0] step:8721/10000 train_time:662192ms step_avg:75.93ms +[2025-09-02 17:10:28] [Rank 0] step:8741/10000 train_time:663809ms step_avg:75.94ms +[2025-09-02 17:10:28] [Rank 0] step:8741/10000 train_time:663809ms step_avg:75.94ms +[2025-09-02 17:10:30] [Rank 0] step:8761/10000 train_time:665426ms step_avg:75.95ms +[2025-09-02 17:10:30] [Rank 0] step:8761/10000 train_time:665426ms step_avg:75.95ms +[2025-09-02 17:10:32] [Rank 0] step:8781/10000 train_time:667056ms step_avg:75.97ms +[2025-09-02 17:10:32] [Rank 0] step:8781/10000 train_time:667056ms step_avg:75.97ms +[2025-09-02 17:10:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:10:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:10:45] [Rank 0] PRINT: step:8800/10000 val_loss:3.8151 svd_entropy: attn_qk:H=0.7659,top10E=0.25,eRank=185.1,q75/q25=86.66 attn_vo:H=0.7954,top10E=0.14,eRank=271.6,q75/q25=inf mlp_w1:H=0.7948,top10E=0.25,eRank=216.6,q75/q25=15.92 mlp_w2:H=0.8643,top10E=0.12,eRank=316.8,q75/q25=19.32 vo_prod:H=0.6737,top10E=0.21,eRank=128.6,q75/q25=inf train_time:668847ms step_avg:76.01ms +[2025-09-02 17:10:45] [Rank 0] PRINT: step:8800/10000 val_loss:3.8151 svd_entropy: attn_qk:H=0.7659,top10E=0.25,eRank=185.1,q75/q25=86.66 attn_vo:H=0.7954,top10E=0.14,eRank=271.6,q75/q25=inf mlp_w1:H=0.7948,top10E=0.25,eRank=216.6,q75/q25=15.92 mlp_w2:H=0.8643,top10E=0.12,eRank=316.8,q75/q25=19.32 vo_prod:H=0.6737,top10E=0.21,eRank=128.6,q75/q25=inf train_time:668847ms step_avg:76.01ms +[2025-09-02 17:10:45] [Rank 0] step:8801/10000 train_time:668860ms step_avg:76.00ms +[2025-09-02 17:10:45] [Rank 0] step:8801/10000 train_time:668860ms step_avg:76.00ms +[2025-09-02 17:10:47] [Rank 0] step:8821/10000 train_time:670344ms step_avg:75.99ms +[2025-09-02 17:10:47] [Rank 0] step:8821/10000 train_time:670344ms step_avg:75.99ms +[2025-09-02 17:10:48] [Rank 0] step:8841/10000 train_time:671990ms step_avg:76.01ms +[2025-09-02 17:10:48] [Rank 0] step:8841/10000 train_time:671990ms step_avg:76.01ms +[2025-09-02 17:10:50] [Rank 0] step:8861/10000 train_time:673613ms step_avg:76.02ms +[2025-09-02 17:10:50] [Rank 0] step:8861/10000 train_time:673613ms step_avg:76.02ms +[2025-09-02 17:10:51] [Rank 0] step:8881/10000 train_time:675240ms step_avg:76.03ms +[2025-09-02 17:10:51] [Rank 0] step:8881/10000 train_time:675240ms step_avg:76.03ms +[2025-09-02 17:10:53] [Rank 0] step:8901/10000 train_time:676868ms step_avg:76.04ms +[2025-09-02 17:10:53] [Rank 0] step:8901/10000 train_time:676868ms step_avg:76.04ms +[2025-09-02 17:10:55] [Rank 0] step:8921/10000 train_time:678641ms step_avg:76.07ms +[2025-09-02 17:10:55] [Rank 0] step:8921/10000 train_time:678641ms step_avg:76.07ms +[2025-09-02 17:10:56] [Rank 
0] step:8941/10000 train_time:680159ms step_avg:76.07ms +[2025-09-02 17:10:56] [Rank 0] step:8941/10000 train_time:680159ms step_avg:76.07ms +[2025-09-02 17:10:58] [Rank 0] step:8961/10000 train_time:681784ms step_avg:76.08ms +[2025-09-02 17:10:58] [Rank 0] step:8961/10000 train_time:681784ms step_avg:76.08ms +[2025-09-02 17:11:00] [Rank 0] step:8981/10000 train_time:683407ms step_avg:76.09ms +[2025-09-02 17:11:00] [Rank 0] step:8981/10000 train_time:683407ms step_avg:76.09ms +[2025-09-02 17:11:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:11:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:11:13] [Rank 0] PRINT: step:9000/10000 val_loss:3.8067 svd_entropy: attn_qk:H=0.7664,top10E=0.25,eRank=185.6,q75/q25=86.52 attn_vo:H=0.7958,top10E=0.14,eRank=272.2,q75/q25=inf mlp_w1:H=0.7955,top10E=0.25,eRank=217.4,q75/q25=15.93 mlp_w2:H=0.8647,top10E=0.12,eRank=317.7,q75/q25=19.27 vo_prod:H=0.6744,top10E=0.21,eRank=129.2,q75/q25=inf train_time:685192ms step_avg:76.13ms +[2025-09-02 17:11:13] [Rank 0] PRINT: step:9000/10000 val_loss:3.8067 svd_entropy: attn_qk:H=0.7664,top10E=0.25,eRank=185.6,q75/q25=86.52 attn_vo:H=0.7958,top10E=0.14,eRank=272.2,q75/q25=inf mlp_w1:H=0.7955,top10E=0.25,eRank=217.4,q75/q25=15.93 mlp_w2:H=0.8647,top10E=0.12,eRank=317.7,q75/q25=19.27 vo_prod:H=0.6744,top10E=0.21,eRank=129.2,q75/q25=inf train_time:685192ms step_avg:76.13ms +[2025-09-02 17:11:13] [Rank 0] step:9001/10000 train_time:685205ms step_avg:76.13ms +[2025-09-02 17:11:13] [Rank 0] step:9001/10000 train_time:685205ms step_avg:76.13ms +[2025-09-02 17:11:15] [Rank 0] step:9021/10000 train_time:686672ms step_avg:76.12ms +[2025-09-02 17:11:15] [Rank 0] step:9021/10000 train_time:686672ms step_avg:76.12ms +[2025-09-02 17:11:16] [Rank 0] step:9041/10000 train_time:688293ms step_avg:76.13ms +[2025-09-02 
17:11:16] [Rank 0] step:9041/10000 train_time:688293ms step_avg:76.13ms +[2025-09-02 17:11:18] [Rank 0] step:9061/10000 train_time:689927ms step_avg:76.14ms +[2025-09-02 17:11:18] [Rank 0] step:9061/10000 train_time:689927ms step_avg:76.14ms +[2025-09-02 17:11:20] [Rank 0] step:9081/10000 train_time:691663ms step_avg:76.17ms +[2025-09-02 17:11:20] [Rank 0] step:9081/10000 train_time:691663ms step_avg:76.17ms +[2025-09-02 17:11:21] [Rank 0] step:9101/10000 train_time:693310ms step_avg:76.18ms +[2025-09-02 17:11:21] [Rank 0] step:9101/10000 train_time:693310ms step_avg:76.18ms +[2025-09-02 17:11:23] [Rank 0] step:9121/10000 train_time:694940ms step_avg:76.19ms +[2025-09-02 17:11:23] [Rank 0] step:9121/10000 train_time:694940ms step_avg:76.19ms +[2025-09-02 17:11:25] [Rank 0] step:9141/10000 train_time:696564ms step_avg:76.20ms +[2025-09-02 17:11:25] [Rank 0] step:9141/10000 train_time:696564ms step_avg:76.20ms +[2025-09-02 17:11:26] [Rank 0] step:9161/10000 train_time:698185ms step_avg:76.21ms +[2025-09-02 17:11:26] [Rank 0] step:9161/10000 train_time:698185ms step_avg:76.21ms +[2025-09-02 17:11:28] [Rank 0] step:9181/10000 train_time:699846ms step_avg:76.23ms +[2025-09-02 17:11:28] [Rank 0] step:9181/10000 train_time:699846ms step_avg:76.23ms +[2025-09-02 17:11:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:11:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:11:41] [Rank 0] PRINT: step:9200/10000 val_loss:3.7995 svd_entropy: attn_qk:H=0.7668,top10E=0.25,eRank=185.9,q75/q25=86.54 attn_vo:H=0.7962,top10E=0.14,eRank=272.7,q75/q25=inf mlp_w1:H=0.7960,top10E=0.25,eRank=218.1,q75/q25=15.93 mlp_w2:H=0.8651,top10E=0.12,eRank=318.5,q75/q25=19.28 vo_prod:H=0.6750,top10E=0.21,eRank=129.7,q75/q25=inf train_time:701637ms step_avg:76.26ms +[2025-09-02 17:11:41] [Rank 0] PRINT: step:9200/10000 val_loss:3.7995 svd_entropy: attn_qk:H=0.7668,top10E=0.25,eRank=185.9,q75/q25=86.54 attn_vo:H=0.7962,top10E=0.14,eRank=272.7,q75/q25=inf mlp_w1:H=0.7960,top10E=0.25,eRank=218.1,q75/q25=15.93 mlp_w2:H=0.8651,top10E=0.12,eRank=318.5,q75/q25=19.28 vo_prod:H=0.6750,top10E=0.21,eRank=129.7,q75/q25=inf train_time:701637ms step_avg:76.26ms +[2025-09-02 17:11:41] [Rank 0] step:9201/10000 train_time:701649ms step_avg:76.26ms +[2025-09-02 17:11:41] [Rank 0] step:9201/10000 train_time:701649ms step_avg:76.26ms +[2025-09-02 17:11:43] [Rank 0] step:9221/10000 train_time:703132ms step_avg:76.25ms +[2025-09-02 17:11:43] [Rank 0] step:9221/10000 train_time:703132ms step_avg:76.25ms +[2025-09-02 17:11:45] [Rank 0] step:9241/10000 train_time:704766ms step_avg:76.27ms +[2025-09-02 17:11:45] [Rank 0] step:9241/10000 train_time:704766ms step_avg:76.27ms +[2025-09-02 17:11:46] [Rank 0] step:9261/10000 train_time:706409ms step_avg:76.28ms +[2025-09-02 17:11:46] [Rank 0] step:9261/10000 train_time:706409ms step_avg:76.28ms +[2025-09-02 17:11:48] [Rank 0] step:9281/10000 train_time:708035ms step_avg:76.29ms +[2025-09-02 17:11:48] [Rank 0] step:9281/10000 train_time:708035ms step_avg:76.29ms +[2025-09-02 17:11:50] [Rank 0] step:9301/10000 train_time:709663ms step_avg:76.30ms +[2025-09-02 17:11:50] [Rank 0] step:9301/10000 train_time:709663ms step_avg:76.30ms +[2025-09-02 17:11:51] [Rank 0] step:9321/10000 train_time:711295ms step_avg:76.31ms +[2025-09-02 17:11:51] [Rank 0] step:9321/10000 train_time:711295ms step_avg:76.31ms +[2025-09-02 17:11:53] [Rank 
0] step:9341/10000 train_time:712928ms step_avg:76.32ms +[2025-09-02 17:11:53] [Rank 0] step:9341/10000 train_time:712928ms step_avg:76.32ms +[2025-09-02 17:11:54] [Rank 0] step:9361/10000 train_time:714560ms step_avg:76.33ms +[2025-09-02 17:11:54] [Rank 0] step:9361/10000 train_time:714560ms step_avg:76.33ms +[2025-09-02 17:11:56] [Rank 0] step:9381/10000 train_time:716201ms step_avg:76.35ms +[2025-09-02 17:11:56] [Rank 0] step:9381/10000 train_time:716201ms step_avg:76.35ms +[2025-09-02 17:11:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:11:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:12:09] [Rank 0] PRINT: step:9400/10000 val_loss:3.7931 svd_entropy: attn_qk:H=0.7671,top10E=0.25,eRank=186.2,q75/q25=86.15 attn_vo:H=0.7965,top10E=0.14,eRank=273.2,q75/q25=inf mlp_w1:H=0.7964,top10E=0.25,eRank=218.6,q75/q25=15.93 mlp_w2:H=0.8654,top10E=0.12,eRank=319.2,q75/q25=19.27 vo_prod:H=0.6755,top10E=0.21,eRank=130.2,q75/q25=inf train_time:717996ms step_avg:76.38ms +[2025-09-02 17:12:09] [Rank 0] PRINT: step:9400/10000 val_loss:3.7931 svd_entropy: attn_qk:H=0.7671,top10E=0.25,eRank=186.2,q75/q25=86.15 attn_vo:H=0.7965,top10E=0.14,eRank=273.2,q75/q25=inf mlp_w1:H=0.7964,top10E=0.25,eRank=218.6,q75/q25=15.93 mlp_w2:H=0.8654,top10E=0.12,eRank=319.2,q75/q25=19.27 vo_prod:H=0.6755,top10E=0.21,eRank=130.2,q75/q25=inf train_time:717996ms step_avg:76.38ms +[2025-09-02 17:12:10] [Rank 0] step:9401/10000 train_time:718008ms step_avg:76.38ms +[2025-09-02 17:12:10] [Rank 0] step:9401/10000 train_time:718008ms step_avg:76.38ms +[2025-09-02 17:12:11] [Rank 0] step:9421/10000 train_time:719494ms step_avg:76.37ms +[2025-09-02 17:12:11] [Rank 0] step:9421/10000 train_time:719494ms step_avg:76.37ms +[2025-09-02 17:12:13] [Rank 0] step:9441/10000 train_time:721123ms step_avg:76.38ms +[2025-09-02 
17:12:13] [Rank 0] step:9441/10000 train_time:721123ms step_avg:76.38ms +[2025-09-02 17:12:14] [Rank 0] step:9461/10000 train_time:722761ms step_avg:76.39ms +[2025-09-02 17:12:14] [Rank 0] step:9461/10000 train_time:722761ms step_avg:76.39ms +[2025-09-02 17:12:16] [Rank 0] step:9481/10000 train_time:724395ms step_avg:76.40ms +[2025-09-02 17:12:16] [Rank 0] step:9481/10000 train_time:724395ms step_avg:76.40ms +[2025-09-02 17:12:18] [Rank 0] step:9501/10000 train_time:726041ms step_avg:76.42ms +[2025-09-02 17:12:18] [Rank 0] step:9501/10000 train_time:726041ms step_avg:76.42ms +[2025-09-02 17:12:19] [Rank 0] step:9521/10000 train_time:727668ms step_avg:76.43ms +[2025-09-02 17:12:19] [Rank 0] step:9521/10000 train_time:727668ms step_avg:76.43ms +[2025-09-02 17:12:21] [Rank 0] step:9541/10000 train_time:729301ms step_avg:76.44ms +[2025-09-02 17:12:21] [Rank 0] step:9541/10000 train_time:729301ms step_avg:76.44ms +[2025-09-02 17:12:23] [Rank 0] step:9561/10000 train_time:730927ms step_avg:76.45ms +[2025-09-02 17:12:23] [Rank 0] step:9561/10000 train_time:730927ms step_avg:76.45ms +[2025-09-02 17:12:24] [Rank 0] step:9581/10000 train_time:732558ms step_avg:76.46ms +[2025-09-02 17:12:24] [Rank 0] step:9581/10000 train_time:732558ms step_avg:76.46ms +[2025-09-02 17:12:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:12:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:12:38] [Rank 0] PRINT: step:9600/10000 val_loss:3.7872 svd_entropy: attn_qk:H=0.7673,top10E=0.25,eRank=186.5,q75/q25=86.16 attn_vo:H=0.7967,top10E=0.14,eRank=273.5,q75/q25=inf mlp_w1:H=0.7968,top10E=0.25,eRank=219.1,q75/q25=15.93 mlp_w2:H=0.8657,top10E=0.12,eRank=319.8,q75/q25=19.23 vo_prod:H=0.6760,top10E=0.21,eRank=130.6,q75/q25=inf train_time:734364ms step_avg:76.50ms +[2025-09-02 17:12:38] [Rank 0] PRINT: step:9600/10000 val_loss:3.7872 svd_entropy: attn_qk:H=0.7673,top10E=0.25,eRank=186.5,q75/q25=86.16 attn_vo:H=0.7967,top10E=0.14,eRank=273.5,q75/q25=inf mlp_w1:H=0.7968,top10E=0.25,eRank=219.1,q75/q25=15.93 mlp_w2:H=0.8657,top10E=0.12,eRank=319.8,q75/q25=19.23 vo_prod:H=0.6760,top10E=0.21,eRank=130.6,q75/q25=inf train_time:734364ms step_avg:76.50ms +[2025-09-02 17:12:38] [Rank 0] step:9601/10000 train_time:734376ms step_avg:76.49ms +[2025-09-02 17:12:38] [Rank 0] step:9601/10000 train_time:734376ms step_avg:76.49ms +[2025-09-02 17:12:39] [Rank 0] step:9621/10000 train_time:735846ms step_avg:76.48ms +[2025-09-02 17:12:39] [Rank 0] step:9621/10000 train_time:735846ms step_avg:76.48ms +[2025-09-02 17:12:41] [Rank 0] step:9641/10000 train_time:737480ms step_avg:76.49ms +[2025-09-02 17:12:41] [Rank 0] step:9641/10000 train_time:737480ms step_avg:76.49ms +[2025-09-02 17:12:43] [Rank 0] step:9661/10000 train_time:739140ms step_avg:76.51ms +[2025-09-02 17:12:43] [Rank 0] step:9661/10000 train_time:739140ms step_avg:76.51ms +[2025-09-02 17:12:44] [Rank 0] step:9681/10000 train_time:740791ms step_avg:76.52ms +[2025-09-02 17:12:44] [Rank 0] step:9681/10000 train_time:740791ms step_avg:76.52ms +[2025-09-02 17:12:46] [Rank 0] step:9701/10000 train_time:742459ms step_avg:76.53ms +[2025-09-02 17:12:46] [Rank 0] step:9701/10000 train_time:742459ms step_avg:76.53ms +[2025-09-02 17:12:48] [Rank 0] step:9721/10000 train_time:744107ms step_avg:76.55ms +[2025-09-02 17:12:48] [Rank 0] step:9721/10000 train_time:744107ms step_avg:76.55ms +[2025-09-02 17:12:49] [Rank 
0] step:9741/10000 train_time:745782ms step_avg:76.56ms +[2025-09-02 17:12:49] [Rank 0] step:9741/10000 train_time:745782ms step_avg:76.56ms +[2025-09-02 17:12:51] [Rank 0] step:9761/10000 train_time:747439ms step_avg:76.57ms +[2025-09-02 17:12:51] [Rank 0] step:9761/10000 train_time:747439ms step_avg:76.57ms +[2025-09-02 17:12:53] [Rank 0] step:9781/10000 train_time:749109ms step_avg:76.59ms +[2025-09-02 17:12:53] [Rank 0] step:9781/10000 train_time:749109ms step_avg:76.59ms +[2025-09-02 17:12:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:12:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:13:06] [Rank 0] PRINT: step:9800/10000 val_loss:3.7813 svd_entropy: attn_qk:H=0.7676,top10E=0.25,eRank=186.7,q75/q25=86.23 attn_vo:H=0.7969,top10E=0.14,eRank=273.8,q75/q25=inf mlp_w1:H=0.7971,top10E=0.25,eRank=219.5,q75/q25=15.94 mlp_w2:H=0.8659,top10E=0.12,eRank=320.3,q75/q25=19.18 vo_prod:H=0.6763,top10E=0.21,eRank=130.9,q75/q25=inf train_time:750944ms step_avg:76.63ms +[2025-09-02 17:13:06] [Rank 0] PRINT: step:9800/10000 val_loss:3.7813 svd_entropy: attn_qk:H=0.7676,top10E=0.25,eRank=186.7,q75/q25=86.23 attn_vo:H=0.7969,top10E=0.14,eRank=273.8,q75/q25=inf mlp_w1:H=0.7971,top10E=0.25,eRank=219.5,q75/q25=15.94 mlp_w2:H=0.8659,top10E=0.12,eRank=320.3,q75/q25=19.18 vo_prod:H=0.6763,top10E=0.21,eRank=130.9,q75/q25=inf train_time:750944ms step_avg:76.63ms +[2025-09-02 17:13:06] [Rank 0] step:9801/10000 train_time:750956ms step_avg:76.62ms +[2025-09-02 17:13:06] [Rank 0] step:9801/10000 train_time:750956ms step_avg:76.62ms +[2025-09-02 17:13:08] [Rank 0] step:9821/10000 train_time:752448ms step_avg:76.62ms +[2025-09-02 17:13:08] [Rank 0] step:9821/10000 train_time:752448ms step_avg:76.62ms +[2025-09-02 17:13:09] [Rank 0] step:9841/10000 train_time:754115ms step_avg:76.63ms +[2025-09-02 
17:13:09] [Rank 0] step:9841/10000 train_time:754115ms step_avg:76.63ms +[2025-09-02 17:13:11] [Rank 0] step:9861/10000 train_time:755761ms step_avg:76.64ms +[2025-09-02 17:13:11] [Rank 0] step:9861/10000 train_time:755761ms step_avg:76.64ms +[2025-09-02 17:13:13] [Rank 0] step:9881/10000 train_time:757400ms step_avg:76.65ms +[2025-09-02 17:13:13] [Rank 0] step:9881/10000 train_time:757400ms step_avg:76.65ms +[2025-09-02 17:13:14] [Rank 0] step:9901/10000 train_time:759060ms step_avg:76.66ms +[2025-09-02 17:13:14] [Rank 0] step:9901/10000 train_time:759060ms step_avg:76.66ms +[2025-09-02 17:13:16] [Rank 0] step:9921/10000 train_time:760713ms step_avg:76.68ms +[2025-09-02 17:13:16] [Rank 0] step:9921/10000 train_time:760713ms step_avg:76.68ms +[2025-09-02 17:13:18] [Rank 0] step:9941/10000 train_time:762371ms step_avg:76.69ms +[2025-09-02 17:13:18] [Rank 0] step:9941/10000 train_time:762371ms step_avg:76.69ms +[2025-09-02 17:13:19] [Rank 0] step:9961/10000 train_time:764024ms step_avg:76.70ms +[2025-09-02 17:13:19] [Rank 0] step:9961/10000 train_time:764024ms step_avg:76.70ms +[2025-09-02 17:13:21] [Rank 0] step:9981/10000 train_time:765675ms step_avg:76.71ms +[2025-09-02 17:13:21] [Rank 0] step:9981/10000 train_time:765675ms step_avg:76.71ms +[2025-09-02 17:13:23] [Rank 0] step:10000/10000 train_time:767252ms step_avg:76.73ms +[2025-09-02 17:13:23] [Rank 0] step:10000/10000 train_time:767252ms step_avg:76.73ms +[2025-09-02 17:13:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:13:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:13:34] [Rank 0] PRINT: step:10000/10000 val_loss:3.7758 svd_entropy: attn_qk:H=0.7677,top10E=0.25,eRank=186.8,q75/q25=86.15 attn_vo:H=0.7971,top10E=0.14,eRank=274.0,q75/q25=inf mlp_w1:H=0.7973,top10E=0.25,eRank=219.7,q75/q25=15.95 mlp_w2:H=0.8661,top10E=0.12,eRank=320.6,q75/q25=19.17 vo_prod:H=0.6766,top10E=0.21,eRank=131.2,q75/q25=inf train_time:767508ms step_avg:76.75ms +[2025-09-02 17:13:34] [Rank 0] PRINT: step:10000/10000 val_loss:3.7758 svd_entropy: attn_qk:H=0.7677,top10E=0.25,eRank=186.8,q75/q25=86.15 attn_vo:H=0.7971,top10E=0.14,eRank=274.0,q75/q25=inf mlp_w1:H=0.7973,top10E=0.25,eRank=219.7,q75/q25=15.95 mlp_w2:H=0.8661,top10E=0.12,eRank=320.6,q75/q25=19.17 vo_prod:H=0.6766,top10E=0.21,eRank=131.2,q75/q25=inf train_time:767508ms step_avg:76.75ms +[2025-09-02 17:13:34] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 17:13:34 2025 --- +[2025-09-02 17:13:34] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 17:13:34 2025 --- +[2025-09-02 17:13:34] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 17:13:34] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_49/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f91d435c9d577fcef1a66563898836b8c92a3007 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_49/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "070ce90d-e5b8-4d7e-af98-f5f4bf161782", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_49/training_log_070ce90d-e5b8-4d7e-af98-f5f4bf161782.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_49/training_log_070ce90d-e5b8-4d7e-af98-f5f4bf161782.txt new file mode 100644 index 0000000000000000000000000000000000000000..821281e6a6b68652d570f7d1e0c868bd641b4c44 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_49/training_log_070ce90d-e5b8-4d7e-af98-f5f4bf161782.txt @@ -0,0 +1,2984 @@ +[2025-09-02 18:02:20] [Rank 0] PRINT: --- Script Start: Tue Sep 2 18:02:20 2025 --- +[2025-09-02 18:02:20] [Rank 0] PRINT: --- Script Start: Tue Sep 2 18:02:20 2025 --- +[2025-09-02 18:02:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 18:02:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 18:02:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 18:02:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 18:02:20] [Rank 0] PRINT: Using fixed seed: 49 +[2025-09-02 18:02:20] [Rank 0] PRINT: Using fixed seed: 49 +[2025-09-02 18:02:20] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_49 +[2025-09-02 18:02:20] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_49 +[2025-09-02 18:02:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 18:02:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 18:02:20] [Rank 0] PRINT: Constructing model... +[2025-09-02 18:02:20] [Rank 0] PRINT: Constructing model... +[2025-09-02 18:02:22] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 18:02:22] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 18:02:22] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 18:02:22] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 18:02:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 18:02:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 18:02:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 18:02:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-02 18:02:22] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 18:02:22] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 18:02:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 18:02:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 18:02:22] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 18:02:22] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 18:02:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 18:02:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 18:02:22] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 18:02:22] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 18:02:22] [Rank 0] PRINT: Starting warmup... +[2025-09-02 18:02:22] [Rank 0] PRINT: Starting warmup... +[2025-09-02 18:03:03] [Rank 0] PRINT: Warmup complete. +[2025-09-02 18:03:03] [Rank 0] PRINT: Warmup complete. +[2025-09-02 18:03:04] [Rank 0] PRINT: Starting training... +[2025-09-02 18:03:04] [Rank 0] PRINT: Starting training... 
+[2025-09-02 18:03:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:03:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:03:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.7,q75/q25=10.30 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 18:03:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.7,q75/q25=10.30 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 18:03:22] [Rank 0] step:21/10000 train_time:1302ms step_avg:61.99ms +[2025-09-02 18:03:22] [Rank 0] step:21/10000 train_time:1302ms step_avg:61.99ms +[2025-09-02 18:03:23] [Rank 0] step:41/10000 train_time:2693ms step_avg:65.67ms +[2025-09-02 18:03:23] [Rank 0] step:41/10000 train_time:2693ms step_avg:65.67ms +[2025-09-02 18:03:25] [Rank 0] step:61/10000 train_time:4087ms step_avg:67.00ms +[2025-09-02 18:03:25] [Rank 0] step:61/10000 train_time:4087ms step_avg:67.00ms +[2025-09-02 18:03:26] [Rank 0] step:81/10000 train_time:5484ms step_avg:67.70ms +[2025-09-02 18:03:26] [Rank 0] step:81/10000 train_time:5484ms step_avg:67.70ms +[2025-09-02 18:03:27] [Rank 0] step:101/10000 train_time:6882ms step_avg:68.14ms +[2025-09-02 18:03:27] [Rank 0] step:101/10000 train_time:6882ms step_avg:68.14ms +[2025-09-02 18:03:29] [Rank 0] step:121/10000 train_time:8282ms step_avg:68.45ms +[2025-09-02 18:03:29] [Rank 0] step:121/10000 
train_time:8282ms step_avg:68.45ms +[2025-09-02 18:03:30] [Rank 0] step:141/10000 train_time:9680ms step_avg:68.65ms +[2025-09-02 18:03:30] [Rank 0] step:141/10000 train_time:9680ms step_avg:68.65ms +[2025-09-02 18:03:32] [Rank 0] step:161/10000 train_time:11081ms step_avg:68.82ms +[2025-09-02 18:03:32] [Rank 0] step:161/10000 train_time:11081ms step_avg:68.82ms +[2025-09-02 18:03:33] [Rank 0] step:181/10000 train_time:12480ms step_avg:68.95ms +[2025-09-02 18:03:33] [Rank 0] step:181/10000 train_time:12480ms step_avg:68.95ms +[2025-09-02 18:03:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:03:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:03:46] [Rank 0] PRINT: step:200/10000 val_loss:6.4756 svd_entropy: attn_qk:H=0.5020,top10E=0.72,eRank=75.0,q75/q25=12.09 attn_vo:H=0.4621,top10E=0.65,eRank=64.3,q75/q25=inf mlp_w1:H=0.4462,top10E=0.72,eRank=26.9,q75/q25=2.68 mlp_w2:H=0.1513,top10E=0.95,eRank=4.0,q75/q25=567.51 vo_prod:H=0.2454,top10E=0.86,eRank=9.0,q75/q25=inf train_time:14022ms step_avg:70.11ms +[2025-09-02 18:03:46] [Rank 0] PRINT: step:200/10000 val_loss:6.4756 svd_entropy: attn_qk:H=0.5020,top10E=0.72,eRank=75.0,q75/q25=12.09 attn_vo:H=0.4621,top10E=0.65,eRank=64.3,q75/q25=inf mlp_w1:H=0.4462,top10E=0.72,eRank=26.9,q75/q25=2.68 mlp_w2:H=0.1513,top10E=0.95,eRank=4.0,q75/q25=567.51 vo_prod:H=0.2454,top10E=0.86,eRank=9.0,q75/q25=inf train_time:14022ms step_avg:70.11ms +[2025-09-02 18:03:46] [Rank 0] step:201/10000 train_time:14035ms step_avg:69.83ms +[2025-09-02 18:03:46] [Rank 0] step:201/10000 train_time:14035ms step_avg:69.83ms +[2025-09-02 18:03:48] [Rank 0] step:221/10000 train_time:15327ms step_avg:69.35ms +[2025-09-02 18:03:48] [Rank 0] step:221/10000 train_time:15327ms step_avg:69.35ms +[2025-09-02 18:03:49] [Rank 0] step:241/10000 train_time:16725ms 
step_avg:69.40ms +[2025-09-02 18:03:49] [Rank 0] step:241/10000 train_time:16725ms step_avg:69.40ms +[2025-09-02 18:03:50] [Rank 0] step:261/10000 train_time:18124ms step_avg:69.44ms +[2025-09-02 18:03:50] [Rank 0] step:261/10000 train_time:18124ms step_avg:69.44ms +[2025-09-02 18:03:52] [Rank 0] step:281/10000 train_time:19525ms step_avg:69.48ms +[2025-09-02 18:03:52] [Rank 0] step:281/10000 train_time:19525ms step_avg:69.48ms +[2025-09-02 18:03:53] [Rank 0] step:301/10000 train_time:20925ms step_avg:69.52ms +[2025-09-02 18:03:53] [Rank 0] step:301/10000 train_time:20925ms step_avg:69.52ms +[2025-09-02 18:03:55] [Rank 0] step:321/10000 train_time:22328ms step_avg:69.56ms +[2025-09-02 18:03:55] [Rank 0] step:321/10000 train_time:22328ms step_avg:69.56ms +[2025-09-02 18:03:56] [Rank 0] step:341/10000 train_time:23728ms step_avg:69.58ms +[2025-09-02 18:03:56] [Rank 0] step:341/10000 train_time:23728ms step_avg:69.58ms +[2025-09-02 18:03:57] [Rank 0] step:361/10000 train_time:25130ms step_avg:69.61ms +[2025-09-02 18:03:57] [Rank 0] step:361/10000 train_time:25130ms step_avg:69.61ms +[2025-09-02 18:03:59] [Rank 0] step:381/10000 train_time:26532ms step_avg:69.64ms +[2025-09-02 18:03:59] [Rank 0] step:381/10000 train_time:26532ms step_avg:69.64ms +[2025-09-02 18:04:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:04:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:04:12] [Rank 0] PRINT: step:400/10000 val_loss:5.9650 svd_entropy: attn_qk:H=0.5471,top10E=0.63,eRank=82.7,q75/q25=13.20 attn_vo:H=0.5343,top10E=0.51,eRank=82.3,q75/q25=inf mlp_w1:H=0.4721,top10E=0.67,eRank=38.2,q75/q25=3.22 mlp_w2:H=0.5301,top10E=0.62,eRank=35.0,q75/q25=12.66 vo_prod:H=0.3553,top10E=0.77,eRank=16.2,q75/q25=inf train_time:28075ms step_avg:70.19ms +[2025-09-02 18:04:12] [Rank 0] PRINT: step:400/10000 val_loss:5.9650 svd_entropy: attn_qk:H=0.5471,top10E=0.63,eRank=82.7,q75/q25=13.20 attn_vo:H=0.5343,top10E=0.51,eRank=82.3,q75/q25=inf mlp_w1:H=0.4721,top10E=0.67,eRank=38.2,q75/q25=3.22 mlp_w2:H=0.5301,top10E=0.62,eRank=35.0,q75/q25=12.66 vo_prod:H=0.3553,top10E=0.77,eRank=16.2,q75/q25=inf train_time:28075ms step_avg:70.19ms +[2025-09-02 18:04:12] [Rank 0] step:401/10000 train_time:28088ms step_avg:70.05ms +[2025-09-02 18:04:12] [Rank 0] step:401/10000 train_time:28088ms step_avg:70.05ms +[2025-09-02 18:04:13] [Rank 0] step:421/10000 train_time:29350ms step_avg:69.72ms +[2025-09-02 18:04:13] [Rank 0] step:421/10000 train_time:29350ms step_avg:69.72ms +[2025-09-02 18:04:15] [Rank 0] step:441/10000 train_time:30749ms step_avg:69.73ms +[2025-09-02 18:04:15] [Rank 0] step:441/10000 train_time:30749ms step_avg:69.73ms +[2025-09-02 18:04:16] [Rank 0] step:461/10000 train_time:32152ms step_avg:69.75ms +[2025-09-02 18:04:16] [Rank 0] step:461/10000 train_time:32152ms step_avg:69.75ms +[2025-09-02 18:04:18] [Rank 0] step:481/10000 train_time:33554ms step_avg:69.76ms +[2025-09-02 18:04:18] [Rank 0] step:481/10000 train_time:33554ms step_avg:69.76ms +[2025-09-02 18:04:19] [Rank 0] step:501/10000 train_time:34957ms step_avg:69.77ms +[2025-09-02 18:04:19] [Rank 0] step:501/10000 train_time:34957ms step_avg:69.77ms +[2025-09-02 18:04:20] [Rank 0] step:521/10000 train_time:36360ms step_avg:69.79ms +[2025-09-02 18:04:20] [Rank 0] step:521/10000 train_time:36360ms step_avg:69.79ms +[2025-09-02 18:04:22] [Rank 0] step:541/10000 train_time:37763ms 
step_avg:69.80ms +[2025-09-02 18:04:22] [Rank 0] step:541/10000 train_time:37763ms step_avg:69.80ms +[2025-09-02 18:04:23] [Rank 0] step:561/10000 train_time:39166ms step_avg:69.82ms +[2025-09-02 18:04:23] [Rank 0] step:561/10000 train_time:39166ms step_avg:69.82ms +[2025-09-02 18:04:25] [Rank 0] step:581/10000 train_time:40569ms step_avg:69.83ms +[2025-09-02 18:04:25] [Rank 0] step:581/10000 train_time:40569ms step_avg:69.83ms +[2025-09-02 18:04:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:04:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:04:38] [Rank 0] PRINT: step:600/10000 val_loss:5.6787 svd_entropy: attn_qk:H=0.5769,top10E=0.56,eRank=89.3,q75/q25=14.61 attn_vo:H=0.5754,top10E=0.45,eRank=97.6,q75/q25=inf mlp_w1:H=0.5068,top10E=0.62,eRank=48.0,q75/q25=3.61 mlp_w2:H=0.6337,top10E=0.46,eRank=68.3,q75/q25=8.68 vo_prod:H=0.4160,top10E=0.67,eRank=22.8,q75/q25=inf train_time:42112ms step_avg:70.19ms +[2025-09-02 18:04:38] [Rank 0] PRINT: step:600/10000 val_loss:5.6787 svd_entropy: attn_qk:H=0.5769,top10E=0.56,eRank=89.3,q75/q25=14.61 attn_vo:H=0.5754,top10E=0.45,eRank=97.6,q75/q25=inf mlp_w1:H=0.5068,top10E=0.62,eRank=48.0,q75/q25=3.61 mlp_w2:H=0.6337,top10E=0.46,eRank=68.3,q75/q25=8.68 vo_prod:H=0.4160,top10E=0.67,eRank=22.8,q75/q25=inf train_time:42112ms step_avg:70.19ms +[2025-09-02 18:04:38] [Rank 0] step:601/10000 train_time:42124ms step_avg:70.09ms +[2025-09-02 18:04:38] [Rank 0] step:601/10000 train_time:42124ms step_avg:70.09ms +[2025-09-02 18:04:39] [Rank 0] step:621/10000 train_time:43396ms step_avg:69.88ms +[2025-09-02 18:04:39] [Rank 0] step:621/10000 train_time:43396ms step_avg:69.88ms +[2025-09-02 18:04:41] [Rank 0] step:641/10000 train_time:44797ms step_avg:69.89ms +[2025-09-02 18:04:41] [Rank 0] step:641/10000 train_time:44797ms step_avg:69.89ms 
+[2025-09-02 18:04:42] [Rank 0] step:661/10000 train_time:46199ms step_avg:69.89ms +[2025-09-02 18:04:42] [Rank 0] step:661/10000 train_time:46199ms step_avg:69.89ms +[2025-09-02 18:04:43] [Rank 0] step:681/10000 train_time:47652ms step_avg:69.97ms +[2025-09-02 18:04:43] [Rank 0] step:681/10000 train_time:47652ms step_avg:69.97ms +[2025-09-02 18:04:45] [Rank 0] step:701/10000 train_time:49055ms step_avg:69.98ms +[2025-09-02 18:04:45] [Rank 0] step:701/10000 train_time:49055ms step_avg:69.98ms +[2025-09-02 18:04:46] [Rank 0] step:721/10000 train_time:50459ms step_avg:69.98ms +[2025-09-02 18:04:46] [Rank 0] step:721/10000 train_time:50459ms step_avg:69.98ms +[2025-09-02 18:04:48] [Rank 0] step:741/10000 train_time:51862ms step_avg:69.99ms +[2025-09-02 18:04:48] [Rank 0] step:741/10000 train_time:51862ms step_avg:69.99ms +[2025-09-02 18:04:49] [Rank 0] step:761/10000 train_time:53277ms step_avg:70.01ms +[2025-09-02 18:04:49] [Rank 0] step:761/10000 train_time:53277ms step_avg:70.01ms +[2025-09-02 18:04:50] [Rank 0] step:781/10000 train_time:54693ms step_avg:70.03ms +[2025-09-02 18:04:50] [Rank 0] step:781/10000 train_time:54693ms step_avg:70.03ms +[2025-09-02 18:04:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:04:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:05:03] [Rank 0] PRINT: step:800/10000 val_loss:5.4544 svd_entropy: attn_qk:H=0.6003,top10E=0.51,eRank=95.0,q75/q25=16.29 attn_vo:H=0.6053,top10E=0.41,eRank=111.9,q75/q25=inf mlp_w1:H=0.5376,top10E=0.58,eRank=56.1,q75/q25=3.94 mlp_w2:H=0.6801,top10E=0.39,eRank=92.6,q75/q25=8.34 vo_prod:H=0.4563,top10E=0.59,eRank=29.0,q75/q25=inf train_time:56252ms step_avg:70.31ms +[2025-09-02 18:05:03] [Rank 0] PRINT: step:800/10000 val_loss:5.4544 svd_entropy: attn_qk:H=0.6003,top10E=0.51,eRank=95.0,q75/q25=16.29 attn_vo:H=0.6053,top10E=0.41,eRank=111.9,q75/q25=inf mlp_w1:H=0.5376,top10E=0.58,eRank=56.1,q75/q25=3.94 mlp_w2:H=0.6801,top10E=0.39,eRank=92.6,q75/q25=8.34 vo_prod:H=0.4563,top10E=0.59,eRank=29.0,q75/q25=inf train_time:56252ms step_avg:70.31ms +[2025-09-02 18:05:04] [Rank 0] step:801/10000 train_time:56264ms step_avg:70.24ms +[2025-09-02 18:05:04] [Rank 0] step:801/10000 train_time:56264ms step_avg:70.24ms +[2025-09-02 18:05:05] [Rank 0] step:821/10000 train_time:57545ms step_avg:70.09ms +[2025-09-02 18:05:05] [Rank 0] step:821/10000 train_time:57545ms step_avg:70.09ms +[2025-09-02 18:05:06] [Rank 0] step:841/10000 train_time:58960ms step_avg:70.11ms +[2025-09-02 18:05:06] [Rank 0] step:841/10000 train_time:58960ms step_avg:70.11ms +[2025-09-02 18:05:08] [Rank 0] step:861/10000 train_time:60375ms step_avg:70.12ms +[2025-09-02 18:05:08] [Rank 0] step:861/10000 train_time:60375ms step_avg:70.12ms +[2025-09-02 18:05:09] [Rank 0] step:881/10000 train_time:61791ms step_avg:70.14ms +[2025-09-02 18:05:09] [Rank 0] step:881/10000 train_time:61791ms step_avg:70.14ms +[2025-09-02 18:05:11] [Rank 0] step:901/10000 train_time:63208ms step_avg:70.15ms +[2025-09-02 18:05:11] [Rank 0] step:901/10000 train_time:63208ms step_avg:70.15ms +[2025-09-02 18:05:12] [Rank 0] step:921/10000 train_time:64626ms step_avg:70.17ms +[2025-09-02 18:05:12] [Rank 0] step:921/10000 train_time:64626ms step_avg:70.17ms +[2025-09-02 18:05:13] [Rank 0] step:941/10000 train_time:66044ms 
step_avg:70.18ms +[2025-09-02 18:05:13] [Rank 0] step:941/10000 train_time:66044ms step_avg:70.18ms +[2025-09-02 18:05:15] [Rank 0] step:961/10000 train_time:67461ms step_avg:70.20ms +[2025-09-02 18:05:15] [Rank 0] step:961/10000 train_time:67461ms step_avg:70.20ms +[2025-09-02 18:05:16] [Rank 0] step:981/10000 train_time:68880ms step_avg:70.21ms +[2025-09-02 18:05:16] [Rank 0] step:981/10000 train_time:68880ms step_avg:70.21ms +[2025-09-02 18:05:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:05:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:05:29] [Rank 0] PRINT: step:1000/10000 val_loss:5.2948 svd_entropy: attn_qk:H=0.6200,top10E=0.48,eRank=100.7,q75/q25=18.45 attn_vo:H=0.6290,top10E=0.37,eRank=126.1,q75/q25=inf mlp_w1:H=0.5620,top10E=0.55,eRank=62.8,q75/q25=4.27 mlp_w2:H=0.7078,top10E=0.34,eRank=111.3,q75/q25=9.02 vo_prod:H=0.4840,top10E=0.54,eRank=34.5,q75/q25=inf train_time:70439ms step_avg:70.44ms +[2025-09-02 18:05:29] [Rank 0] PRINT: step:1000/10000 val_loss:5.2948 svd_entropy: attn_qk:H=0.6200,top10E=0.48,eRank=100.7,q75/q25=18.45 attn_vo:H=0.6290,top10E=0.37,eRank=126.1,q75/q25=inf mlp_w1:H=0.5620,top10E=0.55,eRank=62.8,q75/q25=4.27 mlp_w2:H=0.7078,top10E=0.34,eRank=111.3,q75/q25=9.02 vo_prod:H=0.4840,top10E=0.54,eRank=34.5,q75/q25=inf train_time:70439ms step_avg:70.44ms +[2025-09-02 18:05:29] [Rank 0] step:1001/10000 train_time:70452ms step_avg:70.38ms +[2025-09-02 18:05:29] [Rank 0] step:1001/10000 train_time:70452ms step_avg:70.38ms +[2025-09-02 18:05:31] [Rank 0] step:1021/10000 train_time:71746ms step_avg:70.27ms +[2025-09-02 18:05:31] [Rank 0] step:1021/10000 train_time:71746ms step_avg:70.27ms +[2025-09-02 18:05:32] [Rank 0] step:1041/10000 train_time:73162ms step_avg:70.28ms +[2025-09-02 18:05:32] [Rank 0] step:1041/10000 train_time:73162ms 
step_avg:70.28ms +[2025-09-02 18:05:34] [Rank 0] step:1061/10000 train_time:74578ms step_avg:70.29ms +[2025-09-02 18:05:34] [Rank 0] step:1061/10000 train_time:74578ms step_avg:70.29ms +[2025-09-02 18:05:35] [Rank 0] step:1081/10000 train_time:75993ms step_avg:70.30ms +[2025-09-02 18:05:35] [Rank 0] step:1081/10000 train_time:75993ms step_avg:70.30ms +[2025-09-02 18:05:37] [Rank 0] step:1101/10000 train_time:77410ms step_avg:70.31ms +[2025-09-02 18:05:37] [Rank 0] step:1101/10000 train_time:77410ms step_avg:70.31ms +[2025-09-02 18:05:38] [Rank 0] step:1121/10000 train_time:78826ms step_avg:70.32ms +[2025-09-02 18:05:38] [Rank 0] step:1121/10000 train_time:78826ms step_avg:70.32ms +[2025-09-02 18:05:39] [Rank 0] step:1141/10000 train_time:80244ms step_avg:70.33ms +[2025-09-02 18:05:39] [Rank 0] step:1141/10000 train_time:80244ms step_avg:70.33ms +[2025-09-02 18:05:41] [Rank 0] step:1161/10000 train_time:81662ms step_avg:70.34ms +[2025-09-02 18:05:41] [Rank 0] step:1161/10000 train_time:81662ms step_avg:70.34ms +[2025-09-02 18:05:42] [Rank 0] step:1181/10000 train_time:83079ms step_avg:70.35ms +[2025-09-02 18:05:42] [Rank 0] step:1181/10000 train_time:83079ms step_avg:70.35ms +[2025-09-02 18:05:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:05:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:05:55] [Rank 0] PRINT: step:1200/10000 val_loss:5.1321 svd_entropy: attn_qk:H=0.6355,top10E=0.45,eRank=105.9,q75/q25=21.40 attn_vo:H=0.6491,top10E=0.35,eRank=139.8,q75/q25=inf mlp_w1:H=0.5824,top10E=0.53,eRank=69.1,q75/q25=4.65 mlp_w2:H=0.7276,top10E=0.31,eRank=127.1,q75/q25=10.09 vo_prod:H=0.5047,top10E=0.50,eRank=39.5,q75/q25=inf train_time:84641ms step_avg:70.53ms +[2025-09-02 18:05:55] [Rank 0] PRINT: step:1200/10000 val_loss:5.1321 svd_entropy: attn_qk:H=0.6355,top10E=0.45,eRank=105.9,q75/q25=21.40 attn_vo:H=0.6491,top10E=0.35,eRank=139.8,q75/q25=inf mlp_w1:H=0.5824,top10E=0.53,eRank=69.1,q75/q25=4.65 mlp_w2:H=0.7276,top10E=0.31,eRank=127.1,q75/q25=10.09 vo_prod:H=0.5047,top10E=0.50,eRank=39.5,q75/q25=inf train_time:84641ms step_avg:70.53ms +[2025-09-02 18:05:55] [Rank 0] step:1201/10000 train_time:84653ms step_avg:70.49ms +[2025-09-02 18:05:55] [Rank 0] step:1201/10000 train_time:84653ms step_avg:70.49ms +[2025-09-02 18:05:57] [Rank 0] step:1221/10000 train_time:85936ms step_avg:70.38ms +[2025-09-02 18:05:57] [Rank 0] step:1221/10000 train_time:85936ms step_avg:70.38ms +[2025-09-02 18:05:58] [Rank 0] step:1241/10000 train_time:87351ms step_avg:70.39ms +[2025-09-02 18:05:58] [Rank 0] step:1241/10000 train_time:87351ms step_avg:70.39ms +[2025-09-02 18:06:00] [Rank 0] step:1261/10000 train_time:88766ms step_avg:70.39ms +[2025-09-02 18:06:00] [Rank 0] step:1261/10000 train_time:88766ms step_avg:70.39ms +[2025-09-02 18:06:01] [Rank 0] step:1281/10000 train_time:90182ms step_avg:70.40ms +[2025-09-02 18:06:01] [Rank 0] step:1281/10000 train_time:90182ms step_avg:70.40ms +[2025-09-02 18:06:02] [Rank 0] step:1301/10000 train_time:91599ms step_avg:70.41ms +[2025-09-02 18:06:02] [Rank 0] step:1301/10000 train_time:91599ms step_avg:70.41ms +[2025-09-02 18:06:04] [Rank 0] step:1321/10000 train_time:93017ms step_avg:70.41ms +[2025-09-02 18:06:04] [Rank 0] step:1321/10000 train_time:93017ms step_avg:70.41ms +[2025-09-02 18:06:05] [Rank 0] step:1341/10000 
train_time:94434ms step_avg:70.42ms +[2025-09-02 18:06:05] [Rank 0] step:1341/10000 train_time:94434ms step_avg:70.42ms +[2025-09-02 18:06:07] [Rank 0] step:1361/10000 train_time:95851ms step_avg:70.43ms +[2025-09-02 18:06:07] [Rank 0] step:1361/10000 train_time:95851ms step_avg:70.43ms +[2025-09-02 18:06:08] [Rank 0] step:1381/10000 train_time:97269ms step_avg:70.43ms +[2025-09-02 18:06:08] [Rank 0] step:1381/10000 train_time:97269ms step_avg:70.43ms +[2025-09-02 18:06:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:06:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:06:21] [Rank 0] PRINT: step:1400/10000 val_loss:4.9937 svd_entropy: attn_qk:H=0.6483,top10E=0.42,eRank=110.8,q75/q25=25.10 attn_vo:H=0.6661,top10E=0.32,eRank=151.6,q75/q25=inf mlp_w1:H=0.6006,top10E=0.50,eRank=75.2,q75/q25=5.08 mlp_w2:H=0.7447,top10E=0.28,eRank=142.4,q75/q25=11.33 vo_prod:H=0.5215,top10E=0.46,eRank=44.0,q75/q25=inf train_time:98829ms step_avg:70.59ms +[2025-09-02 18:06:21] [Rank 0] PRINT: step:1400/10000 val_loss:4.9937 svd_entropy: attn_qk:H=0.6483,top10E=0.42,eRank=110.8,q75/q25=25.10 attn_vo:H=0.6661,top10E=0.32,eRank=151.6,q75/q25=inf mlp_w1:H=0.6006,top10E=0.50,eRank=75.2,q75/q25=5.08 mlp_w2:H=0.7447,top10E=0.28,eRank=142.4,q75/q25=11.33 vo_prod:H=0.5215,top10E=0.46,eRank=44.0,q75/q25=inf train_time:98829ms step_avg:70.59ms +[2025-09-02 18:06:21] [Rank 0] step:1401/10000 train_time:98842ms step_avg:70.55ms +[2025-09-02 18:06:21] [Rank 0] step:1401/10000 train_time:98842ms step_avg:70.55ms +[2025-09-02 18:06:23] [Rank 0] step:1421/10000 train_time:100127ms step_avg:70.46ms +[2025-09-02 18:06:23] [Rank 0] step:1421/10000 train_time:100127ms step_avg:70.46ms +[2025-09-02 18:06:24] [Rank 0] step:1441/10000 train_time:101542ms step_avg:70.47ms +[2025-09-02 18:06:24] [Rank 0] step:1441/10000 
train_time:101542ms step_avg:70.47ms +[2025-09-02 18:06:26] [Rank 0] step:1461/10000 train_time:102959ms step_avg:70.47ms +[2025-09-02 18:06:26] [Rank 0] step:1461/10000 train_time:102959ms step_avg:70.47ms +[2025-09-02 18:06:27] [Rank 0] step:1481/10000 train_time:104377ms step_avg:70.48ms +[2025-09-02 18:06:27] [Rank 0] step:1481/10000 train_time:104377ms step_avg:70.48ms +[2025-09-02 18:06:28] [Rank 0] step:1501/10000 train_time:105803ms step_avg:70.49ms +[2025-09-02 18:06:28] [Rank 0] step:1501/10000 train_time:105803ms step_avg:70.49ms +[2025-09-02 18:06:30] [Rank 0] step:1521/10000 train_time:107230ms step_avg:70.50ms +[2025-09-02 18:06:30] [Rank 0] step:1521/10000 train_time:107230ms step_avg:70.50ms +[2025-09-02 18:06:31] [Rank 0] step:1541/10000 train_time:108658ms step_avg:70.51ms +[2025-09-02 18:06:31] [Rank 0] step:1541/10000 train_time:108658ms step_avg:70.51ms +[2025-09-02 18:06:33] [Rank 0] step:1561/10000 train_time:110087ms step_avg:70.52ms +[2025-09-02 18:06:33] [Rank 0] step:1561/10000 train_time:110087ms step_avg:70.52ms +[2025-09-02 18:06:34] [Rank 0] step:1581/10000 train_time:111516ms step_avg:70.54ms +[2025-09-02 18:06:34] [Rank 0] step:1581/10000 train_time:111516ms step_avg:70.54ms +[2025-09-02 18:06:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:06:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:06:47] [Rank 0] PRINT: step:1600/10000 val_loss:4.8484 svd_entropy: attn_qk:H=0.6587,top10E=0.40,eRank=114.9,q75/q25=30.05 attn_vo:H=0.6806,top10E=0.30,eRank=161.7,q75/q25=inf mlp_w1:H=0.6176,top10E=0.48,eRank=81.6,q75/q25=5.52 mlp_w2:H=0.7588,top10E=0.26,eRank=156.8,q75/q25=12.45 vo_prod:H=0.5367,top10E=0.43,eRank=48.8,q75/q25=inf train_time:113088ms step_avg:70.68ms +[2025-09-02 18:06:47] [Rank 0] PRINT: step:1600/10000 val_loss:4.8484 svd_entropy: attn_qk:H=0.6587,top10E=0.40,eRank=114.9,q75/q25=30.05 attn_vo:H=0.6806,top10E=0.30,eRank=161.7,q75/q25=inf mlp_w1:H=0.6176,top10E=0.48,eRank=81.6,q75/q25=5.52 mlp_w2:H=0.7588,top10E=0.26,eRank=156.8,q75/q25=12.45 vo_prod:H=0.5367,top10E=0.43,eRank=48.8,q75/q25=inf train_time:113088ms step_avg:70.68ms +[2025-09-02 18:06:47] [Rank 0] step:1601/10000 train_time:113100ms step_avg:70.64ms +[2025-09-02 18:06:47] [Rank 0] step:1601/10000 train_time:113100ms step_avg:70.64ms +[2025-09-02 18:06:49] [Rank 0] step:1621/10000 train_time:114388ms step_avg:70.57ms +[2025-09-02 18:06:49] [Rank 0] step:1621/10000 train_time:114388ms step_avg:70.57ms +[2025-09-02 18:06:50] [Rank 0] step:1641/10000 train_time:115816ms step_avg:70.58ms +[2025-09-02 18:06:50] [Rank 0] step:1641/10000 train_time:115816ms step_avg:70.58ms +[2025-09-02 18:06:52] [Rank 0] step:1661/10000 train_time:117245ms step_avg:70.59ms +[2025-09-02 18:06:52] [Rank 0] step:1661/10000 train_time:117245ms step_avg:70.59ms +[2025-09-02 18:06:53] [Rank 0] step:1681/10000 train_time:118675ms step_avg:70.60ms +[2025-09-02 18:06:53] [Rank 0] step:1681/10000 train_time:118675ms step_avg:70.60ms +[2025-09-02 18:06:55] [Rank 0] step:1701/10000 train_time:120105ms step_avg:70.61ms +[2025-09-02 18:06:55] [Rank 0] step:1701/10000 train_time:120105ms step_avg:70.61ms +[2025-09-02 18:06:56] [Rank 0] step:1721/10000 train_time:121533ms step_avg:70.62ms +[2025-09-02 18:06:56] [Rank 0] step:1721/10000 train_time:121533ms step_avg:70.62ms +[2025-09-02 18:06:57] [Rank 0] 
step:1741/10000 train_time:122963ms step_avg:70.63ms +[2025-09-02 18:06:57] [Rank 0] step:1741/10000 train_time:122963ms step_avg:70.63ms +[2025-09-02 18:06:59] [Rank 0] step:1761/10000 train_time:124393ms step_avg:70.64ms +[2025-09-02 18:06:59] [Rank 0] step:1761/10000 train_time:124393ms step_avg:70.64ms +[2025-09-02 18:07:00] [Rank 0] step:1781/10000 train_time:125822ms step_avg:70.65ms +[2025-09-02 18:07:00] [Rank 0] step:1781/10000 train_time:125822ms step_avg:70.65ms +[2025-09-02 18:07:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:07:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:07:14] [Rank 0] PRINT: step:1800/10000 val_loss:4.7341 svd_entropy: attn_qk:H=0.6685,top10E=0.39,eRank=119.1,q75/q25=34.90 attn_vo:H=0.6925,top10E=0.28,eRank=170.2,q75/q25=inf mlp_w1:H=0.6328,top10E=0.47,eRank=88.0,q75/q25=5.98 mlp_w2:H=0.7709,top10E=0.24,eRank=170.0,q75/q25=13.28 vo_prod:H=0.5491,top10E=0.41,eRank=53.0,q75/q25=inf train_time:127396ms step_avg:70.78ms +[2025-09-02 18:07:14] [Rank 0] PRINT: step:1800/10000 val_loss:4.7341 svd_entropy: attn_qk:H=0.6685,top10E=0.39,eRank=119.1,q75/q25=34.90 attn_vo:H=0.6925,top10E=0.28,eRank=170.2,q75/q25=inf mlp_w1:H=0.6328,top10E=0.47,eRank=88.0,q75/q25=5.98 mlp_w2:H=0.7709,top10E=0.24,eRank=170.0,q75/q25=13.28 vo_prod:H=0.5491,top10E=0.41,eRank=53.0,q75/q25=inf train_time:127396ms step_avg:70.78ms +[2025-09-02 18:07:14] [Rank 0] step:1801/10000 train_time:127408ms step_avg:70.74ms +[2025-09-02 18:07:14] [Rank 0] step:1801/10000 train_time:127408ms step_avg:70.74ms +[2025-09-02 18:07:15] [Rank 0] step:1821/10000 train_time:128722ms step_avg:70.69ms +[2025-09-02 18:07:15] [Rank 0] step:1821/10000 train_time:128722ms step_avg:70.69ms +[2025-09-02 18:07:16] [Rank 0] step:1841/10000 train_time:130149ms step_avg:70.69ms +[2025-09-02 18:07:16] 
[Rank 0] step:1841/10000 train_time:130149ms step_avg:70.69ms +[2025-09-02 18:07:18] [Rank 0] step:1861/10000 train_time:131576ms step_avg:70.70ms +[2025-09-02 18:07:18] [Rank 0] step:1861/10000 train_time:131576ms step_avg:70.70ms +[2025-09-02 18:07:19] [Rank 0] step:1881/10000 train_time:133003ms step_avg:70.71ms +[2025-09-02 18:07:19] [Rank 0] step:1881/10000 train_time:133003ms step_avg:70.71ms +[2025-09-02 18:07:21] [Rank 0] step:1901/10000 train_time:134430ms step_avg:70.72ms +[2025-09-02 18:07:21] [Rank 0] step:1901/10000 train_time:134430ms step_avg:70.72ms +[2025-09-02 18:07:22] [Rank 0] step:1921/10000 train_time:135857ms step_avg:70.72ms +[2025-09-02 18:07:22] [Rank 0] step:1921/10000 train_time:135857ms step_avg:70.72ms +[2025-09-02 18:07:24] [Rank 0] step:1941/10000 train_time:137285ms step_avg:70.73ms +[2025-09-02 18:07:24] [Rank 0] step:1941/10000 train_time:137285ms step_avg:70.73ms +[2025-09-02 18:07:25] [Rank 0] step:1961/10000 train_time:138714ms step_avg:70.74ms +[2025-09-02 18:07:25] [Rank 0] step:1961/10000 train_time:138714ms step_avg:70.74ms +[2025-09-02 18:07:26] [Rank 0] step:1981/10000 train_time:140144ms step_avg:70.74ms +[2025-09-02 18:07:26] [Rank 0] step:1981/10000 train_time:140144ms step_avg:70.74ms +[2025-09-02 18:07:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:07:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:07:40] [Rank 0] PRINT: step:2000/10000 val_loss:4.6600 svd_entropy: attn_qk:H=0.6768,top10E=0.37,eRank=122.9,q75/q25=40.24 attn_vo:H=0.7027,top10E=0.27,eRank=177.9,q75/q25=inf mlp_w1:H=0.6458,top10E=0.45,eRank=94.0,q75/q25=6.45 mlp_w2:H=0.7800,top10E=0.23,eRank=180.9,q75/q25=14.37 vo_prod:H=0.5605,top10E=0.38,eRank=57.4,q75/q25=inf train_time:141717ms step_avg:70.86ms +[2025-09-02 18:07:40] [Rank 0] PRINT: step:2000/10000 val_loss:4.6600 svd_entropy: attn_qk:H=0.6768,top10E=0.37,eRank=122.9,q75/q25=40.24 attn_vo:H=0.7027,top10E=0.27,eRank=177.9,q75/q25=inf mlp_w1:H=0.6458,top10E=0.45,eRank=94.0,q75/q25=6.45 mlp_w2:H=0.7800,top10E=0.23,eRank=180.9,q75/q25=14.37 vo_prod:H=0.5605,top10E=0.38,eRank=57.4,q75/q25=inf train_time:141717ms step_avg:70.86ms +[2025-09-02 18:07:40] [Rank 0] step:2001/10000 train_time:141730ms step_avg:70.83ms +[2025-09-02 18:07:40] [Rank 0] step:2001/10000 train_time:141730ms step_avg:70.83ms +[2025-09-02 18:07:41] [Rank 0] step:2021/10000 train_time:143019ms step_avg:70.77ms +[2025-09-02 18:07:41] [Rank 0] step:2021/10000 train_time:143019ms step_avg:70.77ms +[2025-09-02 18:07:43] [Rank 0] step:2041/10000 train_time:144568ms step_avg:70.83ms +[2025-09-02 18:07:43] [Rank 0] step:2041/10000 train_time:144568ms step_avg:70.83ms +[2025-09-02 18:07:44] [Rank 0] step:2061/10000 train_time:145994ms step_avg:70.84ms +[2025-09-02 18:07:44] [Rank 0] step:2061/10000 train_time:145994ms step_avg:70.84ms +[2025-09-02 18:07:46] [Rank 0] step:2081/10000 train_time:147420ms step_avg:70.84ms +[2025-09-02 18:07:46] [Rank 0] step:2081/10000 train_time:147420ms step_avg:70.84ms +[2025-09-02 18:07:47] [Rank 0] step:2101/10000 train_time:148847ms step_avg:70.85ms +[2025-09-02 18:07:47] [Rank 0] step:2101/10000 train_time:148847ms step_avg:70.85ms +[2025-09-02 18:07:48] [Rank 0] step:2121/10000 train_time:150277ms step_avg:70.85ms +[2025-09-02 18:07:48] [Rank 0] step:2121/10000 train_time:150277ms step_avg:70.85ms +[2025-09-02 18:07:50] [Rank 0] 
step:2141/10000 train_time:151705ms step_avg:70.86ms +[2025-09-02 18:07:50] [Rank 0] step:2141/10000 train_time:151705ms step_avg:70.86ms +[2025-09-02 18:07:51] [Rank 0] step:2161/10000 train_time:153144ms step_avg:70.87ms +[2025-09-02 18:07:51] [Rank 0] step:2161/10000 train_time:153144ms step_avg:70.87ms +[2025-09-02 18:07:53] [Rank 0] step:2181/10000 train_time:154575ms step_avg:70.87ms +[2025-09-02 18:07:53] [Rank 0] step:2181/10000 train_time:154575ms step_avg:70.87ms +[2025-09-02 18:07:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:07:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:08:06] [Rank 0] PRINT: step:2200/10000 val_loss:4.5789 svd_entropy: attn_qk:H=0.6841,top10E=0.36,eRank=126.5,q75/q25=44.91 attn_vo:H=0.7112,top10E=0.25,eRank=184.7,q75/q25=inf mlp_w1:H=0.6577,top10E=0.43,eRank=99.9,q75/q25=6.89 mlp_w2:H=0.7887,top10E=0.22,eRank=191.8,q75/q25=14.83 vo_prod:H=0.5699,top10E=0.37,eRank=61.3,q75/q25=inf train_time:156145ms step_avg:70.97ms +[2025-09-02 18:08:06] [Rank 0] PRINT: step:2200/10000 val_loss:4.5789 svd_entropy: attn_qk:H=0.6841,top10E=0.36,eRank=126.5,q75/q25=44.91 attn_vo:H=0.7112,top10E=0.25,eRank=184.7,q75/q25=inf mlp_w1:H=0.6577,top10E=0.43,eRank=99.9,q75/q25=6.89 mlp_w2:H=0.7887,top10E=0.22,eRank=191.8,q75/q25=14.83 vo_prod:H=0.5699,top10E=0.37,eRank=61.3,q75/q25=inf train_time:156145ms step_avg:70.97ms +[2025-09-02 18:08:06] [Rank 0] step:2201/10000 train_time:156157ms step_avg:70.95ms +[2025-09-02 18:08:06] [Rank 0] step:2201/10000 train_time:156157ms step_avg:70.95ms +[2025-09-02 18:08:07] [Rank 0] step:2221/10000 train_time:157445ms step_avg:70.89ms +[2025-09-02 18:08:07] [Rank 0] step:2221/10000 train_time:157445ms step_avg:70.89ms +[2025-09-02 18:08:09] [Rank 0] step:2241/10000 train_time:158904ms step_avg:70.91ms +[2025-09-02 18:08:09] 
[Rank 0] step:2241/10000 train_time:158904ms step_avg:70.91ms +[2025-09-02 18:08:10] [Rank 0] step:2261/10000 train_time:160376ms step_avg:70.93ms +[2025-09-02 18:08:10] [Rank 0] step:2261/10000 train_time:160376ms step_avg:70.93ms +[2025-09-02 18:08:12] [Rank 0] step:2281/10000 train_time:161847ms step_avg:70.95ms +[2025-09-02 18:08:12] [Rank 0] step:2281/10000 train_time:161847ms step_avg:70.95ms +[2025-09-02 18:08:13] [Rank 0] step:2301/10000 train_time:163319ms step_avg:70.98ms +[2025-09-02 18:08:13] [Rank 0] step:2301/10000 train_time:163319ms step_avg:70.98ms +[2025-09-02 18:08:15] [Rank 0] step:2321/10000 train_time:164791ms step_avg:71.00ms +[2025-09-02 18:08:15] [Rank 0] step:2321/10000 train_time:164791ms step_avg:71.00ms +[2025-09-02 18:08:16] [Rank 0] step:2341/10000 train_time:166262ms step_avg:71.02ms +[2025-09-02 18:08:16] [Rank 0] step:2341/10000 train_time:166262ms step_avg:71.02ms +[2025-09-02 18:08:18] [Rank 0] step:2361/10000 train_time:167736ms step_avg:71.04ms +[2025-09-02 18:08:18] [Rank 0] step:2361/10000 train_time:167736ms step_avg:71.04ms +[2025-09-02 18:08:19] [Rank 0] step:2381/10000 train_time:169208ms step_avg:71.07ms +[2025-09-02 18:08:19] [Rank 0] step:2381/10000 train_time:169208ms step_avg:71.07ms +[2025-09-02 18:08:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:08:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:08:32] [Rank 0] PRINT: step:2400/10000 val_loss:4.4996 svd_entropy: attn_qk:H=0.6901,top10E=0.35,eRank=129.5,q75/q25=49.98 attn_vo:H=0.7188,top10E=0.24,eRank=190.9,q75/q25=inf mlp_w1:H=0.6692,top10E=0.42,eRank=106.1,q75/q25=7.27 mlp_w2:H=0.7969,top10E=0.21,eRank=202.7,q75/q25=15.21 vo_prod:H=0.5781,top10E=0.35,eRank=64.9,q75/q25=inf train_time:170828ms step_avg:71.18ms +[2025-09-02 18:08:32] [Rank 0] PRINT: step:2400/10000 val_loss:4.4996 svd_entropy: attn_qk:H=0.6901,top10E=0.35,eRank=129.5,q75/q25=49.98 attn_vo:H=0.7188,top10E=0.24,eRank=190.9,q75/q25=inf mlp_w1:H=0.6692,top10E=0.42,eRank=106.1,q75/q25=7.27 mlp_w2:H=0.7969,top10E=0.21,eRank=202.7,q75/q25=15.21 vo_prod:H=0.5781,top10E=0.35,eRank=64.9,q75/q25=inf train_time:170828ms step_avg:71.18ms +[2025-09-02 18:08:32] [Rank 0] step:2401/10000 train_time:170841ms step_avg:71.15ms +[2025-09-02 18:08:32] [Rank 0] step:2401/10000 train_time:170841ms step_avg:71.15ms +[2025-09-02 18:08:34] [Rank 0] step:2421/10000 train_time:172188ms step_avg:71.12ms +[2025-09-02 18:08:34] [Rank 0] step:2421/10000 train_time:172188ms step_avg:71.12ms +[2025-09-02 18:08:35] [Rank 0] step:2441/10000 train_time:173673ms step_avg:71.15ms +[2025-09-02 18:08:35] [Rank 0] step:2441/10000 train_time:173673ms step_avg:71.15ms +[2025-09-02 18:08:37] [Rank 0] step:2461/10000 train_time:175146ms step_avg:71.17ms +[2025-09-02 18:08:37] [Rank 0] step:2461/10000 train_time:175146ms step_avg:71.17ms +[2025-09-02 18:08:38] [Rank 0] step:2481/10000 train_time:176619ms step_avg:71.19ms +[2025-09-02 18:08:38] [Rank 0] step:2481/10000 train_time:176619ms step_avg:71.19ms +[2025-09-02 18:08:40] [Rank 0] step:2501/10000 train_time:178098ms step_avg:71.21ms +[2025-09-02 18:08:40] [Rank 0] step:2501/10000 train_time:178098ms step_avg:71.21ms +[2025-09-02 18:08:41] [Rank 0] step:2521/10000 train_time:179571ms step_avg:71.23ms +[2025-09-02 18:08:41] [Rank 0] step:2521/10000 train_time:179571ms step_avg:71.23ms +[2025-09-02 18:08:43] [Rank 0] 
step:2541/10000 train_time:181046ms step_avg:71.25ms +[2025-09-02 18:08:43] [Rank 0] step:2541/10000 train_time:181046ms step_avg:71.25ms +[2025-09-02 18:08:44] [Rank 0] step:2561/10000 train_time:182517ms step_avg:71.27ms +[2025-09-02 18:08:44] [Rank 0] step:2561/10000 train_time:182517ms step_avg:71.27ms +[2025-09-02 18:08:46] [Rank 0] step:2581/10000 train_time:183991ms step_avg:71.29ms +[2025-09-02 18:08:46] [Rank 0] step:2581/10000 train_time:183991ms step_avg:71.29ms +[2025-09-02 18:08:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:08:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:08:59] [Rank 0] PRINT: step:2600/10000 val_loss:4.4371 svd_entropy: attn_qk:H=0.6958,top10E=0.34,eRank=132.7,q75/q25=54.15 attn_vo:H=0.7256,top10E=0.23,eRank=196.6,q75/q25=inf mlp_w1:H=0.6794,top10E=0.41,eRank=112.0,q75/q25=7.65 mlp_w2:H=0.8036,top10E=0.20,eRank=212.2,q75/q25=15.58 vo_prod:H=0.5858,top10E=0.34,eRank=68.5,q75/q25=inf train_time:185615ms step_avg:71.39ms +[2025-09-02 18:08:59] [Rank 0] PRINT: step:2600/10000 val_loss:4.4371 svd_entropy: attn_qk:H=0.6958,top10E=0.34,eRank=132.7,q75/q25=54.15 attn_vo:H=0.7256,top10E=0.23,eRank=196.6,q75/q25=inf mlp_w1:H=0.6794,top10E=0.41,eRank=112.0,q75/q25=7.65 mlp_w2:H=0.8036,top10E=0.20,eRank=212.2,q75/q25=15.58 vo_prod:H=0.5858,top10E=0.34,eRank=68.5,q75/q25=inf train_time:185615ms step_avg:71.39ms +[2025-09-02 18:08:59] [Rank 0] step:2601/10000 train_time:185628ms step_avg:71.37ms +[2025-09-02 18:08:59] [Rank 0] step:2601/10000 train_time:185628ms step_avg:71.37ms +[2025-09-02 18:09:00] [Rank 0] step:2621/10000 train_time:186973ms step_avg:71.34ms +[2025-09-02 18:09:00] [Rank 0] step:2621/10000 train_time:186973ms step_avg:71.34ms +[2025-09-02 18:09:02] [Rank 0] step:2641/10000 train_time:188442ms step_avg:71.35ms +[2025-09-02 
18:09:02] [Rank 0] step:2641/10000 train_time:188442ms step_avg:71.35ms +[2025-09-02 18:09:03] [Rank 0] step:2661/10000 train_time:189912ms step_avg:71.37ms +[2025-09-02 18:09:03] [Rank 0] step:2661/10000 train_time:189912ms step_avg:71.37ms +[2025-09-02 18:09:05] [Rank 0] step:2681/10000 train_time:191383ms step_avg:71.38ms +[2025-09-02 18:09:05] [Rank 0] step:2681/10000 train_time:191383ms step_avg:71.38ms +[2025-09-02 18:09:06] [Rank 0] step:2701/10000 train_time:192853ms step_avg:71.40ms +[2025-09-02 18:09:06] [Rank 0] step:2701/10000 train_time:192853ms step_avg:71.40ms +[2025-09-02 18:09:08] [Rank 0] step:2721/10000 train_time:194323ms step_avg:71.42ms +[2025-09-02 18:09:08] [Rank 0] step:2721/10000 train_time:194323ms step_avg:71.42ms +[2025-09-02 18:09:09] [Rank 0] step:2741/10000 train_time:195794ms step_avg:71.43ms +[2025-09-02 18:09:09] [Rank 0] step:2741/10000 train_time:195794ms step_avg:71.43ms +[2025-09-02 18:09:11] [Rank 0] step:2761/10000 train_time:197265ms step_avg:71.45ms +[2025-09-02 18:09:11] [Rank 0] step:2761/10000 train_time:197265ms step_avg:71.45ms +[2025-09-02 18:09:12] [Rank 0] step:2781/10000 train_time:198841ms step_avg:71.50ms +[2025-09-02 18:09:12] [Rank 0] step:2781/10000 train_time:198841ms step_avg:71.50ms +[2025-09-02 18:09:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:09:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:09:25] [Rank 0] PRINT: step:2800/10000 val_loss:4.3931 svd_entropy: attn_qk:H=0.7014,top10E=0.34,eRank=135.9,q75/q25=58.71 attn_vo:H=0.7317,top10E=0.22,eRank=202.0,q75/q25=inf mlp_w1:H=0.6887,top10E=0.39,eRank=117.8,q75/q25=8.04 mlp_w2:H=0.8094,top10E=0.19,eRank=220.6,q75/q25=15.90 vo_prod:H=0.5929,top10E=0.33,eRank=72.0,q75/q25=inf train_time:200462ms step_avg:71.59ms +[2025-09-02 18:09:25] [Rank 0] PRINT: step:2800/10000 val_loss:4.3931 svd_entropy: attn_qk:H=0.7014,top10E=0.34,eRank=135.9,q75/q25=58.71 attn_vo:H=0.7317,top10E=0.22,eRank=202.0,q75/q25=inf mlp_w1:H=0.6887,top10E=0.39,eRank=117.8,q75/q25=8.04 mlp_w2:H=0.8094,top10E=0.19,eRank=220.6,q75/q25=15.90 vo_prod:H=0.5929,top10E=0.33,eRank=72.0,q75/q25=inf train_time:200462ms step_avg:71.59ms +[2025-09-02 18:09:26] [Rank 0] step:2801/10000 train_time:200475ms step_avg:71.57ms +[2025-09-02 18:09:26] [Rank 0] step:2801/10000 train_time:200475ms step_avg:71.57ms +[2025-09-02 18:09:27] [Rank 0] step:2821/10000 train_time:201816ms step_avg:71.54ms +[2025-09-02 18:09:27] [Rank 0] step:2821/10000 train_time:201816ms step_avg:71.54ms +[2025-09-02 18:09:29] [Rank 0] step:2841/10000 train_time:203285ms step_avg:71.55ms +[2025-09-02 18:09:29] [Rank 0] step:2841/10000 train_time:203285ms step_avg:71.55ms +[2025-09-02 18:09:30] [Rank 0] step:2861/10000 train_time:204756ms step_avg:71.57ms +[2025-09-02 18:09:30] [Rank 0] step:2861/10000 train_time:204756ms step_avg:71.57ms +[2025-09-02 18:09:31] [Rank 0] step:2881/10000 train_time:206226ms step_avg:71.58ms +[2025-09-02 18:09:31] [Rank 0] step:2881/10000 train_time:206226ms step_avg:71.58ms +[2025-09-02 18:09:33] [Rank 0] step:2901/10000 train_time:207698ms step_avg:71.60ms +[2025-09-02 18:09:33] [Rank 0] step:2901/10000 train_time:207698ms step_avg:71.60ms +[2025-09-02 18:09:34] [Rank 0] step:2921/10000 train_time:209171ms step_avg:71.61ms +[2025-09-02 18:09:34] [Rank 0] step:2921/10000 train_time:209171ms step_avg:71.61ms +[2025-09-02 18:09:36] [Rank 0] 
step:2941/10000 train_time:210643ms step_avg:71.62ms +[2025-09-02 18:09:36] [Rank 0] step:2941/10000 train_time:210643ms step_avg:71.62ms +[2025-09-02 18:09:37] [Rank 0] step:2961/10000 train_time:212116ms step_avg:71.64ms +[2025-09-02 18:09:37] [Rank 0] step:2961/10000 train_time:212116ms step_avg:71.64ms +[2025-09-02 18:09:39] [Rank 0] step:2981/10000 train_time:213593ms step_avg:71.65ms +[2025-09-02 18:09:39] [Rank 0] step:2981/10000 train_time:213593ms step_avg:71.65ms +[2025-09-02 18:09:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:09:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:09:52] [Rank 0] PRINT: step:3000/10000 val_loss:4.3475 svd_entropy: attn_qk:H=0.7061,top10E=0.33,eRank=138.6,q75/q25=62.51 attn_vo:H=0.7371,top10E=0.22,eRank=206.9,q75/q25=inf mlp_w1:H=0.6976,top10E=0.38,eRank=123.6,q75/q25=8.36 mlp_w2:H=0.8151,top10E=0.18,eRank=229.0,q75/q25=16.04 vo_prod:H=0.5992,top10E=0.32,eRank=75.3,q75/q25=inf train_time:215221ms step_avg:71.74ms +[2025-09-02 18:09:52] [Rank 0] PRINT: step:3000/10000 val_loss:4.3475 svd_entropy: attn_qk:H=0.7061,top10E=0.33,eRank=138.6,q75/q25=62.51 attn_vo:H=0.7371,top10E=0.22,eRank=206.9,q75/q25=inf mlp_w1:H=0.6976,top10E=0.38,eRank=123.6,q75/q25=8.36 mlp_w2:H=0.8151,top10E=0.18,eRank=229.0,q75/q25=16.04 vo_prod:H=0.5992,top10E=0.32,eRank=75.3,q75/q25=inf train_time:215221ms step_avg:71.74ms +[2025-09-02 18:09:52] [Rank 0] step:3001/10000 train_time:215234ms step_avg:71.72ms +[2025-09-02 18:09:52] [Rank 0] step:3001/10000 train_time:215234ms step_avg:71.72ms +[2025-09-02 18:09:54] [Rank 0] step:3021/10000 train_time:216589ms step_avg:71.69ms +[2025-09-02 18:09:54] [Rank 0] step:3021/10000 train_time:216589ms step_avg:71.69ms +[2025-09-02 18:09:55] [Rank 0] step:3041/10000 train_time:218066ms step_avg:71.71ms +[2025-09-02 
18:09:55] [Rank 0] step:3041/10000 train_time:218066ms step_avg:71.71ms +[2025-09-02 18:09:57] [Rank 0] step:3061/10000 train_time:219544ms step_avg:71.72ms +[2025-09-02 18:09:57] [Rank 0] step:3061/10000 train_time:219544ms step_avg:71.72ms +[2025-09-02 18:09:58] [Rank 0] step:3081/10000 train_time:221024ms step_avg:71.74ms +[2025-09-02 18:09:58] [Rank 0] step:3081/10000 train_time:221024ms step_avg:71.74ms +[2025-09-02 18:10:00] [Rank 0] step:3101/10000 train_time:222528ms step_avg:71.76ms +[2025-09-02 18:10:00] [Rank 0] step:3101/10000 train_time:222528ms step_avg:71.76ms +[2025-09-02 18:10:01] [Rank 0] step:3121/10000 train_time:224008ms step_avg:71.77ms +[2025-09-02 18:10:01] [Rank 0] step:3121/10000 train_time:224008ms step_avg:71.77ms +[2025-09-02 18:10:03] [Rank 0] step:3141/10000 train_time:225487ms step_avg:71.79ms +[2025-09-02 18:10:03] [Rank 0] step:3141/10000 train_time:225487ms step_avg:71.79ms +[2025-09-02 18:10:04] [Rank 0] step:3161/10000 train_time:226969ms step_avg:71.80ms +[2025-09-02 18:10:04] [Rank 0] step:3161/10000 train_time:226969ms step_avg:71.80ms +[2025-09-02 18:10:06] [Rank 0] step:3181/10000 train_time:228450ms step_avg:71.82ms +[2025-09-02 18:10:06] [Rank 0] step:3181/10000 train_time:228450ms step_avg:71.82ms +[2025-09-02 18:10:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:10:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:10:19] [Rank 0] PRINT: step:3200/10000 val_loss:4.3119 svd_entropy: attn_qk:H=0.7104,top10E=0.32,eRank=141.2,q75/q25=65.64 attn_vo:H=0.7419,top10E=0.21,eRank=211.4,q75/q25=inf mlp_w1:H=0.7057,top10E=0.37,eRank=129.2,q75/q25=8.77 mlp_w2:H=0.8197,top10E=0.18,eRank=236.3,q75/q25=16.26 vo_prod:H=0.6048,top10E=0.31,eRank=78.4,q75/q25=inf train_time:230081ms step_avg:71.90ms +[2025-09-02 18:10:19] [Rank 0] PRINT: step:3200/10000 val_loss:4.3119 svd_entropy: attn_qk:H=0.7104,top10E=0.32,eRank=141.2,q75/q25=65.64 attn_vo:H=0.7419,top10E=0.21,eRank=211.4,q75/q25=inf mlp_w1:H=0.7057,top10E=0.37,eRank=129.2,q75/q25=8.77 mlp_w2:H=0.8197,top10E=0.18,eRank=236.3,q75/q25=16.26 vo_prod:H=0.6048,top10E=0.31,eRank=78.4,q75/q25=inf train_time:230081ms step_avg:71.90ms +[2025-09-02 18:10:19] [Rank 0] step:3201/10000 train_time:230094ms step_avg:71.88ms +[2025-09-02 18:10:19] [Rank 0] step:3201/10000 train_time:230094ms step_avg:71.88ms +[2025-09-02 18:10:20] [Rank 0] step:3221/10000 train_time:231428ms step_avg:71.85ms +[2025-09-02 18:10:20] [Rank 0] step:3221/10000 train_time:231428ms step_avg:71.85ms +[2025-09-02 18:10:22] [Rank 0] step:3241/10000 train_time:232910ms step_avg:71.86ms +[2025-09-02 18:10:22] [Rank 0] step:3241/10000 train_time:232910ms step_avg:71.86ms +[2025-09-02 18:10:23] [Rank 0] step:3261/10000 train_time:234389ms step_avg:71.88ms +[2025-09-02 18:10:23] [Rank 0] step:3261/10000 train_time:234389ms step_avg:71.88ms +[2025-09-02 18:10:25] [Rank 0] step:3281/10000 train_time:235871ms step_avg:71.89ms +[2025-09-02 18:10:25] [Rank 0] step:3281/10000 train_time:235871ms step_avg:71.89ms +[2025-09-02 18:10:26] [Rank 0] step:3301/10000 train_time:237351ms step_avg:71.90ms +[2025-09-02 18:10:26] [Rank 0] step:3301/10000 train_time:237351ms step_avg:71.90ms +[2025-09-02 18:10:28] [Rank 0] step:3321/10000 train_time:238833ms step_avg:71.92ms +[2025-09-02 18:10:28] [Rank 0] step:3321/10000 train_time:238833ms step_avg:71.92ms +[2025-09-02 18:10:29] [Rank 0] 
step:3341/10000 train_time:240316ms step_avg:71.93ms +[2025-09-02 18:10:29] [Rank 0] step:3341/10000 train_time:240316ms step_avg:71.93ms +[2025-09-02 18:10:31] [Rank 0] step:3361/10000 train_time:241799ms step_avg:71.94ms +[2025-09-02 18:10:31] [Rank 0] step:3361/10000 train_time:241799ms step_avg:71.94ms +[2025-09-02 18:10:32] [Rank 0] step:3381/10000 train_time:243282ms step_avg:71.96ms +[2025-09-02 18:10:32] [Rank 0] step:3381/10000 train_time:243282ms step_avg:71.96ms +[2025-09-02 18:10:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:10:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:10:46] [Rank 0] PRINT: step:3400/10000 val_loss:4.2673 svd_entropy: attn_qk:H=0.7149,top10E=0.32,eRank=144.0,q75/q25=68.74 attn_vo:H=0.7465,top10E=0.20,eRank=215.9,q75/q25=inf mlp_w1:H=0.7132,top10E=0.36,eRank=134.8,q75/q25=9.11 mlp_w2:H=0.8238,top10E=0.17,eRank=242.8,q75/q25=16.49 vo_prod:H=0.6104,top10E=0.30,eRank=81.6,q75/q25=inf train_time:244913ms step_avg:72.03ms +[2025-09-02 18:10:46] [Rank 0] PRINT: step:3400/10000 val_loss:4.2673 svd_entropy: attn_qk:H=0.7149,top10E=0.32,eRank=144.0,q75/q25=68.74 attn_vo:H=0.7465,top10E=0.20,eRank=215.9,q75/q25=inf mlp_w1:H=0.7132,top10E=0.36,eRank=134.8,q75/q25=9.11 mlp_w2:H=0.8238,top10E=0.17,eRank=242.8,q75/q25=16.49 vo_prod:H=0.6104,top10E=0.30,eRank=81.6,q75/q25=inf train_time:244913ms step_avg:72.03ms +[2025-09-02 18:10:46] [Rank 0] step:3401/10000 train_time:244926ms step_avg:72.02ms +[2025-09-02 18:10:46] [Rank 0] step:3401/10000 train_time:244926ms step_avg:72.02ms +[2025-09-02 18:10:47] [Rank 0] step:3421/10000 train_time:246287ms step_avg:71.99ms +[2025-09-02 18:10:47] [Rank 0] step:3421/10000 train_time:246287ms step_avg:71.99ms +[2025-09-02 18:10:49] [Rank 0] step:3441/10000 train_time:247766ms step_avg:72.00ms +[2025-09-02 
18:10:49] [Rank 0] step:3441/10000 train_time:247766ms step_avg:72.00ms +[2025-09-02 18:10:50] [Rank 0] step:3461/10000 train_time:249245ms step_avg:72.02ms +[2025-09-02 18:10:50] [Rank 0] step:3461/10000 train_time:249245ms step_avg:72.02ms +[2025-09-02 18:10:52] [Rank 0] step:3481/10000 train_time:250728ms step_avg:72.03ms +[2025-09-02 18:10:52] [Rank 0] step:3481/10000 train_time:250728ms step_avg:72.03ms +[2025-09-02 18:10:53] [Rank 0] step:3501/10000 train_time:252209ms step_avg:72.04ms +[2025-09-02 18:10:53] [Rank 0] step:3501/10000 train_time:252209ms step_avg:72.04ms +[2025-09-02 18:10:55] [Rank 0] step:3521/10000 train_time:253690ms step_avg:72.05ms +[2025-09-02 18:10:55] [Rank 0] step:3521/10000 train_time:253690ms step_avg:72.05ms +[2025-09-02 18:10:56] [Rank 0] step:3541/10000 train_time:255170ms step_avg:72.06ms +[2025-09-02 18:10:56] [Rank 0] step:3541/10000 train_time:255170ms step_avg:72.06ms +[2025-09-02 18:10:58] [Rank 0] step:3561/10000 train_time:256651ms step_avg:72.07ms +[2025-09-02 18:10:58] [Rank 0] step:3561/10000 train_time:256651ms step_avg:72.07ms +[2025-09-02 18:10:59] [Rank 0] step:3581/10000 train_time:258285ms step_avg:72.13ms +[2025-09-02 18:10:59] [Rank 0] step:3581/10000 train_time:258285ms step_avg:72.13ms +[2025-09-02 18:11:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:11:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:11:12] [Rank 0] PRINT: step:3600/10000 val_loss:4.2519 svd_entropy: attn_qk:H=0.7187,top10E=0.31,eRank=146.5,q75/q25=71.75 attn_vo:H=0.7505,top10E=0.20,eRank=219.9,q75/q25=inf mlp_w1:H=0.7200,top10E=0.35,eRank=140.1,q75/q25=9.49 mlp_w2:H=0.8274,top10E=0.17,eRank=248.8,q75/q25=16.72 vo_prod:H=0.6150,top10E=0.29,eRank=84.3,q75/q25=inf train_time:259792ms step_avg:72.16ms +[2025-09-02 18:11:12] [Rank 0] PRINT: step:3600/10000 val_loss:4.2519 svd_entropy: attn_qk:H=0.7187,top10E=0.31,eRank=146.5,q75/q25=71.75 attn_vo:H=0.7505,top10E=0.20,eRank=219.9,q75/q25=inf mlp_w1:H=0.7200,top10E=0.35,eRank=140.1,q75/q25=9.49 mlp_w2:H=0.8274,top10E=0.17,eRank=248.8,q75/q25=16.72 vo_prod:H=0.6150,top10E=0.29,eRank=84.3,q75/q25=inf train_time:259792ms step_avg:72.16ms +[2025-09-02 18:11:12] [Rank 0] step:3601/10000 train_time:259804ms step_avg:72.15ms +[2025-09-02 18:11:12] [Rank 0] step:3601/10000 train_time:259804ms step_avg:72.15ms +[2025-09-02 18:11:14] [Rank 0] step:3621/10000 train_time:261144ms step_avg:72.12ms +[2025-09-02 18:11:14] [Rank 0] step:3621/10000 train_time:261144ms step_avg:72.12ms +[2025-09-02 18:11:15] [Rank 0] step:3641/10000 train_time:262620ms step_avg:72.13ms +[2025-09-02 18:11:15] [Rank 0] step:3641/10000 train_time:262620ms step_avg:72.13ms +[2025-09-02 18:11:17] [Rank 0] step:3661/10000 train_time:264097ms step_avg:72.14ms +[2025-09-02 18:11:17] [Rank 0] step:3661/10000 train_time:264097ms step_avg:72.14ms +[2025-09-02 18:11:18] [Rank 0] step:3681/10000 train_time:265576ms step_avg:72.15ms +[2025-09-02 18:11:18] [Rank 0] step:3681/10000 train_time:265576ms step_avg:72.15ms +[2025-09-02 18:11:20] [Rank 0] step:3701/10000 train_time:267056ms step_avg:72.16ms +[2025-09-02 18:11:20] [Rank 0] step:3701/10000 train_time:267056ms step_avg:72.16ms +[2025-09-02 18:11:21] [Rank 0] step:3721/10000 train_time:268561ms step_avg:72.17ms +[2025-09-02 18:11:21] [Rank 0] step:3721/10000 train_time:268561ms step_avg:72.17ms +[2025-09-02 18:11:23] [Rank 0] 
step:3741/10000 train_time:270077ms step_avg:72.19ms +[2025-09-02 18:11:23] [Rank 0] step:3741/10000 train_time:270077ms step_avg:72.19ms +[2025-09-02 18:11:24] [Rank 0] step:3761/10000 train_time:271592ms step_avg:72.21ms +[2025-09-02 18:11:24] [Rank 0] step:3761/10000 train_time:271592ms step_avg:72.21ms +[2025-09-02 18:11:26] [Rank 0] step:3781/10000 train_time:273108ms step_avg:72.23ms +[2025-09-02 18:11:26] [Rank 0] step:3781/10000 train_time:273108ms step_avg:72.23ms +[2025-09-02 18:11:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:11:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:11:39] [Rank 0] PRINT: step:3800/10000 val_loss:4.1961 svd_entropy: attn_qk:H=0.7221,top10E=0.31,eRank=148.8,q75/q25=74.21 attn_vo:H=0.7543,top10E=0.19,eRank=223.8,q75/q25=inf mlp_w1:H=0.7264,top10E=0.34,eRank=145.2,q75/q25=9.85 mlp_w2:H=0.8305,top10E=0.16,eRank=254.1,q75/q25=16.97 vo_prod:H=0.6195,top10E=0.29,eRank=87.1,q75/q25=inf train_time:274778ms step_avg:72.31ms +[2025-09-02 18:11:39] [Rank 0] PRINT: step:3800/10000 val_loss:4.1961 svd_entropy: attn_qk:H=0.7221,top10E=0.31,eRank=148.8,q75/q25=74.21 attn_vo:H=0.7543,top10E=0.19,eRank=223.8,q75/q25=inf mlp_w1:H=0.7264,top10E=0.34,eRank=145.2,q75/q25=9.85 mlp_w2:H=0.8305,top10E=0.16,eRank=254.1,q75/q25=16.97 vo_prod:H=0.6195,top10E=0.29,eRank=87.1,q75/q25=inf train_time:274778ms step_avg:72.31ms +[2025-09-02 18:11:39] [Rank 0] step:3801/10000 train_time:274789ms step_avg:72.29ms +[2025-09-02 18:11:39] [Rank 0] step:3801/10000 train_time:274789ms step_avg:72.29ms +[2025-09-02 18:11:41] [Rank 0] step:3821/10000 train_time:276178ms step_avg:72.28ms +[2025-09-02 18:11:41] [Rank 0] step:3821/10000 train_time:276178ms step_avg:72.28ms +[2025-09-02 18:11:42] [Rank 0] step:3841/10000 train_time:277696ms step_avg:72.30ms +[2025-09-02 
18:11:42] [Rank 0] step:3841/10000 train_time:277696ms step_avg:72.30ms +[2025-09-02 18:11:44] [Rank 0] step:3861/10000 train_time:279213ms step_avg:72.32ms +[2025-09-02 18:11:44] [Rank 0] step:3861/10000 train_time:279213ms step_avg:72.32ms +[2025-09-02 18:11:45] [Rank 0] step:3881/10000 train_time:280728ms step_avg:72.33ms +[2025-09-02 18:11:45] [Rank 0] step:3881/10000 train_time:280728ms step_avg:72.33ms +[2025-09-02 18:11:47] [Rank 0] step:3901/10000 train_time:282245ms step_avg:72.35ms +[2025-09-02 18:11:47] [Rank 0] step:3901/10000 train_time:282245ms step_avg:72.35ms +[2025-09-02 18:11:48] [Rank 0] step:3921/10000 train_time:283763ms step_avg:72.37ms +[2025-09-02 18:11:48] [Rank 0] step:3921/10000 train_time:283763ms step_avg:72.37ms +[2025-09-02 18:11:50] [Rank 0] step:3941/10000 train_time:285280ms step_avg:72.39ms +[2025-09-02 18:11:50] [Rank 0] step:3941/10000 train_time:285280ms step_avg:72.39ms +[2025-09-02 18:11:51] [Rank 0] step:3961/10000 train_time:286794ms step_avg:72.40ms +[2025-09-02 18:11:51] [Rank 0] step:3961/10000 train_time:286794ms step_avg:72.40ms +[2025-09-02 18:11:53] [Rank 0] step:3981/10000 train_time:288310ms step_avg:72.42ms +[2025-09-02 18:11:53] [Rank 0] step:3981/10000 train_time:288310ms step_avg:72.42ms +[2025-09-02 18:11:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:11:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:12:06] [Rank 0] PRINT: step:4000/10000 val_loss:4.1705 svd_entropy: attn_qk:H=0.7253,top10E=0.30,eRank=151.0,q75/q25=76.31 attn_vo:H=0.7577,top10E=0.19,eRank=227.4,q75/q25=inf mlp_w1:H=0.7326,top10E=0.34,eRank=150.5,q75/q25=10.26 mlp_w2:H=0.8334,top10E=0.16,eRank=259.2,q75/q25=17.28 vo_prod:H=0.6240,top10E=0.28,eRank=89.9,q75/q25=inf train_time:289978ms step_avg:72.49ms +[2025-09-02 18:12:06] [Rank 0] PRINT: step:4000/10000 val_loss:4.1705 svd_entropy: attn_qk:H=0.7253,top10E=0.30,eRank=151.0,q75/q25=76.31 attn_vo:H=0.7577,top10E=0.19,eRank=227.4,q75/q25=inf mlp_w1:H=0.7326,top10E=0.34,eRank=150.5,q75/q25=10.26 mlp_w2:H=0.8334,top10E=0.16,eRank=259.2,q75/q25=17.28 vo_prod:H=0.6240,top10E=0.28,eRank=89.9,q75/q25=inf train_time:289978ms step_avg:72.49ms +[2025-09-02 18:12:06] [Rank 0] step:4001/10000 train_time:289991ms step_avg:72.48ms +[2025-09-02 18:12:06] [Rank 0] step:4001/10000 train_time:289991ms step_avg:72.48ms +[2025-09-02 18:12:08] [Rank 0] step:4021/10000 train_time:291365ms step_avg:72.46ms +[2025-09-02 18:12:08] [Rank 0] step:4021/10000 train_time:291365ms step_avg:72.46ms +[2025-09-02 18:12:09] [Rank 0] step:4041/10000 train_time:292879ms step_avg:72.48ms +[2025-09-02 18:12:09] [Rank 0] step:4041/10000 train_time:292879ms step_avg:72.48ms +[2025-09-02 18:12:11] [Rank 0] step:4061/10000 train_time:294394ms step_avg:72.49ms +[2025-09-02 18:12:11] [Rank 0] step:4061/10000 train_time:294394ms step_avg:72.49ms +[2025-09-02 18:12:12] [Rank 0] step:4081/10000 train_time:296011ms step_avg:72.53ms +[2025-09-02 18:12:12] [Rank 0] step:4081/10000 train_time:296011ms step_avg:72.53ms +[2025-09-02 18:12:14] [Rank 0] step:4101/10000 train_time:297527ms step_avg:72.55ms +[2025-09-02 18:12:14] [Rank 0] step:4101/10000 train_time:297527ms step_avg:72.55ms +[2025-09-02 18:12:15] [Rank 0] step:4121/10000 train_time:299043ms step_avg:72.57ms +[2025-09-02 18:12:15] [Rank 0] step:4121/10000 train_time:299043ms step_avg:72.57ms +[2025-09-02 18:12:17] [Rank 0] 
step:4141/10000 train_time:300560ms step_avg:72.58ms +[2025-09-02 18:12:17] [Rank 0] step:4141/10000 train_time:300560ms step_avg:72.58ms +[2025-09-02 18:12:18] [Rank 0] step:4161/10000 train_time:302076ms step_avg:72.60ms +[2025-09-02 18:12:18] [Rank 0] step:4161/10000 train_time:302076ms step_avg:72.60ms +[2025-09-02 18:12:20] [Rank 0] step:4181/10000 train_time:303594ms step_avg:72.61ms +[2025-09-02 18:12:20] [Rank 0] step:4181/10000 train_time:303594ms step_avg:72.61ms +[2025-09-02 18:12:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:12:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:12:33] [Rank 0] PRINT: step:4200/10000 val_loss:4.1516 svd_entropy: attn_qk:H=0.7286,top10E=0.30,eRank=153.3,q75/q25=78.30 attn_vo:H=0.7609,top10E=0.18,eRank=230.8,q75/q25=inf mlp_w1:H=0.7383,top10E=0.33,eRank=155.7,q75/q25=10.73 mlp_w2:H=0.8358,top10E=0.16,eRank=263.4,q75/q25=17.64 vo_prod:H=0.6278,top10E=0.27,eRank=92.3,q75/q25=inf train_time:305262ms step_avg:72.68ms +[2025-09-02 18:12:33] [Rank 0] PRINT: step:4200/10000 val_loss:4.1516 svd_entropy: attn_qk:H=0.7286,top10E=0.30,eRank=153.3,q75/q25=78.30 attn_vo:H=0.7609,top10E=0.18,eRank=230.8,q75/q25=inf mlp_w1:H=0.7383,top10E=0.33,eRank=155.7,q75/q25=10.73 mlp_w2:H=0.8358,top10E=0.16,eRank=263.4,q75/q25=17.64 vo_prod:H=0.6278,top10E=0.27,eRank=92.3,q75/q25=inf train_time:305262ms step_avg:72.68ms +[2025-09-02 18:12:33] [Rank 0] step:4201/10000 train_time:305275ms step_avg:72.67ms +[2025-09-02 18:12:33] [Rank 0] step:4201/10000 train_time:305275ms step_avg:72.67ms +[2025-09-02 18:12:35] [Rank 0] step:4221/10000 train_time:306657ms step_avg:72.65ms +[2025-09-02 18:12:35] [Rank 0] step:4221/10000 train_time:306657ms step_avg:72.65ms +[2025-09-02 18:12:36] [Rank 0] step:4241/10000 train_time:308173ms step_avg:72.67ms +[2025-09-02 
18:12:36] [Rank 0] step:4241/10000 train_time:308173ms step_avg:72.67ms +[2025-09-02 18:12:38] [Rank 0] step:4261/10000 train_time:309689ms step_avg:72.68ms +[2025-09-02 18:12:38] [Rank 0] step:4261/10000 train_time:309689ms step_avg:72.68ms +[2025-09-02 18:12:39] [Rank 0] step:4281/10000 train_time:311204ms step_avg:72.69ms +[2025-09-02 18:12:39] [Rank 0] step:4281/10000 train_time:311204ms step_avg:72.69ms +[2025-09-02 18:12:41] [Rank 0] step:4301/10000 train_time:312719ms step_avg:72.71ms +[2025-09-02 18:12:41] [Rank 0] step:4301/10000 train_time:312719ms step_avg:72.71ms +[2025-09-02 18:12:43] [Rank 0] step:4321/10000 train_time:314236ms step_avg:72.72ms +[2025-09-02 18:12:43] [Rank 0] step:4321/10000 train_time:314236ms step_avg:72.72ms +[2025-09-02 18:12:44] [Rank 0] step:4341/10000 train_time:315750ms step_avg:72.74ms +[2025-09-02 18:12:44] [Rank 0] step:4341/10000 train_time:315750ms step_avg:72.74ms +[2025-09-02 18:12:46] [Rank 0] step:4361/10000 train_time:317266ms step_avg:72.75ms +[2025-09-02 18:12:46] [Rank 0] step:4361/10000 train_time:317266ms step_avg:72.75ms +[2025-09-02 18:12:47] [Rank 0] step:4381/10000 train_time:318781ms step_avg:72.76ms +[2025-09-02 18:12:47] [Rank 0] step:4381/10000 train_time:318781ms step_avg:72.76ms +[2025-09-02 18:12:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:12:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:13:00] [Rank 0] PRINT: step:4400/10000 val_loss:4.1302 svd_entropy: attn_qk:H=0.7315,top10E=0.29,eRank=155.4,q75/q25=79.67 attn_vo:H=0.7639,top10E=0.18,eRank=234.0,q75/q25=inf mlp_w1:H=0.7437,top10E=0.32,eRank=160.6,q75/q25=11.13 mlp_w2:H=0.8381,top10E=0.15,eRank=267.5,q75/q25=17.87 vo_prod:H=0.6314,top10E=0.27,eRank=94.7,q75/q25=inf train_time:320450ms step_avg:72.83ms +[2025-09-02 18:13:00] [Rank 0] PRINT: step:4400/10000 val_loss:4.1302 svd_entropy: attn_qk:H=0.7315,top10E=0.29,eRank=155.4,q75/q25=79.67 attn_vo:H=0.7639,top10E=0.18,eRank=234.0,q75/q25=inf mlp_w1:H=0.7437,top10E=0.32,eRank=160.6,q75/q25=11.13 mlp_w2:H=0.8381,top10E=0.15,eRank=267.5,q75/q25=17.87 vo_prod:H=0.6314,top10E=0.27,eRank=94.7,q75/q25=inf train_time:320450ms step_avg:72.83ms +[2025-09-02 18:13:00] [Rank 0] step:4401/10000 train_time:320463ms step_avg:72.82ms +[2025-09-02 18:13:00] [Rank 0] step:4401/10000 train_time:320463ms step_avg:72.82ms +[2025-09-02 18:13:02] [Rank 0] step:4421/10000 train_time:321838ms step_avg:72.80ms +[2025-09-02 18:13:02] [Rank 0] step:4421/10000 train_time:321838ms step_avg:72.80ms +[2025-09-02 18:13:04] [Rank 0] step:4441/10000 train_time:323351ms step_avg:72.81ms +[2025-09-02 18:13:04] [Rank 0] step:4441/10000 train_time:323351ms step_avg:72.81ms +[2025-09-02 18:13:05] [Rank 0] step:4461/10000 train_time:324872ms step_avg:72.82ms +[2025-09-02 18:13:05] [Rank 0] step:4461/10000 train_time:324872ms step_avg:72.82ms +[2025-09-02 18:13:07] [Rank 0] step:4481/10000 train_time:326393ms step_avg:72.84ms +[2025-09-02 18:13:07] [Rank 0] step:4481/10000 train_time:326393ms step_avg:72.84ms +[2025-09-02 18:13:08] [Rank 0] step:4501/10000 train_time:327914ms step_avg:72.85ms +[2025-09-02 18:13:08] [Rank 0] step:4501/10000 train_time:327914ms step_avg:72.85ms +[2025-09-02 18:13:10] [Rank 0] step:4521/10000 train_time:329509ms step_avg:72.88ms +[2025-09-02 18:13:10] [Rank 0] step:4521/10000 train_time:329509ms step_avg:72.88ms +[2025-09-02 18:13:11] [Rank 0] 
step:4541/10000 train_time:331031ms step_avg:72.90ms +[2025-09-02 18:13:11] [Rank 0] step:4541/10000 train_time:331031ms step_avg:72.90ms +[2025-09-02 18:13:13] [Rank 0] step:4561/10000 train_time:332555ms step_avg:72.91ms +[2025-09-02 18:13:13] [Rank 0] step:4561/10000 train_time:332555ms step_avg:72.91ms +[2025-09-02 18:13:14] [Rank 0] step:4581/10000 train_time:334079ms step_avg:72.93ms +[2025-09-02 18:13:14] [Rank 0] step:4581/10000 train_time:334079ms step_avg:72.93ms +[2025-09-02 18:13:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:13:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:13:28] [Rank 0] PRINT: step:4600/10000 val_loss:4.0950 svd_entropy: attn_qk:H=0.7344,top10E=0.29,eRank=157.5,q75/q25=81.34 attn_vo:H=0.7668,top10E=0.17,eRank=237.2,q75/q25=inf mlp_w1:H=0.7488,top10E=0.31,eRank=165.5,q75/q25=11.48 mlp_w2:H=0.8404,top10E=0.15,eRank=271.6,q75/q25=18.18 vo_prod:H=0.6351,top10E=0.26,eRank=97.2,q75/q25=inf train_time:335755ms step_avg:72.99ms +[2025-09-02 18:13:28] [Rank 0] PRINT: step:4600/10000 val_loss:4.0950 svd_entropy: attn_qk:H=0.7344,top10E=0.29,eRank=157.5,q75/q25=81.34 attn_vo:H=0.7668,top10E=0.17,eRank=237.2,q75/q25=inf mlp_w1:H=0.7488,top10E=0.31,eRank=165.5,q75/q25=11.48 mlp_w2:H=0.8404,top10E=0.15,eRank=271.6,q75/q25=18.18 vo_prod:H=0.6351,top10E=0.26,eRank=97.2,q75/q25=inf train_time:335755ms step_avg:72.99ms +[2025-09-02 18:13:28] [Rank 0] step:4601/10000 train_time:335768ms step_avg:72.98ms +[2025-09-02 18:13:28] [Rank 0] step:4601/10000 train_time:335768ms step_avg:72.98ms +[2025-09-02 18:13:29] [Rank 0] step:4621/10000 train_time:337148ms step_avg:72.96ms +[2025-09-02 18:13:29] [Rank 0] step:4621/10000 train_time:337148ms step_avg:72.96ms +[2025-09-02 18:13:31] [Rank 0] step:4641/10000 train_time:338667ms step_avg:72.97ms +[2025-09-02 
18:13:31] [Rank 0] step:4641/10000 train_time:338667ms step_avg:72.97ms +[2025-09-02 18:13:32] [Rank 0] step:4661/10000 train_time:340187ms step_avg:72.99ms +[2025-09-02 18:13:32] [Rank 0] step:4661/10000 train_time:340187ms step_avg:72.99ms +[2025-09-02 18:13:34] [Rank 0] step:4681/10000 train_time:341708ms step_avg:73.00ms +[2025-09-02 18:13:34] [Rank 0] step:4681/10000 train_time:341708ms step_avg:73.00ms +[2025-09-02 18:13:35] [Rank 0] step:4701/10000 train_time:343231ms step_avg:73.01ms +[2025-09-02 18:13:35] [Rank 0] step:4701/10000 train_time:343231ms step_avg:73.01ms +[2025-09-02 18:13:37] [Rank 0] step:4721/10000 train_time:344753ms step_avg:73.03ms +[2025-09-02 18:13:37] [Rank 0] step:4721/10000 train_time:344753ms step_avg:73.03ms +[2025-09-02 18:13:38] [Rank 0] step:4741/10000 train_time:346274ms step_avg:73.04ms +[2025-09-02 18:13:38] [Rank 0] step:4741/10000 train_time:346274ms step_avg:73.04ms +[2025-09-02 18:13:40] [Rank 0] step:4761/10000 train_time:347795ms step_avg:73.05ms +[2025-09-02 18:13:40] [Rank 0] step:4761/10000 train_time:347795ms step_avg:73.05ms +[2025-09-02 18:13:41] [Rank 0] step:4781/10000 train_time:349316ms step_avg:73.06ms +[2025-09-02 18:13:41] [Rank 0] step:4781/10000 train_time:349316ms step_avg:73.06ms +[2025-09-02 18:13:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:13:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:13:55] [Rank 0] PRINT: step:4800/10000 val_loss:4.0823 svd_entropy: attn_qk:H=0.7372,top10E=0.29,eRank=159.6,q75/q25=82.82 attn_vo:H=0.7694,top10E=0.17,eRank=240.3,q75/q25=inf mlp_w1:H=0.7533,top10E=0.31,eRank=169.9,q75/q25=11.88 mlp_w2:H=0.8423,top10E=0.15,eRank=275.1,q75/q25=18.47 vo_prod:H=0.6384,top10E=0.26,eRank=99.5,q75/q25=inf train_time:350992ms step_avg:73.12ms +[2025-09-02 18:13:55] [Rank 0] PRINT: step:4800/10000 val_loss:4.0823 svd_entropy: attn_qk:H=0.7372,top10E=0.29,eRank=159.6,q75/q25=82.82 attn_vo:H=0.7694,top10E=0.17,eRank=240.3,q75/q25=inf mlp_w1:H=0.7533,top10E=0.31,eRank=169.9,q75/q25=11.88 mlp_w2:H=0.8423,top10E=0.15,eRank=275.1,q75/q25=18.47 vo_prod:H=0.6384,top10E=0.26,eRank=99.5,q75/q25=inf train_time:350992ms step_avg:73.12ms +[2025-09-02 18:13:55] [Rank 0] step:4801/10000 train_time:351004ms step_avg:73.11ms +[2025-09-02 18:13:55] [Rank 0] step:4801/10000 train_time:351004ms step_avg:73.11ms +[2025-09-02 18:13:56] [Rank 0] step:4821/10000 train_time:352387ms step_avg:73.09ms +[2025-09-02 18:13:56] [Rank 0] step:4821/10000 train_time:352387ms step_avg:73.09ms +[2025-09-02 18:13:58] [Rank 0] step:4841/10000 train_time:353906ms step_avg:73.11ms +[2025-09-02 18:13:58] [Rank 0] step:4841/10000 train_time:353906ms step_avg:73.11ms +[2025-09-02 18:13:59] [Rank 0] step:4861/10000 train_time:355429ms step_avg:73.12ms +[2025-09-02 18:13:59] [Rank 0] step:4861/10000 train_time:355429ms step_avg:73.12ms +[2025-09-02 18:14:01] [Rank 0] step:4881/10000 train_time:356949ms step_avg:73.13ms +[2025-09-02 18:14:01] [Rank 0] step:4881/10000 train_time:356949ms step_avg:73.13ms +[2025-09-02 18:14:02] [Rank 0] step:4901/10000 train_time:358470ms step_avg:73.14ms +[2025-09-02 18:14:02] [Rank 0] step:4901/10000 train_time:358470ms step_avg:73.14ms +[2025-09-02 18:14:04] [Rank 0] step:4921/10000 train_time:359995ms step_avg:73.15ms +[2025-09-02 18:14:04] [Rank 0] step:4921/10000 train_time:359995ms step_avg:73.15ms +[2025-09-02 18:14:05] [Rank 0] 
step:4941/10000 train_time:361519ms step_avg:73.17ms +[2025-09-02 18:14:05] [Rank 0] step:4941/10000 train_time:361519ms step_avg:73.17ms +[2025-09-02 18:14:07] [Rank 0] step:4961/10000 train_time:363042ms step_avg:73.18ms +[2025-09-02 18:14:07] [Rank 0] step:4961/10000 train_time:363042ms step_avg:73.18ms +[2025-09-02 18:14:08] [Rank 0] step:4981/10000 train_time:364567ms step_avg:73.19ms +[2025-09-02 18:14:08] [Rank 0] step:4981/10000 train_time:364567ms step_avg:73.19ms +[2025-09-02 18:14:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:14:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:14:22] [Rank 0] PRINT: step:5000/10000 val_loss:4.0604 svd_entropy: attn_qk:H=0.7397,top10E=0.28,eRank=161.6,q75/q25=83.63 attn_vo:H=0.7719,top10E=0.17,eRank=243.2,q75/q25=inf mlp_w1:H=0.7578,top10E=0.30,eRank=174.5,q75/q25=12.27 mlp_w2:H=0.8441,top10E=0.15,eRank=278.5,q75/q25=18.65 vo_prod:H=0.6416,top10E=0.25,eRank=101.8,q75/q25=inf train_time:366243ms step_avg:73.25ms +[2025-09-02 18:14:22] [Rank 0] PRINT: step:5000/10000 val_loss:4.0604 svd_entropy: attn_qk:H=0.7397,top10E=0.28,eRank=161.6,q75/q25=83.63 attn_vo:H=0.7719,top10E=0.17,eRank=243.2,q75/q25=inf mlp_w1:H=0.7578,top10E=0.30,eRank=174.5,q75/q25=12.27 mlp_w2:H=0.8441,top10E=0.15,eRank=278.5,q75/q25=18.65 vo_prod:H=0.6416,top10E=0.25,eRank=101.8,q75/q25=inf train_time:366243ms step_avg:73.25ms +[2025-09-02 18:14:22] [Rank 0] step:5001/10000 train_time:366255ms step_avg:73.24ms +[2025-09-02 18:14:22] [Rank 0] step:5001/10000 train_time:366255ms step_avg:73.24ms +[2025-09-02 18:14:23] [Rank 0] step:5021/10000 train_time:367650ms step_avg:73.22ms +[2025-09-02 18:14:23] [Rank 0] step:5021/10000 train_time:367650ms step_avg:73.22ms +[2025-09-02 18:14:25] [Rank 0] step:5041/10000 train_time:369173ms step_avg:73.23ms +[2025-09-02 
18:14:25] [Rank 0] step:5041/10000 train_time:369173ms step_avg:73.23ms +[2025-09-02 18:14:27] [Rank 0] step:5061/10000 train_time:370693ms step_avg:73.25ms +[2025-09-02 18:14:27] [Rank 0] step:5061/10000 train_time:370693ms step_avg:73.25ms +[2025-09-02 18:14:28] [Rank 0] step:5081/10000 train_time:372215ms step_avg:73.26ms +[2025-09-02 18:14:28] [Rank 0] step:5081/10000 train_time:372215ms step_avg:73.26ms +[2025-09-02 18:14:30] [Rank 0] step:5101/10000 train_time:373738ms step_avg:73.27ms +[2025-09-02 18:14:30] [Rank 0] step:5101/10000 train_time:373738ms step_avg:73.27ms +[2025-09-02 18:14:31] [Rank 0] step:5121/10000 train_time:375260ms step_avg:73.28ms +[2025-09-02 18:14:31] [Rank 0] step:5121/10000 train_time:375260ms step_avg:73.28ms +[2025-09-02 18:14:33] [Rank 0] step:5141/10000 train_time:376787ms step_avg:73.29ms +[2025-09-02 18:14:33] [Rank 0] step:5141/10000 train_time:376787ms step_avg:73.29ms +[2025-09-02 18:14:34] [Rank 0] step:5161/10000 train_time:378309ms step_avg:73.30ms +[2025-09-02 18:14:34] [Rank 0] step:5161/10000 train_time:378309ms step_avg:73.30ms +[2025-09-02 18:14:36] [Rank 0] step:5181/10000 train_time:379834ms step_avg:73.31ms +[2025-09-02 18:14:36] [Rank 0] step:5181/10000 train_time:379834ms step_avg:73.31ms +[2025-09-02 18:14:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:14:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:14:49] [Rank 0] PRINT: step:5200/10000 val_loss:4.0410 svd_entropy: attn_qk:H=0.7422,top10E=0.28,eRank=163.5,q75/q25=84.62 attn_vo:H=0.7743,top10E=0.17,eRank=245.9,q75/q25=inf mlp_w1:H=0.7619,top10E=0.30,eRank=178.8,q75/q25=12.63 mlp_w2:H=0.8457,top10E=0.15,eRank=281.5,q75/q25=18.88 vo_prod:H=0.6445,top10E=0.25,eRank=103.9,q75/q25=inf train_time:381538ms step_avg:73.37ms +[2025-09-02 18:14:49] [Rank 0] PRINT: step:5200/10000 val_loss:4.0410 svd_entropy: attn_qk:H=0.7422,top10E=0.28,eRank=163.5,q75/q25=84.62 attn_vo:H=0.7743,top10E=0.17,eRank=245.9,q75/q25=inf mlp_w1:H=0.7619,top10E=0.30,eRank=178.8,q75/q25=12.63 mlp_w2:H=0.8457,top10E=0.15,eRank=281.5,q75/q25=18.88 vo_prod:H=0.6445,top10E=0.25,eRank=103.9,q75/q25=inf train_time:381538ms step_avg:73.37ms +[2025-09-02 18:14:49] [Rank 0] step:5201/10000 train_time:381551ms step_avg:73.36ms +[2025-09-02 18:14:49] [Rank 0] step:5201/10000 train_time:381551ms step_avg:73.36ms +[2025-09-02 18:14:51] [Rank 0] step:5221/10000 train_time:382969ms step_avg:73.35ms +[2025-09-02 18:14:51] [Rank 0] step:5221/10000 train_time:382969ms step_avg:73.35ms +[2025-09-02 18:14:52] [Rank 0] step:5241/10000 train_time:384523ms step_avg:73.37ms +[2025-09-02 18:14:52] [Rank 0] step:5241/10000 train_time:384523ms step_avg:73.37ms +[2025-09-02 18:14:54] [Rank 0] step:5261/10000 train_time:386076ms step_avg:73.38ms +[2025-09-02 18:14:54] [Rank 0] step:5261/10000 train_time:386076ms step_avg:73.38ms +[2025-09-02 18:14:55] [Rank 0] step:5281/10000 train_time:387632ms step_avg:73.40ms +[2025-09-02 18:14:55] [Rank 0] step:5281/10000 train_time:387632ms step_avg:73.40ms +[2025-09-02 18:14:57] [Rank 0] step:5301/10000 train_time:389196ms step_avg:73.42ms +[2025-09-02 18:14:57] [Rank 0] step:5301/10000 train_time:389196ms step_avg:73.42ms +[2025-09-02 18:14:59] [Rank 0] step:5321/10000 train_time:390753ms step_avg:73.44ms +[2025-09-02 18:14:59] [Rank 0] step:5321/10000 train_time:390753ms step_avg:73.44ms +[2025-09-02 18:15:00] [Rank 
0] step:5341/10000 train_time:392309ms step_avg:73.45ms +[2025-09-02 18:15:00] [Rank 0] step:5341/10000 train_time:392309ms step_avg:73.45ms +[2025-09-02 18:15:02] [Rank 0] step:5361/10000 train_time:393872ms step_avg:73.47ms +[2025-09-02 18:15:02] [Rank 0] step:5361/10000 train_time:393872ms step_avg:73.47ms +[2025-09-02 18:15:03] [Rank 0] step:5381/10000 train_time:395432ms step_avg:73.49ms +[2025-09-02 18:15:03] [Rank 0] step:5381/10000 train_time:395432ms step_avg:73.49ms +[2025-09-02 18:15:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:15:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:15:17] [Rank 0] PRINT: step:5400/10000 val_loss:4.0233 svd_entropy: attn_qk:H=0.7444,top10E=0.28,eRank=165.3,q75/q25=85.18 attn_vo:H=0.7764,top10E=0.16,eRank=248.5,q75/q25=inf mlp_w1:H=0.7657,top10E=0.29,eRank=183.0,q75/q25=13.03 mlp_w2:H=0.8472,top10E=0.14,eRank=284.3,q75/q25=19.21 vo_prod:H=0.6473,top10E=0.25,eRank=106.0,q75/q25=inf train_time:397146ms step_avg:73.55ms +[2025-09-02 18:15:17] [Rank 0] PRINT: step:5400/10000 val_loss:4.0233 svd_entropy: attn_qk:H=0.7444,top10E=0.28,eRank=165.3,q75/q25=85.18 attn_vo:H=0.7764,top10E=0.16,eRank=248.5,q75/q25=inf mlp_w1:H=0.7657,top10E=0.29,eRank=183.0,q75/q25=13.03 mlp_w2:H=0.8472,top10E=0.14,eRank=284.3,q75/q25=19.21 vo_prod:H=0.6473,top10E=0.25,eRank=106.0,q75/q25=inf train_time:397146ms step_avg:73.55ms +[2025-09-02 18:15:17] [Rank 0] step:5401/10000 train_time:397159ms step_avg:73.53ms +[2025-09-02 18:15:17] [Rank 0] step:5401/10000 train_time:397159ms step_avg:73.53ms +[2025-09-02 18:15:18] [Rank 0] step:5421/10000 train_time:398567ms step_avg:73.52ms +[2025-09-02 18:15:18] [Rank 0] step:5421/10000 train_time:398567ms step_avg:73.52ms +[2025-09-02 18:15:20] [Rank 0] step:5441/10000 train_time:400119ms step_avg:73.54ms +[2025-09-02 
18:15:20] [Rank 0] step:5441/10000 train_time:400119ms step_avg:73.54ms +[2025-09-02 18:15:21] [Rank 0] step:5461/10000 train_time:401674ms step_avg:73.55ms +[2025-09-02 18:15:21] [Rank 0] step:5461/10000 train_time:401674ms step_avg:73.55ms +[2025-09-02 18:15:23] [Rank 0] step:5481/10000 train_time:403231ms step_avg:73.57ms +[2025-09-02 18:15:23] [Rank 0] step:5481/10000 train_time:403231ms step_avg:73.57ms +[2025-09-02 18:15:24] [Rank 0] step:5501/10000 train_time:404791ms step_avg:73.59ms +[2025-09-02 18:15:24] [Rank 0] step:5501/10000 train_time:404791ms step_avg:73.59ms +[2025-09-02 18:15:26] [Rank 0] step:5521/10000 train_time:406352ms step_avg:73.60ms +[2025-09-02 18:15:26] [Rank 0] step:5521/10000 train_time:406352ms step_avg:73.60ms +[2025-09-02 18:15:28] [Rank 0] step:5541/10000 train_time:407909ms step_avg:73.62ms +[2025-09-02 18:15:28] [Rank 0] step:5541/10000 train_time:407909ms step_avg:73.62ms +[2025-09-02 18:15:29] [Rank 0] step:5561/10000 train_time:409463ms step_avg:73.63ms +[2025-09-02 18:15:29] [Rank 0] step:5561/10000 train_time:409463ms step_avg:73.63ms +[2025-09-02 18:15:31] [Rank 0] step:5581/10000 train_time:411019ms step_avg:73.65ms +[2025-09-02 18:15:31] [Rank 0] step:5581/10000 train_time:411019ms step_avg:73.65ms +[2025-09-02 18:15:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:15:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:15:44] [Rank 0] PRINT: step:5600/10000 val_loss:4.0097 svd_entropy: attn_qk:H=0.7467,top10E=0.27,eRank=167.2,q75/q25=86.01 attn_vo:H=0.7785,top10E=0.16,eRank=251.0,q75/q25=inf mlp_w1:H=0.7693,top10E=0.28,eRank=186.9,q75/q25=13.43 mlp_w2:H=0.8486,top10E=0.14,eRank=286.9,q75/q25=19.46 vo_prod:H=0.6499,top10E=0.24,eRank=108.0,q75/q25=inf train_time:412732ms step_avg:73.70ms +[2025-09-02 18:15:44] [Rank 0] PRINT: step:5600/10000 val_loss:4.0097 svd_entropy: attn_qk:H=0.7467,top10E=0.27,eRank=167.2,q75/q25=86.01 attn_vo:H=0.7785,top10E=0.16,eRank=251.0,q75/q25=inf mlp_w1:H=0.7693,top10E=0.28,eRank=186.9,q75/q25=13.43 mlp_w2:H=0.8486,top10E=0.14,eRank=286.9,q75/q25=19.46 vo_prod:H=0.6499,top10E=0.24,eRank=108.0,q75/q25=inf train_time:412732ms step_avg:73.70ms +[2025-09-02 18:15:44] [Rank 0] step:5601/10000 train_time:412744ms step_avg:73.69ms +[2025-09-02 18:15:44] [Rank 0] step:5601/10000 train_time:412744ms step_avg:73.69ms +[2025-09-02 18:15:46] [Rank 0] step:5621/10000 train_time:414175ms step_avg:73.68ms +[2025-09-02 18:15:46] [Rank 0] step:5621/10000 train_time:414175ms step_avg:73.68ms +[2025-09-02 18:15:47] [Rank 0] step:5641/10000 train_time:415728ms step_avg:73.70ms +[2025-09-02 18:15:47] [Rank 0] step:5641/10000 train_time:415728ms step_avg:73.70ms +[2025-09-02 18:15:49] [Rank 0] step:5661/10000 train_time:417282ms step_avg:73.71ms +[2025-09-02 18:15:49] [Rank 0] step:5661/10000 train_time:417282ms step_avg:73.71ms +[2025-09-02 18:15:50] [Rank 0] step:5681/10000 train_time:418840ms step_avg:73.73ms +[2025-09-02 18:15:50] [Rank 0] step:5681/10000 train_time:418840ms step_avg:73.73ms +[2025-09-02 18:15:52] [Rank 0] step:5701/10000 train_time:420394ms step_avg:73.74ms +[2025-09-02 18:15:52] [Rank 0] step:5701/10000 train_time:420394ms step_avg:73.74ms +[2025-09-02 18:15:54] [Rank 0] step:5721/10000 train_time:421953ms step_avg:73.76ms +[2025-09-02 18:15:54] [Rank 0] step:5721/10000 train_time:421953ms step_avg:73.76ms +[2025-09-02 18:15:55] [Rank 
0] step:5741/10000 train_time:423508ms step_avg:73.77ms +[2025-09-02 18:15:55] [Rank 0] step:5741/10000 train_time:423508ms step_avg:73.77ms +[2025-09-02 18:15:57] [Rank 0] step:5761/10000 train_time:425062ms step_avg:73.78ms +[2025-09-02 18:15:57] [Rank 0] step:5761/10000 train_time:425062ms step_avg:73.78ms +[2025-09-02 18:15:58] [Rank 0] step:5781/10000 train_time:426620ms step_avg:73.80ms +[2025-09-02 18:15:58] [Rank 0] step:5781/10000 train_time:426620ms step_avg:73.80ms +[2025-09-02 18:16:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:16:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:16:12] [Rank 0] PRINT: step:5800/10000 val_loss:3.9986 svd_entropy: attn_qk:H=0.7488,top10E=0.27,eRank=168.9,q75/q25=86.34 attn_vo:H=0.7804,top10E=0.16,eRank=253.3,q75/q25=inf mlp_w1:H=0.7727,top10E=0.28,eRank=190.8,q75/q25=13.81 mlp_w2:H=0.8499,top10E=0.14,eRank=289.3,q75/q25=19.67 vo_prod:H=0.6522,top10E=0.24,eRank=109.8,q75/q25=inf train_time:428336ms step_avg:73.85ms +[2025-09-02 18:16:12] [Rank 0] PRINT: step:5800/10000 val_loss:3.9986 svd_entropy: attn_qk:H=0.7488,top10E=0.27,eRank=168.9,q75/q25=86.34 attn_vo:H=0.7804,top10E=0.16,eRank=253.3,q75/q25=inf mlp_w1:H=0.7727,top10E=0.28,eRank=190.8,q75/q25=13.81 mlp_w2:H=0.8499,top10E=0.14,eRank=289.3,q75/q25=19.67 vo_prod:H=0.6522,top10E=0.24,eRank=109.8,q75/q25=inf train_time:428336ms step_avg:73.85ms +[2025-09-02 18:16:12] [Rank 0] step:5801/10000 train_time:428348ms step_avg:73.84ms +[2025-09-02 18:16:12] [Rank 0] step:5801/10000 train_time:428348ms step_avg:73.84ms +[2025-09-02 18:16:13] [Rank 0] step:5821/10000 train_time:429752ms step_avg:73.83ms +[2025-09-02 18:16:13] [Rank 0] step:5821/10000 train_time:429752ms step_avg:73.83ms +[2025-09-02 18:16:15] [Rank 0] step:5841/10000 train_time:431305ms step_avg:73.84ms +[2025-09-02 
18:16:15] [Rank 0] step:5841/10000 train_time:431305ms step_avg:73.84ms +[2025-09-02 18:16:16] [Rank 0] step:5861/10000 train_time:432864ms step_avg:73.85ms +[2025-09-02 18:16:16] [Rank 0] step:5861/10000 train_time:432864ms step_avg:73.85ms +[2025-09-02 18:16:18] [Rank 0] step:5881/10000 train_time:434420ms step_avg:73.87ms +[2025-09-02 18:16:18] [Rank 0] step:5881/10000 train_time:434420ms step_avg:73.87ms +[2025-09-02 18:16:20] [Rank 0] step:5901/10000 train_time:435975ms step_avg:73.88ms +[2025-09-02 18:16:20] [Rank 0] step:5901/10000 train_time:435975ms step_avg:73.88ms +[2025-09-02 18:16:21] [Rank 0] step:5921/10000 train_time:437530ms step_avg:73.89ms +[2025-09-02 18:16:21] [Rank 0] step:5921/10000 train_time:437530ms step_avg:73.89ms +[2025-09-02 18:16:23] [Rank 0] step:5941/10000 train_time:439089ms step_avg:73.91ms +[2025-09-02 18:16:23] [Rank 0] step:5941/10000 train_time:439089ms step_avg:73.91ms +[2025-09-02 18:16:24] [Rank 0] step:5961/10000 train_time:440649ms step_avg:73.92ms +[2025-09-02 18:16:24] [Rank 0] step:5961/10000 train_time:440649ms step_avg:73.92ms +[2025-09-02 18:16:26] [Rank 0] step:5981/10000 train_time:442209ms step_avg:73.94ms +[2025-09-02 18:16:26] [Rank 0] step:5981/10000 train_time:442209ms step_avg:73.94ms +[2025-09-02 18:16:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:16:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:16:39] [Rank 0] PRINT: step:6000/10000 val_loss:3.9742 svd_entropy: attn_qk:H=0.7509,top10E=0.27,eRank=170.6,q75/q25=86.89 attn_vo:H=0.7822,top10E=0.16,eRank=255.6,q75/q25=inf mlp_w1:H=0.7759,top10E=0.28,eRank=194.5,q75/q25=14.15 mlp_w2:H=0.8512,top10E=0.14,eRank=291.9,q75/q25=20.01 vo_prod:H=0.6546,top10E=0.24,eRank=111.6,q75/q25=inf train_time:443922ms step_avg:73.99ms +[2025-09-02 18:16:39] [Rank 0] PRINT: step:6000/10000 val_loss:3.9742 svd_entropy: attn_qk:H=0.7509,top10E=0.27,eRank=170.6,q75/q25=86.89 attn_vo:H=0.7822,top10E=0.16,eRank=255.6,q75/q25=inf mlp_w1:H=0.7759,top10E=0.28,eRank=194.5,q75/q25=14.15 mlp_w2:H=0.8512,top10E=0.14,eRank=291.9,q75/q25=20.01 vo_prod:H=0.6546,top10E=0.24,eRank=111.6,q75/q25=inf train_time:443922ms step_avg:73.99ms +[2025-09-02 18:16:39] [Rank 0] step:6001/10000 train_time:443935ms step_avg:73.98ms +[2025-09-02 18:16:39] [Rank 0] step:6001/10000 train_time:443935ms step_avg:73.98ms +[2025-09-02 18:16:41] [Rank 0] step:6021/10000 train_time:445359ms step_avg:73.97ms +[2025-09-02 18:16:41] [Rank 0] step:6021/10000 train_time:445359ms step_avg:73.97ms +[2025-09-02 18:16:42] [Rank 0] step:6041/10000 train_time:446917ms step_avg:73.98ms +[2025-09-02 18:16:42] [Rank 0] step:6041/10000 train_time:446917ms step_avg:73.98ms +[2025-09-02 18:16:44] [Rank 0] step:6061/10000 train_time:448480ms step_avg:73.99ms +[2025-09-02 18:16:44] [Rank 0] step:6061/10000 train_time:448480ms step_avg:73.99ms +[2025-09-02 18:16:46] [Rank 0] step:6081/10000 train_time:450042ms step_avg:74.01ms +[2025-09-02 18:16:46] [Rank 0] step:6081/10000 train_time:450042ms step_avg:74.01ms +[2025-09-02 18:16:47] [Rank 0] step:6101/10000 train_time:451604ms step_avg:74.02ms +[2025-09-02 18:16:47] [Rank 0] step:6101/10000 train_time:451604ms step_avg:74.02ms +[2025-09-02 18:16:49] [Rank 0] step:6121/10000 train_time:453434ms step_avg:74.08ms +[2025-09-02 18:16:49] [Rank 0] step:6121/10000 train_time:453434ms step_avg:74.08ms +[2025-09-02 18:16:51] [Rank 
0] step:6141/10000 train_time:455003ms step_avg:74.09ms +[2025-09-02 18:16:51] [Rank 0] step:6141/10000 train_time:455003ms step_avg:74.09ms +[2025-09-02 18:16:52] [Rank 0] step:6161/10000 train_time:456569ms step_avg:74.11ms +[2025-09-02 18:16:52] [Rank 0] step:6161/10000 train_time:456569ms step_avg:74.11ms +[2025-09-02 18:16:54] [Rank 0] step:6181/10000 train_time:458129ms step_avg:74.12ms +[2025-09-02 18:16:54] [Rank 0] step:6181/10000 train_time:458129ms step_avg:74.12ms +[2025-09-02 18:16:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:16:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:17:07] [Rank 0] PRINT: step:6200/10000 val_loss:3.9594 svd_entropy: attn_qk:H=0.7528,top10E=0.26,eRank=172.3,q75/q25=87.42 attn_vo:H=0.7840,top10E=0.15,eRank=257.8,q75/q25=inf mlp_w1:H=0.7788,top10E=0.27,eRank=197.8,q75/q25=14.55 mlp_w2:H=0.8523,top10E=0.14,eRank=294.2,q75/q25=20.24 vo_prod:H=0.6569,top10E=0.24,eRank=113.4,q75/q25=inf train_time:459846ms step_avg:74.17ms +[2025-09-02 18:17:07] [Rank 0] PRINT: step:6200/10000 val_loss:3.9594 svd_entropy: attn_qk:H=0.7528,top10E=0.26,eRank=172.3,q75/q25=87.42 attn_vo:H=0.7840,top10E=0.15,eRank=257.8,q75/q25=inf mlp_w1:H=0.7788,top10E=0.27,eRank=197.8,q75/q25=14.55 mlp_w2:H=0.8523,top10E=0.14,eRank=294.2,q75/q25=20.24 vo_prod:H=0.6569,top10E=0.24,eRank=113.4,q75/q25=inf train_time:459846ms step_avg:74.17ms +[2025-09-02 18:17:07] [Rank 0] step:6201/10000 train_time:459860ms step_avg:74.16ms +[2025-09-02 18:17:07] [Rank 0] step:6201/10000 train_time:459860ms step_avg:74.16ms +[2025-09-02 18:17:09] [Rank 0] step:6221/10000 train_time:461267ms step_avg:74.15ms +[2025-09-02 18:17:09] [Rank 0] step:6221/10000 train_time:461267ms step_avg:74.15ms +[2025-09-02 18:17:10] [Rank 0] step:6241/10000 train_time:462823ms step_avg:74.16ms +[2025-09-02 
18:17:10] [Rank 0] step:6241/10000 train_time:462823ms step_avg:74.16ms +[2025-09-02 18:17:12] [Rank 0] step:6261/10000 train_time:464383ms step_avg:74.17ms +[2025-09-02 18:17:12] [Rank 0] step:6261/10000 train_time:464383ms step_avg:74.17ms +[2025-09-02 18:17:13] [Rank 0] step:6281/10000 train_time:465948ms step_avg:74.18ms +[2025-09-02 18:17:13] [Rank 0] step:6281/10000 train_time:465948ms step_avg:74.18ms +[2025-09-02 18:17:15] [Rank 0] step:6301/10000 train_time:467511ms step_avg:74.20ms +[2025-09-02 18:17:15] [Rank 0] step:6301/10000 train_time:467511ms step_avg:74.20ms +[2025-09-02 18:17:16] [Rank 0] step:6321/10000 train_time:469068ms step_avg:74.21ms +[2025-09-02 18:17:16] [Rank 0] step:6321/10000 train_time:469068ms step_avg:74.21ms +[2025-09-02 18:17:18] [Rank 0] step:6341/10000 train_time:470635ms step_avg:74.22ms +[2025-09-02 18:17:18] [Rank 0] step:6341/10000 train_time:470635ms step_avg:74.22ms +[2025-09-02 18:17:20] [Rank 0] step:6361/10000 train_time:472203ms step_avg:74.23ms +[2025-09-02 18:17:20] [Rank 0] step:6361/10000 train_time:472203ms step_avg:74.23ms +[2025-09-02 18:17:21] [Rank 0] step:6381/10000 train_time:473770ms step_avg:74.25ms +[2025-09-02 18:17:21] [Rank 0] step:6381/10000 train_time:473770ms step_avg:74.25ms +[2025-09-02 18:17:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:17:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:17:35] [Rank 0] PRINT: step:6400/10000 val_loss:3.9438 svd_entropy: attn_qk:H=0.7545,top10E=0.26,eRank=173.8,q75/q25=87.96 attn_vo:H=0.7855,top10E=0.15,eRank=259.8,q75/q25=inf mlp_w1:H=0.7814,top10E=0.27,eRank=200.9,q75/q25=14.90 mlp_w2:H=0.8533,top10E=0.14,eRank=296.2,q75/q25=20.55 vo_prod:H=0.6589,top10E=0.23,eRank=115.1,q75/q25=inf train_time:475490ms step_avg:74.30ms +[2025-09-02 18:17:35] [Rank 0] PRINT: step:6400/10000 val_loss:3.9438 svd_entropy: attn_qk:H=0.7545,top10E=0.26,eRank=173.8,q75/q25=87.96 attn_vo:H=0.7855,top10E=0.15,eRank=259.8,q75/q25=inf mlp_w1:H=0.7814,top10E=0.27,eRank=200.9,q75/q25=14.90 mlp_w2:H=0.8533,top10E=0.14,eRank=296.2,q75/q25=20.55 vo_prod:H=0.6589,top10E=0.23,eRank=115.1,q75/q25=inf train_time:475490ms step_avg:74.30ms +[2025-09-02 18:17:35] [Rank 0] step:6401/10000 train_time:475502ms step_avg:74.29ms +[2025-09-02 18:17:35] [Rank 0] step:6401/10000 train_time:475502ms step_avg:74.29ms +[2025-09-02 18:17:36] [Rank 0] step:6421/10000 train_time:476917ms step_avg:74.27ms +[2025-09-02 18:17:36] [Rank 0] step:6421/10000 train_time:476917ms step_avg:74.27ms +[2025-09-02 18:17:38] [Rank 0] step:6441/10000 train_time:478475ms step_avg:74.29ms +[2025-09-02 18:17:38] [Rank 0] step:6441/10000 train_time:478475ms step_avg:74.29ms +[2025-09-02 18:17:39] [Rank 0] step:6461/10000 train_time:480037ms step_avg:74.30ms +[2025-09-02 18:17:39] [Rank 0] step:6461/10000 train_time:480037ms step_avg:74.30ms +[2025-09-02 18:17:41] [Rank 0] step:6481/10000 train_time:481606ms step_avg:74.31ms +[2025-09-02 18:17:41] [Rank 0] step:6481/10000 train_time:481606ms step_avg:74.31ms +[2025-09-02 18:17:42] [Rank 0] step:6501/10000 train_time:483163ms step_avg:74.32ms +[2025-09-02 18:17:42] [Rank 0] step:6501/10000 train_time:483163ms step_avg:74.32ms +[2025-09-02 18:17:44] [Rank 0] step:6521/10000 train_time:484720ms step_avg:74.33ms +[2025-09-02 18:17:44] [Rank 0] step:6521/10000 train_time:484720ms step_avg:74.33ms +[2025-09-02 18:17:46] [Rank 
0] step:6541/10000 train_time:486282ms step_avg:74.34ms +[2025-09-02 18:17:46] [Rank 0] step:6541/10000 train_time:486282ms step_avg:74.34ms +[2025-09-02 18:17:47] [Rank 0] step:6561/10000 train_time:487854ms step_avg:74.36ms +[2025-09-02 18:17:47] [Rank 0] step:6561/10000 train_time:487854ms step_avg:74.36ms +[2025-09-02 18:17:49] [Rank 0] step:6581/10000 train_time:489412ms step_avg:74.37ms +[2025-09-02 18:17:49] [Rank 0] step:6581/10000 train_time:489412ms step_avg:74.37ms +[2025-09-02 18:17:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:17:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:18:02] [Rank 0] PRINT: step:6600/10000 val_loss:3.9332 svd_entropy: attn_qk:H=0.7561,top10E=0.26,eRank=175.2,q75/q25=87.73 attn_vo:H=0.7870,top10E=0.15,eRank=261.6,q75/q25=inf mlp_w1:H=0.7837,top10E=0.26,eRank=203.7,q75/q25=15.17 mlp_w2:H=0.8543,top10E=0.14,eRank=298.1,q75/q25=20.75 vo_prod:H=0.6607,top10E=0.23,eRank=116.6,q75/q25=inf train_time:491131ms step_avg:74.41ms +[2025-09-02 18:18:02] [Rank 0] PRINT: step:6600/10000 val_loss:3.9332 svd_entropy: attn_qk:H=0.7561,top10E=0.26,eRank=175.2,q75/q25=87.73 attn_vo:H=0.7870,top10E=0.15,eRank=261.6,q75/q25=inf mlp_w1:H=0.7837,top10E=0.26,eRank=203.7,q75/q25=15.17 mlp_w2:H=0.8543,top10E=0.14,eRank=298.1,q75/q25=20.75 vo_prod:H=0.6607,top10E=0.23,eRank=116.6,q75/q25=inf train_time:491131ms step_avg:74.41ms +[2025-09-02 18:18:02] [Rank 0] step:6601/10000 train_time:491144ms step_avg:74.40ms +[2025-09-02 18:18:02] [Rank 0] step:6601/10000 train_time:491144ms step_avg:74.40ms +[2025-09-02 18:18:04] [Rank 0] step:6621/10000 train_time:492570ms step_avg:74.40ms +[2025-09-02 18:18:04] [Rank 0] step:6621/10000 train_time:492570ms step_avg:74.40ms +[2025-09-02 18:18:06] [Rank 0] step:6641/10000 train_time:494132ms step_avg:74.41ms +[2025-09-02 
18:18:06] [Rank 0] step:6641/10000 train_time:494132ms step_avg:74.41ms +[2025-09-02 18:18:07] [Rank 0] step:6661/10000 train_time:495692ms step_avg:74.42ms +[2025-09-02 18:18:07] [Rank 0] step:6661/10000 train_time:495692ms step_avg:74.42ms +[2025-09-02 18:18:09] [Rank 0] step:6681/10000 train_time:497267ms step_avg:74.43ms +[2025-09-02 18:18:09] [Rank 0] step:6681/10000 train_time:497267ms step_avg:74.43ms +[2025-09-02 18:18:10] [Rank 0] step:6701/10000 train_time:498860ms step_avg:74.45ms +[2025-09-02 18:18:10] [Rank 0] step:6701/10000 train_time:498860ms step_avg:74.45ms +[2025-09-02 18:18:12] [Rank 0] step:6721/10000 train_time:500447ms step_avg:74.46ms +[2025-09-02 18:18:12] [Rank 0] step:6721/10000 train_time:500447ms step_avg:74.46ms +[2025-09-02 18:18:13] [Rank 0] step:6741/10000 train_time:502033ms step_avg:74.47ms +[2025-09-02 18:18:13] [Rank 0] step:6741/10000 train_time:502033ms step_avg:74.47ms +[2025-09-02 18:18:15] [Rank 0] step:6761/10000 train_time:503620ms step_avg:74.49ms +[2025-09-02 18:18:15] [Rank 0] step:6761/10000 train_time:503620ms step_avg:74.49ms +[2025-09-02 18:18:17] [Rank 0] step:6781/10000 train_time:505212ms step_avg:74.50ms +[2025-09-02 18:18:17] [Rank 0] step:6781/10000 train_time:505212ms step_avg:74.50ms +[2025-09-02 18:18:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:18:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:18:30] [Rank 0] PRINT: step:6800/10000 val_loss:3.9161 svd_entropy: attn_qk:H=0.7575,top10E=0.26,eRank=176.4,q75/q25=87.77 attn_vo:H=0.7883,top10E=0.15,eRank=263.4,q75/q25=inf mlp_w1:H=0.7857,top10E=0.26,eRank=206.2,q75/q25=15.42 mlp_w2:H=0.8552,top10E=0.13,eRank=299.9,q75/q25=21.03 vo_prod:H=0.6625,top10E=0.23,eRank=118.0,q75/q25=inf train_time:506964ms step_avg:74.55ms +[2025-09-02 18:18:30] [Rank 0] PRINT: step:6800/10000 val_loss:3.9161 svd_entropy: attn_qk:H=0.7575,top10E=0.26,eRank=176.4,q75/q25=87.77 attn_vo:H=0.7883,top10E=0.15,eRank=263.4,q75/q25=inf mlp_w1:H=0.7857,top10E=0.26,eRank=206.2,q75/q25=15.42 mlp_w2:H=0.8552,top10E=0.13,eRank=299.9,q75/q25=21.03 vo_prod:H=0.6625,top10E=0.23,eRank=118.0,q75/q25=inf train_time:506964ms step_avg:74.55ms +[2025-09-02 18:18:30] [Rank 0] step:6801/10000 train_time:506976ms step_avg:74.54ms +[2025-09-02 18:18:30] [Rank 0] step:6801/10000 train_time:506976ms step_avg:74.54ms +[2025-09-02 18:18:32] [Rank 0] step:6821/10000 train_time:508408ms step_avg:74.54ms +[2025-09-02 18:18:32] [Rank 0] step:6821/10000 train_time:508408ms step_avg:74.54ms +[2025-09-02 18:18:33] [Rank 0] step:6841/10000 train_time:509990ms step_avg:74.55ms +[2025-09-02 18:18:33] [Rank 0] step:6841/10000 train_time:509990ms step_avg:74.55ms +[2025-09-02 18:18:35] [Rank 0] step:6861/10000 train_time:511577ms step_avg:74.56ms +[2025-09-02 18:18:35] [Rank 0] step:6861/10000 train_time:511577ms step_avg:74.56ms +[2025-09-02 18:18:37] [Rank 0] step:6881/10000 train_time:513163ms step_avg:74.58ms +[2025-09-02 18:18:37] [Rank 0] step:6881/10000 train_time:513163ms step_avg:74.58ms +[2025-09-02 18:18:38] [Rank 0] step:6901/10000 train_time:514750ms step_avg:74.59ms +[2025-09-02 18:18:38] [Rank 0] step:6901/10000 train_time:514750ms step_avg:74.59ms +[2025-09-02 18:18:40] [Rank 0] step:6921/10000 train_time:516334ms step_avg:74.60ms +[2025-09-02 18:18:40] [Rank 0] step:6921/10000 train_time:516334ms step_avg:74.60ms +[2025-09-02 18:18:41] [Rank 
0] step:6941/10000 train_time:517930ms step_avg:74.62ms +[2025-09-02 18:18:41] [Rank 0] step:6941/10000 train_time:517930ms step_avg:74.62ms +[2025-09-02 18:18:43] [Rank 0] step:6961/10000 train_time:519531ms step_avg:74.63ms +[2025-09-02 18:18:43] [Rank 0] step:6961/10000 train_time:519531ms step_avg:74.63ms +[2025-09-02 18:18:44] [Rank 0] step:6981/10000 train_time:521125ms step_avg:74.65ms +[2025-09-02 18:18:44] [Rank 0] step:6981/10000 train_time:521125ms step_avg:74.65ms +[2025-09-02 18:18:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:18:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:18:58] [Rank 0] PRINT: step:7000/10000 val_loss:3.9017 svd_entropy: attn_qk:H=0.7589,top10E=0.26,eRank=177.6,q75/q25=87.65 attn_vo:H=0.7895,top10E=0.15,eRank=265.0,q75/q25=inf mlp_w1:H=0.7876,top10E=0.26,eRank=208.5,q75/q25=15.65 mlp_w2:H=0.8560,top10E=0.13,eRank=301.6,q75/q25=21.21 vo_prod:H=0.6642,top10E=0.23,eRank=119.5,q75/q25=inf train_time:522878ms step_avg:74.70ms +[2025-09-02 18:18:58] [Rank 0] PRINT: step:7000/10000 val_loss:3.9017 svd_entropy: attn_qk:H=0.7589,top10E=0.26,eRank=177.6,q75/q25=87.65 attn_vo:H=0.7895,top10E=0.15,eRank=265.0,q75/q25=inf mlp_w1:H=0.7876,top10E=0.26,eRank=208.5,q75/q25=15.65 mlp_w2:H=0.8560,top10E=0.13,eRank=301.6,q75/q25=21.21 vo_prod:H=0.6642,top10E=0.23,eRank=119.5,q75/q25=inf train_time:522878ms step_avg:74.70ms +[2025-09-02 18:18:58] [Rank 0] step:7001/10000 train_time:522890ms step_avg:74.69ms +[2025-09-02 18:18:58] [Rank 0] step:7001/10000 train_time:522890ms step_avg:74.69ms +[2025-09-02 18:19:00] [Rank 0] step:7021/10000 train_time:524332ms step_avg:74.68ms +[2025-09-02 18:19:00] [Rank 0] step:7021/10000 train_time:524332ms step_avg:74.68ms +[2025-09-02 18:19:01] [Rank 0] step:7041/10000 train_time:525922ms step_avg:74.69ms +[2025-09-02 
18:19:01] [Rank 0] step:7041/10000 train_time:525922ms step_avg:74.69ms +[2025-09-02 18:19:03] [Rank 0] step:7061/10000 train_time:527507ms step_avg:74.71ms +[2025-09-02 18:19:03] [Rank 0] step:7061/10000 train_time:527507ms step_avg:74.71ms +[2025-09-02 18:19:04] [Rank 0] step:7081/10000 train_time:529098ms step_avg:74.72ms +[2025-09-02 18:19:04] [Rank 0] step:7081/10000 train_time:529098ms step_avg:74.72ms +[2025-09-02 18:19:06] [Rank 0] step:7101/10000 train_time:530689ms step_avg:74.73ms +[2025-09-02 18:19:06] [Rank 0] step:7101/10000 train_time:530689ms step_avg:74.73ms +[2025-09-02 18:19:07] [Rank 0] step:7121/10000 train_time:532280ms step_avg:74.75ms +[2025-09-02 18:19:07] [Rank 0] step:7121/10000 train_time:532280ms step_avg:74.75ms +[2025-09-02 18:19:09] [Rank 0] step:7141/10000 train_time:533869ms step_avg:74.76ms +[2025-09-02 18:19:09] [Rank 0] step:7141/10000 train_time:533869ms step_avg:74.76ms +[2025-09-02 18:19:11] [Rank 0] step:7161/10000 train_time:535460ms step_avg:74.77ms +[2025-09-02 18:19:11] [Rank 0] step:7161/10000 train_time:535460ms step_avg:74.77ms +[2025-09-02 18:19:12] [Rank 0] step:7181/10000 train_time:537053ms step_avg:74.79ms +[2025-09-02 18:19:12] [Rank 0] step:7181/10000 train_time:537053ms step_avg:74.79ms +[2025-09-02 18:19:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:19:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:19:26] [Rank 0] PRINT: step:7200/10000 val_loss:3.8915 svd_entropy: attn_qk:H=0.7601,top10E=0.26,eRank=178.7,q75/q25=87.61 attn_vo:H=0.7906,top10E=0.15,eRank=266.4,q75/q25=inf mlp_w1:H=0.7893,top10E=0.26,eRank=210.7,q75/q25=15.87 mlp_w2:H=0.8568,top10E=0.13,eRank=303.2,q75/q25=21.38 vo_prod:H=0.6657,top10E=0.23,eRank=120.7,q75/q25=inf train_time:538810ms step_avg:74.83ms +[2025-09-02 18:19:26] [Rank 0] PRINT: step:7200/10000 val_loss:3.8915 svd_entropy: attn_qk:H=0.7601,top10E=0.26,eRank=178.7,q75/q25=87.61 attn_vo:H=0.7906,top10E=0.15,eRank=266.4,q75/q25=inf mlp_w1:H=0.7893,top10E=0.26,eRank=210.7,q75/q25=15.87 mlp_w2:H=0.8568,top10E=0.13,eRank=303.2,q75/q25=21.38 vo_prod:H=0.6657,top10E=0.23,eRank=120.7,q75/q25=inf train_time:538810ms step_avg:74.83ms +[2025-09-02 18:19:26] [Rank 0] step:7201/10000 train_time:538821ms step_avg:74.83ms +[2025-09-02 18:19:26] [Rank 0] step:7201/10000 train_time:538821ms step_avg:74.83ms +[2025-09-02 18:19:27] [Rank 0] step:7221/10000 train_time:540272ms step_avg:74.82ms +[2025-09-02 18:19:27] [Rank 0] step:7221/10000 train_time:540272ms step_avg:74.82ms +[2025-09-02 18:19:29] [Rank 0] step:7241/10000 train_time:541879ms step_avg:74.83ms +[2025-09-02 18:19:29] [Rank 0] step:7241/10000 train_time:541879ms step_avg:74.83ms +[2025-09-02 18:19:31] [Rank 0] step:7261/10000 train_time:543466ms step_avg:74.85ms +[2025-09-02 18:19:31] [Rank 0] step:7261/10000 train_time:543466ms step_avg:74.85ms +[2025-09-02 18:19:32] [Rank 0] step:7281/10000 train_time:545066ms step_avg:74.86ms +[2025-09-02 18:19:32] [Rank 0] step:7281/10000 train_time:545066ms step_avg:74.86ms +[2025-09-02 18:19:34] [Rank 0] step:7301/10000 train_time:546657ms step_avg:74.87ms +[2025-09-02 18:19:34] [Rank 0] step:7301/10000 train_time:546657ms step_avg:74.87ms +[2025-09-02 18:19:35] [Rank 0] step:7321/10000 train_time:548261ms step_avg:74.89ms +[2025-09-02 18:19:35] [Rank 0] step:7321/10000 train_time:548261ms step_avg:74.89ms +[2025-09-02 18:19:37] [Rank 
0] step:7341/10000 train_time:549856ms step_avg:74.90ms +[2025-09-02 18:19:37] [Rank 0] step:7341/10000 train_time:549856ms step_avg:74.90ms +[2025-09-02 18:19:39] [Rank 0] step:7361/10000 train_time:551455ms step_avg:74.92ms +[2025-09-02 18:19:39] [Rank 0] step:7361/10000 train_time:551455ms step_avg:74.92ms +[2025-09-02 18:19:40] [Rank 0] step:7381/10000 train_time:553054ms step_avg:74.93ms +[2025-09-02 18:19:40] [Rank 0] step:7381/10000 train_time:553054ms step_avg:74.93ms +[2025-09-02 18:19:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:19:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:19:54] [Rank 0] PRINT: step:7400/10000 val_loss:3.8731 svd_entropy: attn_qk:H=0.7612,top10E=0.25,eRank=179.7,q75/q25=87.61 attn_vo:H=0.7916,top10E=0.14,eRank=267.7,q75/q25=inf mlp_w1:H=0.7908,top10E=0.25,eRank=212.6,q75/q25=16.09 mlp_w2:H=0.8575,top10E=0.13,eRank=304.7,q75/q25=21.49 vo_prod:H=0.6670,top10E=0.23,eRank=121.8,q75/q25=inf train_time:554792ms step_avg:74.97ms +[2025-09-02 18:19:54] [Rank 0] PRINT: step:7400/10000 val_loss:3.8731 svd_entropy: attn_qk:H=0.7612,top10E=0.25,eRank=179.7,q75/q25=87.61 attn_vo:H=0.7916,top10E=0.14,eRank=267.7,q75/q25=inf mlp_w1:H=0.7908,top10E=0.25,eRank=212.6,q75/q25=16.09 mlp_w2:H=0.8575,top10E=0.13,eRank=304.7,q75/q25=21.49 vo_prod:H=0.6670,top10E=0.23,eRank=121.8,q75/q25=inf train_time:554792ms step_avg:74.97ms +[2025-09-02 18:19:54] [Rank 0] step:7401/10000 train_time:554804ms step_avg:74.96ms +[2025-09-02 18:19:54] [Rank 0] step:7401/10000 train_time:554804ms step_avg:74.96ms +[2025-09-02 18:19:55] [Rank 0] step:7421/10000 train_time:556245ms step_avg:74.96ms +[2025-09-02 18:19:55] [Rank 0] step:7421/10000 train_time:556245ms step_avg:74.96ms +[2025-09-02 18:19:57] [Rank 0] step:7441/10000 train_time:557832ms step_avg:74.97ms +[2025-09-02 
18:19:57] [Rank 0] step:7441/10000 train_time:557832ms step_avg:74.97ms +[2025-09-02 18:19:58] [Rank 0] step:7461/10000 train_time:559423ms step_avg:74.98ms +[2025-09-02 18:19:58] [Rank 0] step:7461/10000 train_time:559423ms step_avg:74.98ms +[2025-09-02 18:20:00] [Rank 0] step:7481/10000 train_time:561020ms step_avg:74.99ms +[2025-09-02 18:20:00] [Rank 0] step:7481/10000 train_time:561020ms step_avg:74.99ms +[2025-09-02 18:20:02] [Rank 0] step:7501/10000 train_time:562617ms step_avg:75.01ms +[2025-09-02 18:20:02] [Rank 0] step:7501/10000 train_time:562617ms step_avg:75.01ms +[2025-09-02 18:20:03] [Rank 0] step:7521/10000 train_time:564213ms step_avg:75.02ms +[2025-09-02 18:20:03] [Rank 0] step:7521/10000 train_time:564213ms step_avg:75.02ms +[2025-09-02 18:20:05] [Rank 0] step:7541/10000 train_time:565820ms step_avg:75.03ms +[2025-09-02 18:20:05] [Rank 0] step:7541/10000 train_time:565820ms step_avg:75.03ms +[2025-09-02 18:20:06] [Rank 0] step:7561/10000 train_time:567403ms step_avg:75.04ms +[2025-09-02 18:20:06] [Rank 0] step:7561/10000 train_time:567403ms step_avg:75.04ms +[2025-09-02 18:20:08] [Rank 0] step:7581/10000 train_time:569009ms step_avg:75.06ms +[2025-09-02 18:20:08] [Rank 0] step:7581/10000 train_time:569009ms step_avg:75.06ms +[2025-09-02 18:20:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:20:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:20:21] [Rank 0] PRINT: step:7600/10000 val_loss:3.8720 svd_entropy: attn_qk:H=0.7622,top10E=0.25,eRank=180.7,q75/q25=87.48 attn_vo:H=0.7924,top10E=0.14,eRank=268.9,q75/q25=inf mlp_w1:H=0.7922,top10E=0.25,eRank=214.3,q75/q25=16.25 mlp_w2:H=0.8582,top10E=0.13,eRank=306.1,q75/q25=21.60 vo_prod:H=0.6683,top10E=0.22,eRank=123.0,q75/q25=inf train_time:570772ms step_avg:75.10ms +[2025-09-02 18:20:21] [Rank 0] PRINT: step:7600/10000 val_loss:3.8720 svd_entropy: attn_qk:H=0.7622,top10E=0.25,eRank=180.7,q75/q25=87.48 attn_vo:H=0.7924,top10E=0.14,eRank=268.9,q75/q25=inf mlp_w1:H=0.7922,top10E=0.25,eRank=214.3,q75/q25=16.25 mlp_w2:H=0.8582,top10E=0.13,eRank=306.1,q75/q25=21.60 vo_prod:H=0.6683,top10E=0.22,eRank=123.0,q75/q25=inf train_time:570772ms step_avg:75.10ms +[2025-09-02 18:20:22] [Rank 0] step:7601/10000 train_time:570784ms step_avg:75.09ms +[2025-09-02 18:20:22] [Rank 0] step:7601/10000 train_time:570784ms step_avg:75.09ms +[2025-09-02 18:20:23] [Rank 0] step:7621/10000 train_time:572215ms step_avg:75.08ms +[2025-09-02 18:20:23] [Rank 0] step:7621/10000 train_time:572215ms step_avg:75.08ms +[2025-09-02 18:20:25] [Rank 0] step:7641/10000 train_time:573807ms step_avg:75.10ms +[2025-09-02 18:20:25] [Rank 0] step:7641/10000 train_time:573807ms step_avg:75.10ms +[2025-09-02 18:20:26] [Rank 0] step:7661/10000 train_time:575402ms step_avg:75.11ms +[2025-09-02 18:20:26] [Rank 0] step:7661/10000 train_time:575402ms step_avg:75.11ms +[2025-09-02 18:20:28] [Rank 0] step:7681/10000 train_time:576988ms step_avg:75.12ms +[2025-09-02 18:20:28] [Rank 0] step:7681/10000 train_time:576988ms step_avg:75.12ms +[2025-09-02 18:20:30] [Rank 0] step:7701/10000 train_time:578612ms step_avg:75.13ms +[2025-09-02 18:20:30] [Rank 0] step:7701/10000 train_time:578612ms step_avg:75.13ms +[2025-09-02 18:20:31] [Rank 0] step:7721/10000 train_time:580213ms step_avg:75.15ms +[2025-09-02 18:20:31] [Rank 0] step:7721/10000 train_time:580213ms step_avg:75.15ms +[2025-09-02 18:20:33] [Rank 
0] step:7741/10000 train_time:581808ms step_avg:75.16ms +[2025-09-02 18:20:33] [Rank 0] step:7741/10000 train_time:581808ms step_avg:75.16ms +[2025-09-02 18:20:34] [Rank 0] step:7761/10000 train_time:583406ms step_avg:75.17ms +[2025-09-02 18:20:34] [Rank 0] step:7761/10000 train_time:583406ms step_avg:75.17ms +[2025-09-02 18:20:36] [Rank 0] step:7781/10000 train_time:585010ms step_avg:75.18ms +[2025-09-02 18:20:36] [Rank 0] step:7781/10000 train_time:585010ms step_avg:75.18ms +[2025-09-02 18:20:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:20:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:20:49] [Rank 0] PRINT: step:7800/10000 val_loss:3.8567 svd_entropy: attn_qk:H=0.7631,top10E=0.25,eRank=181.6,q75/q25=87.29 attn_vo:H=0.7933,top10E=0.14,eRank=270.0,q75/q25=inf mlp_w1:H=0.7934,top10E=0.25,eRank=215.9,q75/q25=16.35 mlp_w2:H=0.8589,top10E=0.13,eRank=307.4,q75/q25=21.71 vo_prod:H=0.6695,top10E=0.22,eRank=124.1,q75/q25=inf train_time:586771ms step_avg:75.23ms +[2025-09-02 18:20:49] [Rank 0] PRINT: step:7800/10000 val_loss:3.8567 svd_entropy: attn_qk:H=0.7631,top10E=0.25,eRank=181.6,q75/q25=87.29 attn_vo:H=0.7933,top10E=0.14,eRank=270.0,q75/q25=inf mlp_w1:H=0.7934,top10E=0.25,eRank=215.9,q75/q25=16.35 mlp_w2:H=0.8589,top10E=0.13,eRank=307.4,q75/q25=21.71 vo_prod:H=0.6695,top10E=0.22,eRank=124.1,q75/q25=inf train_time:586771ms step_avg:75.23ms +[2025-09-02 18:20:49] [Rank 0] step:7801/10000 train_time:586783ms step_avg:75.22ms +[2025-09-02 18:20:49] [Rank 0] step:7801/10000 train_time:586783ms step_avg:75.22ms +[2025-09-02 18:20:51] [Rank 0] step:7821/10000 train_time:588235ms step_avg:75.21ms +[2025-09-02 18:20:51] [Rank 0] step:7821/10000 train_time:588235ms step_avg:75.21ms +[2025-09-02 18:20:53] [Rank 0] step:7841/10000 train_time:589825ms step_avg:75.22ms +[2025-09-02 
18:20:53] [Rank 0] step:7841/10000 train_time:589825ms step_avg:75.22ms +[2025-09-02 18:20:54] [Rank 0] step:7861/10000 train_time:591422ms step_avg:75.23ms +[2025-09-02 18:20:54] [Rank 0] step:7861/10000 train_time:591422ms step_avg:75.23ms +[2025-09-02 18:20:56] [Rank 0] step:7881/10000 train_time:593025ms step_avg:75.25ms +[2025-09-02 18:20:56] [Rank 0] step:7881/10000 train_time:593025ms step_avg:75.25ms +[2025-09-02 18:20:57] [Rank 0] step:7901/10000 train_time:594615ms step_avg:75.26ms +[2025-09-02 18:20:57] [Rank 0] step:7901/10000 train_time:594615ms step_avg:75.26ms +[2025-09-02 18:20:59] [Rank 0] step:7921/10000 train_time:596212ms step_avg:75.27ms +[2025-09-02 18:20:59] [Rank 0] step:7921/10000 train_time:596212ms step_avg:75.27ms +[2025-09-02 18:21:01] [Rank 0] step:7941/10000 train_time:597815ms step_avg:75.28ms +[2025-09-02 18:21:01] [Rank 0] step:7941/10000 train_time:597815ms step_avg:75.28ms +[2025-09-02 18:21:02] [Rank 0] step:7961/10000 train_time:599415ms step_avg:75.29ms +[2025-09-02 18:21:02] [Rank 0] step:7961/10000 train_time:599415ms step_avg:75.29ms +[2025-09-02 18:21:04] [Rank 0] step:7981/10000 train_time:601007ms step_avg:75.30ms +[2025-09-02 18:21:04] [Rank 0] step:7981/10000 train_time:601007ms step_avg:75.30ms +[2025-09-02 18:21:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:21:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:21:17] [Rank 0] PRINT: step:8000/10000 val_loss:3.8402 svd_entropy: attn_qk:H=0.7640,top10E=0.25,eRank=182.3,q75/q25=87.12 attn_vo:H=0.7940,top10E=0.14,eRank=271.0,q75/q25=inf mlp_w1:H=0.7944,top10E=0.25,eRank=217.3,q75/q25=16.50 mlp_w2:H=0.8595,top10E=0.13,eRank=308.7,q75/q25=21.85 vo_prod:H=0.6706,top10E=0.22,eRank=125.1,q75/q25=inf train_time:602763ms step_avg:75.35ms +[2025-09-02 18:21:17] [Rank 0] PRINT: step:8000/10000 val_loss:3.8402 svd_entropy: attn_qk:H=0.7640,top10E=0.25,eRank=182.3,q75/q25=87.12 attn_vo:H=0.7940,top10E=0.14,eRank=271.0,q75/q25=inf mlp_w1:H=0.7944,top10E=0.25,eRank=217.3,q75/q25=16.50 mlp_w2:H=0.8595,top10E=0.13,eRank=308.7,q75/q25=21.85 vo_prod:H=0.6706,top10E=0.22,eRank=125.1,q75/q25=inf train_time:602763ms step_avg:75.35ms +[2025-09-02 18:21:17] [Rank 0] step:8001/10000 train_time:602776ms step_avg:75.34ms +[2025-09-02 18:21:17] [Rank 0] step:8001/10000 train_time:602776ms step_avg:75.34ms +[2025-09-02 18:21:19] [Rank 0] step:8021/10000 train_time:604215ms step_avg:75.33ms +[2025-09-02 18:21:19] [Rank 0] step:8021/10000 train_time:604215ms step_avg:75.33ms +[2025-09-02 18:21:21] [Rank 0] step:8041/10000 train_time:605819ms step_avg:75.34ms +[2025-09-02 18:21:21] [Rank 0] step:8041/10000 train_time:605819ms step_avg:75.34ms +[2025-09-02 18:21:22] [Rank 0] step:8061/10000 train_time:607413ms step_avg:75.35ms +[2025-09-02 18:21:22] [Rank 0] step:8061/10000 train_time:607413ms step_avg:75.35ms +[2025-09-02 18:21:24] [Rank 0] step:8081/10000 train_time:609001ms step_avg:75.36ms +[2025-09-02 18:21:24] [Rank 0] step:8081/10000 train_time:609001ms step_avg:75.36ms +[2025-09-02 18:21:25] [Rank 0] step:8101/10000 train_time:610605ms step_avg:75.37ms +[2025-09-02 18:21:25] [Rank 0] step:8101/10000 train_time:610605ms step_avg:75.37ms +[2025-09-02 18:21:27] [Rank 0] step:8121/10000 train_time:612201ms step_avg:75.38ms +[2025-09-02 18:21:27] [Rank 0] step:8121/10000 train_time:612201ms step_avg:75.38ms +[2025-09-02 18:21:29] [Rank 
0] step:8141/10000 train_time:613900ms step_avg:75.41ms +[2025-09-02 18:21:29] [Rank 0] step:8141/10000 train_time:613900ms step_avg:75.41ms +[2025-09-02 18:21:30] [Rank 0] step:8161/10000 train_time:615507ms step_avg:75.42ms +[2025-09-02 18:21:30] [Rank 0] step:8161/10000 train_time:615507ms step_avg:75.42ms +[2025-09-02 18:21:32] [Rank 0] step:8181/10000 train_time:617135ms step_avg:75.44ms +[2025-09-02 18:21:32] [Rank 0] step:8181/10000 train_time:617135ms step_avg:75.44ms +[2025-09-02 18:21:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:21:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:21:45] [Rank 0] PRINT: step:8200/10000 val_loss:3.8315 svd_entropy: attn_qk:H=0.7647,top10E=0.25,eRank=183.1,q75/q25=87.30 attn_vo:H=0.7947,top10E=0.14,eRank=271.9,q75/q25=inf mlp_w1:H=0.7954,top10E=0.25,eRank=218.5,q75/q25=16.61 mlp_w2:H=0.8600,top10E=0.13,eRank=309.8,q75/q25=21.82 vo_prod:H=0.6716,top10E=0.22,eRank=125.9,q75/q25=inf train_time:618946ms step_avg:75.48ms +[2025-09-02 18:21:45] [Rank 0] PRINT: step:8200/10000 val_loss:3.8315 svd_entropy: attn_qk:H=0.7647,top10E=0.25,eRank=183.1,q75/q25=87.30 attn_vo:H=0.7947,top10E=0.14,eRank=271.9,q75/q25=inf mlp_w1:H=0.7954,top10E=0.25,eRank=218.5,q75/q25=16.61 mlp_w2:H=0.8600,top10E=0.13,eRank=309.8,q75/q25=21.82 vo_prod:H=0.6716,top10E=0.22,eRank=125.9,q75/q25=inf train_time:618946ms step_avg:75.48ms +[2025-09-02 18:21:45] [Rank 0] step:8201/10000 train_time:618959ms step_avg:75.47ms +[2025-09-02 18:21:45] [Rank 0] step:8201/10000 train_time:618959ms step_avg:75.47ms +[2025-09-02 18:21:47] [Rank 0] step:8221/10000 train_time:620434ms step_avg:75.47ms +[2025-09-02 18:21:47] [Rank 0] step:8221/10000 train_time:620434ms step_avg:75.47ms +[2025-09-02 18:21:49] [Rank 0] step:8241/10000 train_time:622063ms step_avg:75.48ms +[2025-09-02 
18:21:49] [Rank 0] step:8241/10000 train_time:622063ms step_avg:75.48ms +[2025-09-02 18:21:50] [Rank 0] step:8261/10000 train_time:623687ms step_avg:75.50ms +[2025-09-02 18:21:50] [Rank 0] step:8261/10000 train_time:623687ms step_avg:75.50ms +[2025-09-02 18:21:52] [Rank 0] step:8281/10000 train_time:625314ms step_avg:75.51ms +[2025-09-02 18:21:52] [Rank 0] step:8281/10000 train_time:625314ms step_avg:75.51ms +[2025-09-02 18:21:54] [Rank 0] step:8301/10000 train_time:626939ms step_avg:75.53ms +[2025-09-02 18:21:54] [Rank 0] step:8301/10000 train_time:626939ms step_avg:75.53ms +[2025-09-02 18:21:55] [Rank 0] step:8321/10000 train_time:628557ms step_avg:75.54ms +[2025-09-02 18:21:55] [Rank 0] step:8321/10000 train_time:628557ms step_avg:75.54ms +[2025-09-02 18:21:57] [Rank 0] step:8341/10000 train_time:630179ms step_avg:75.55ms +[2025-09-02 18:21:57] [Rank 0] step:8341/10000 train_time:630179ms step_avg:75.55ms +[2025-09-02 18:21:58] [Rank 0] step:8361/10000 train_time:631803ms step_avg:75.57ms +[2025-09-02 18:21:58] [Rank 0] step:8361/10000 train_time:631803ms step_avg:75.57ms +[2025-09-02 18:22:00] [Rank 0] step:8381/10000 train_time:633424ms step_avg:75.58ms +[2025-09-02 18:22:00] [Rank 0] step:8381/10000 train_time:633424ms step_avg:75.58ms +[2025-09-02 18:22:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:22:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:22:14] [Rank 0] PRINT: step:8400/10000 val_loss:3.8219 svd_entropy: attn_qk:H=0.7653,top10E=0.25,eRank=183.7,q75/q25=86.87 attn_vo:H=0.7953,top10E=0.14,eRank=272.7,q75/q25=inf mlp_w1:H=0.7963,top10E=0.25,eRank=219.7,q75/q25=16.71 mlp_w2:H=0.8605,top10E=0.13,eRank=310.8,q75/q25=21.87 vo_prod:H=0.6726,top10E=0.22,eRank=126.8,q75/q25=inf train_time:635203ms step_avg:75.62ms +[2025-09-02 18:22:14] [Rank 0] PRINT: step:8400/10000 val_loss:3.8219 svd_entropy: attn_qk:H=0.7653,top10E=0.25,eRank=183.7,q75/q25=86.87 attn_vo:H=0.7953,top10E=0.14,eRank=272.7,q75/q25=inf mlp_w1:H=0.7963,top10E=0.25,eRank=219.7,q75/q25=16.71 mlp_w2:H=0.8605,top10E=0.13,eRank=310.8,q75/q25=21.87 vo_prod:H=0.6726,top10E=0.22,eRank=126.8,q75/q25=inf train_time:635203ms step_avg:75.62ms +[2025-09-02 18:22:14] [Rank 0] step:8401/10000 train_time:635215ms step_avg:75.61ms +[2025-09-02 18:22:14] [Rank 0] step:8401/10000 train_time:635215ms step_avg:75.61ms +[2025-09-02 18:22:15] [Rank 0] step:8421/10000 train_time:636674ms step_avg:75.61ms +[2025-09-02 18:22:15] [Rank 0] step:8421/10000 train_time:636674ms step_avg:75.61ms +[2025-09-02 18:22:17] [Rank 0] step:8441/10000 train_time:638300ms step_avg:75.62ms +[2025-09-02 18:22:17] [Rank 0] step:8441/10000 train_time:638300ms step_avg:75.62ms +[2025-09-02 18:22:19] [Rank 0] step:8461/10000 train_time:639917ms step_avg:75.63ms +[2025-09-02 18:22:19] [Rank 0] step:8461/10000 train_time:639917ms step_avg:75.63ms +[2025-09-02 18:22:20] [Rank 0] step:8481/10000 train_time:641549ms step_avg:75.65ms +[2025-09-02 18:22:20] [Rank 0] step:8481/10000 train_time:641549ms step_avg:75.65ms +[2025-09-02 18:22:22] [Rank 0] step:8501/10000 train_time:643192ms step_avg:75.66ms +[2025-09-02 18:22:22] [Rank 0] step:8501/10000 train_time:643192ms step_avg:75.66ms +[2025-09-02 18:22:23] [Rank 0] step:8521/10000 train_time:644824ms step_avg:75.67ms +[2025-09-02 18:22:23] [Rank 0] step:8521/10000 train_time:644824ms step_avg:75.67ms +[2025-09-02 18:22:25] [Rank 
0] step:8541/10000 train_time:646462ms step_avg:75.69ms +[2025-09-02 18:22:25] [Rank 0] step:8541/10000 train_time:646462ms step_avg:75.69ms +[2025-09-02 18:22:27] [Rank 0] step:8561/10000 train_time:648092ms step_avg:75.70ms +[2025-09-02 18:22:27] [Rank 0] step:8561/10000 train_time:648092ms step_avg:75.70ms +[2025-09-02 18:22:28] [Rank 0] step:8581/10000 train_time:649718ms step_avg:75.72ms +[2025-09-02 18:22:28] [Rank 0] step:8581/10000 train_time:649718ms step_avg:75.72ms +[2025-09-02 18:22:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:22:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:22:42] [Rank 0] PRINT: step:8600/10000 val_loss:3.8126 svd_entropy: attn_qk:H=0.7659,top10E=0.25,eRank=184.2,q75/q25=87.23 attn_vo:H=0.7958,top10E=0.14,eRank=273.4,q75/q25=inf mlp_w1:H=0.7970,top10E=0.25,eRank=220.7,q75/q25=16.80 mlp_w2:H=0.8610,top10E=0.13,eRank=311.8,q75/q25=21.90 vo_prod:H=0.6734,top10E=0.22,eRank=127.6,q75/q25=inf train_time:651493ms step_avg:75.76ms +[2025-09-02 18:22:42] [Rank 0] PRINT: step:8600/10000 val_loss:3.8126 svd_entropy: attn_qk:H=0.7659,top10E=0.25,eRank=184.2,q75/q25=87.23 attn_vo:H=0.7958,top10E=0.14,eRank=273.4,q75/q25=inf mlp_w1:H=0.7970,top10E=0.25,eRank=220.7,q75/q25=16.80 mlp_w2:H=0.8610,top10E=0.13,eRank=311.8,q75/q25=21.90 vo_prod:H=0.6734,top10E=0.22,eRank=127.6,q75/q25=inf train_time:651493ms step_avg:75.76ms +[2025-09-02 18:22:42] [Rank 0] step:8601/10000 train_time:651505ms step_avg:75.75ms +[2025-09-02 18:22:42] [Rank 0] step:8601/10000 train_time:651505ms step_avg:75.75ms +[2025-09-02 18:22:44] [Rank 0] step:8621/10000 train_time:652977ms step_avg:75.74ms +[2025-09-02 18:22:44] [Rank 0] step:8621/10000 train_time:652977ms step_avg:75.74ms +[2025-09-02 18:22:45] [Rank 0] step:8641/10000 train_time:654596ms step_avg:75.75ms +[2025-09-02 
18:22:45] [Rank 0] step:8641/10000 train_time:654596ms step_avg:75.75ms +[2025-09-02 18:22:47] [Rank 0] step:8661/10000 train_time:656220ms step_avg:75.77ms +[2025-09-02 18:22:47] [Rank 0] step:8661/10000 train_time:656220ms step_avg:75.77ms +[2025-09-02 18:22:48] [Rank 0] step:8681/10000 train_time:657841ms step_avg:75.78ms +[2025-09-02 18:22:48] [Rank 0] step:8681/10000 train_time:657841ms step_avg:75.78ms +[2025-09-02 18:22:50] [Rank 0] step:8701/10000 train_time:659456ms step_avg:75.79ms +[2025-09-02 18:22:50] [Rank 0] step:8701/10000 train_time:659456ms step_avg:75.79ms +[2025-09-02 18:22:52] [Rank 0] step:8721/10000 train_time:661079ms step_avg:75.80ms +[2025-09-02 18:22:52] [Rank 0] step:8721/10000 train_time:661079ms step_avg:75.80ms +[2025-09-02 18:22:53] [Rank 0] step:8741/10000 train_time:662693ms step_avg:75.81ms +[2025-09-02 18:22:53] [Rank 0] step:8741/10000 train_time:662693ms step_avg:75.81ms +[2025-09-02 18:22:55] [Rank 0] step:8761/10000 train_time:664308ms step_avg:75.83ms +[2025-09-02 18:22:55] [Rank 0] step:8761/10000 train_time:664308ms step_avg:75.83ms +[2025-09-02 18:22:56] [Rank 0] step:8781/10000 train_time:665936ms step_avg:75.84ms +[2025-09-02 18:22:56] [Rank 0] step:8781/10000 train_time:665936ms step_avg:75.84ms +[2025-09-02 18:22:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:22:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:23:10] [Rank 0] PRINT: step:8800/10000 val_loss:3.8038 svd_entropy: attn_qk:H=0.7664,top10E=0.25,eRank=184.7,q75/q25=87.15 attn_vo:H=0.7963,top10E=0.14,eRank=274.1,q75/q25=inf mlp_w1:H=0.7977,top10E=0.24,eRank=221.6,q75/q25=16.84 mlp_w2:H=0.8614,top10E=0.13,eRank=312.8,q75/q25=21.93 vo_prod:H=0.6742,top10E=0.22,eRank=128.3,q75/q25=inf train_time:667728ms step_avg:75.88ms +[2025-09-02 18:23:10] [Rank 0] PRINT: step:8800/10000 val_loss:3.8038 svd_entropy: attn_qk:H=0.7664,top10E=0.25,eRank=184.7,q75/q25=87.15 attn_vo:H=0.7963,top10E=0.14,eRank=274.1,q75/q25=inf mlp_w1:H=0.7977,top10E=0.24,eRank=221.6,q75/q25=16.84 mlp_w2:H=0.8614,top10E=0.13,eRank=312.8,q75/q25=21.93 vo_prod:H=0.6742,top10E=0.22,eRank=128.3,q75/q25=inf train_time:667728ms step_avg:75.88ms +[2025-09-02 18:23:10] [Rank 0] step:8801/10000 train_time:667740ms step_avg:75.87ms +[2025-09-02 18:23:10] [Rank 0] step:8801/10000 train_time:667740ms step_avg:75.87ms +[2025-09-02 18:23:12] [Rank 0] step:8821/10000 train_time:669215ms step_avg:75.87ms +[2025-09-02 18:23:12] [Rank 0] step:8821/10000 train_time:669215ms step_avg:75.87ms +[2025-09-02 18:23:13] [Rank 0] step:8841/10000 train_time:670856ms step_avg:75.88ms +[2025-09-02 18:23:13] [Rank 0] step:8841/10000 train_time:670856ms step_avg:75.88ms +[2025-09-02 18:23:15] [Rank 0] step:8861/10000 train_time:672474ms step_avg:75.89ms +[2025-09-02 18:23:15] [Rank 0] step:8861/10000 train_time:672474ms step_avg:75.89ms +[2025-09-02 18:23:17] [Rank 0] step:8881/10000 train_time:674093ms step_avg:75.90ms +[2025-09-02 18:23:17] [Rank 0] step:8881/10000 train_time:674093ms step_avg:75.90ms +[2025-09-02 18:23:18] [Rank 0] step:8901/10000 train_time:675716ms step_avg:75.91ms +[2025-09-02 18:23:18] [Rank 0] step:8901/10000 train_time:675716ms step_avg:75.91ms +[2025-09-02 18:23:20] [Rank 0] step:8921/10000 train_time:677346ms step_avg:75.93ms +[2025-09-02 18:23:20] [Rank 0] step:8921/10000 train_time:677346ms step_avg:75.93ms +[2025-09-02 18:23:21] [Rank 
0] step:8941/10000 train_time:678983ms step_avg:75.94ms +[2025-09-02 18:23:21] [Rank 0] step:8941/10000 train_time:678983ms step_avg:75.94ms +[2025-09-02 18:23:23] [Rank 0] step:8961/10000 train_time:680603ms step_avg:75.95ms +[2025-09-02 18:23:23] [Rank 0] step:8961/10000 train_time:680603ms step_avg:75.95ms +[2025-09-02 18:23:25] [Rank 0] step:8981/10000 train_time:682223ms step_avg:75.96ms +[2025-09-02 18:23:25] [Rank 0] step:8981/10000 train_time:682223ms step_avg:75.96ms +[2025-09-02 18:23:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:23:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:23:38] [Rank 0] PRINT: step:9000/10000 val_loss:3.7951 svd_entropy: attn_qk:H=0.7669,top10E=0.25,eRank=185.2,q75/q25=86.96 attn_vo:H=0.7967,top10E=0.14,eRank=274.7,q75/q25=inf mlp_w1:H=0.7983,top10E=0.24,eRank=222.4,q75/q25=16.87 mlp_w2:H=0.8618,top10E=0.13,eRank=313.6,q75/q25=21.86 vo_prod:H=0.6749,top10E=0.22,eRank=128.9,q75/q25=inf train_time:684005ms step_avg:76.00ms +[2025-09-02 18:23:38] [Rank 0] PRINT: step:9000/10000 val_loss:3.7951 svd_entropy: attn_qk:H=0.7669,top10E=0.25,eRank=185.2,q75/q25=86.96 attn_vo:H=0.7967,top10E=0.14,eRank=274.7,q75/q25=inf mlp_w1:H=0.7983,top10E=0.24,eRank=222.4,q75/q25=16.87 mlp_w2:H=0.8618,top10E=0.13,eRank=313.6,q75/q25=21.86 vo_prod:H=0.6749,top10E=0.22,eRank=128.9,q75/q25=inf train_time:684005ms step_avg:76.00ms +[2025-09-02 18:23:38] [Rank 0] step:9001/10000 train_time:684017ms step_avg:75.99ms +[2025-09-02 18:23:38] [Rank 0] step:9001/10000 train_time:684017ms step_avg:75.99ms +[2025-09-02 18:23:40] [Rank 0] step:9021/10000 train_time:685478ms step_avg:75.99ms +[2025-09-02 18:23:40] [Rank 0] step:9021/10000 train_time:685478ms step_avg:75.99ms +[2025-09-02 18:23:41] [Rank 0] step:9041/10000 train_time:687097ms step_avg:76.00ms +[2025-09-02 
18:23:41] [Rank 0] step:9041/10000 train_time:687097ms step_avg:76.00ms +[2025-09-02 18:23:43] [Rank 0] step:9061/10000 train_time:688731ms step_avg:76.01ms +[2025-09-02 18:23:43] [Rank 0] step:9061/10000 train_time:688731ms step_avg:76.01ms +[2025-09-02 18:23:45] [Rank 0] step:9081/10000 train_time:690362ms step_avg:76.02ms +[2025-09-02 18:23:45] [Rank 0] step:9081/10000 train_time:690362ms step_avg:76.02ms +[2025-09-02 18:23:46] [Rank 0] step:9101/10000 train_time:692005ms step_avg:76.04ms +[2025-09-02 18:23:46] [Rank 0] step:9101/10000 train_time:692005ms step_avg:76.04ms +[2025-09-02 18:23:48] [Rank 0] step:9121/10000 train_time:693639ms step_avg:76.05ms +[2025-09-02 18:23:48] [Rank 0] step:9121/10000 train_time:693639ms step_avg:76.05ms +[2025-09-02 18:23:50] [Rank 0] step:9141/10000 train_time:695253ms step_avg:76.06ms +[2025-09-02 18:23:50] [Rank 0] step:9141/10000 train_time:695253ms step_avg:76.06ms +[2025-09-02 18:23:51] [Rank 0] step:9161/10000 train_time:696867ms step_avg:76.07ms +[2025-09-02 18:23:51] [Rank 0] step:9161/10000 train_time:696867ms step_avg:76.07ms +[2025-09-02 18:23:53] [Rank 0] step:9181/10000 train_time:698527ms step_avg:76.08ms +[2025-09-02 18:23:53] [Rank 0] step:9181/10000 train_time:698527ms step_avg:76.08ms +[2025-09-02 18:23:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:23:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:24:06] [Rank 0] PRINT: step:9200/10000 val_loss:3.7883 svd_entropy: attn_qk:H=0.7673,top10E=0.25,eRank=185.6,q75/q25=86.88 attn_vo:H=0.7971,top10E=0.14,eRank=275.2,q75/q25=inf mlp_w1:H=0.7988,top10E=0.24,eRank=223.0,q75/q25=16.87 mlp_w2:H=0.8622,top10E=0.13,eRank=314.4,q75/q25=21.84 vo_prod:H=0.6755,top10E=0.22,eRank=129.5,q75/q25=inf train_time:700318ms step_avg:76.12ms +[2025-09-02 18:24:06] [Rank 0] PRINT: step:9200/10000 val_loss:3.7883 svd_entropy: attn_qk:H=0.7673,top10E=0.25,eRank=185.6,q75/q25=86.88 attn_vo:H=0.7971,top10E=0.14,eRank=275.2,q75/q25=inf mlp_w1:H=0.7988,top10E=0.24,eRank=223.0,q75/q25=16.87 mlp_w2:H=0.8622,top10E=0.13,eRank=314.4,q75/q25=21.84 vo_prod:H=0.6755,top10E=0.22,eRank=129.5,q75/q25=inf train_time:700318ms step_avg:76.12ms +[2025-09-02 18:24:06] [Rank 0] step:9201/10000 train_time:700330ms step_avg:76.11ms +[2025-09-02 18:24:06] [Rank 0] step:9201/10000 train_time:700330ms step_avg:76.11ms +[2025-09-02 18:24:08] [Rank 0] step:9221/10000 train_time:701814ms step_avg:76.11ms +[2025-09-02 18:24:08] [Rank 0] step:9221/10000 train_time:701814ms step_avg:76.11ms +[2025-09-02 18:24:10] [Rank 0] step:9241/10000 train_time:703452ms step_avg:76.12ms +[2025-09-02 18:24:10] [Rank 0] step:9241/10000 train_time:703452ms step_avg:76.12ms +[2025-09-02 18:24:11] [Rank 0] step:9261/10000 train_time:705085ms step_avg:76.13ms +[2025-09-02 18:24:11] [Rank 0] step:9261/10000 train_time:705085ms step_avg:76.13ms +[2025-09-02 18:24:13] [Rank 0] step:9281/10000 train_time:706703ms step_avg:76.15ms +[2025-09-02 18:24:13] [Rank 0] step:9281/10000 train_time:706703ms step_avg:76.15ms +[2025-09-02 18:24:15] [Rank 0] step:9301/10000 train_time:708333ms step_avg:76.16ms +[2025-09-02 18:24:15] [Rank 0] step:9301/10000 train_time:708333ms step_avg:76.16ms +[2025-09-02 18:24:16] [Rank 0] step:9321/10000 train_time:709962ms step_avg:76.17ms +[2025-09-02 18:24:16] [Rank 0] step:9321/10000 train_time:709962ms step_avg:76.17ms +[2025-09-02 18:24:18] [Rank 
0] step:9341/10000 train_time:711588ms step_avg:76.18ms +[2025-09-02 18:24:18] [Rank 0] step:9341/10000 train_time:711588ms step_avg:76.18ms +[2025-09-02 18:24:19] [Rank 0] step:9361/10000 train_time:713222ms step_avg:76.19ms +[2025-09-02 18:24:19] [Rank 0] step:9361/10000 train_time:713222ms step_avg:76.19ms +[2025-09-02 18:24:21] [Rank 0] step:9381/10000 train_time:714867ms step_avg:76.20ms +[2025-09-02 18:24:21] [Rank 0] step:9381/10000 train_time:714867ms step_avg:76.20ms +[2025-09-02 18:24:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:24:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:24:35] [Rank 0] PRINT: step:9400/10000 val_loss:3.7810 svd_entropy: attn_qk:H=0.7676,top10E=0.25,eRank=185.9,q75/q25=86.58 attn_vo:H=0.7974,top10E=0.14,eRank=275.6,q75/q25=inf mlp_w1:H=0.7993,top10E=0.24,eRank=223.6,q75/q25=16.90 mlp_w2:H=0.8625,top10E=0.13,eRank=315.0,q75/q25=21.87 vo_prod:H=0.6760,top10E=0.22,eRank=129.9,q75/q25=inf train_time:716666ms step_avg:76.24ms +[2025-09-02 18:24:35] [Rank 0] PRINT: step:9400/10000 val_loss:3.7810 svd_entropy: attn_qk:H=0.7676,top10E=0.25,eRank=185.9,q75/q25=86.58 attn_vo:H=0.7974,top10E=0.14,eRank=275.6,q75/q25=inf mlp_w1:H=0.7993,top10E=0.24,eRank=223.6,q75/q25=16.90 mlp_w2:H=0.8625,top10E=0.13,eRank=315.0,q75/q25=21.87 vo_prod:H=0.6760,top10E=0.22,eRank=129.9,q75/q25=inf train_time:716666ms step_avg:76.24ms +[2025-09-02 18:24:35] [Rank 0] step:9401/10000 train_time:716678ms step_avg:76.23ms +[2025-09-02 18:24:35] [Rank 0] step:9401/10000 train_time:716678ms step_avg:76.23ms +[2025-09-02 18:24:36] [Rank 0] step:9421/10000 train_time:718149ms step_avg:76.23ms +[2025-09-02 18:24:36] [Rank 0] step:9421/10000 train_time:718149ms step_avg:76.23ms +[2025-09-02 18:24:38] [Rank 0] step:9441/10000 train_time:719777ms step_avg:76.24ms +[2025-09-02 
18:24:38] [Rank 0] step:9441/10000 train_time:719777ms step_avg:76.24ms +[2025-09-02 18:24:40] [Rank 0] step:9461/10000 train_time:721413ms step_avg:76.25ms +[2025-09-02 18:24:40] [Rank 0] step:9461/10000 train_time:721413ms step_avg:76.25ms +[2025-09-02 18:24:41] [Rank 0] step:9481/10000 train_time:723114ms step_avg:76.27ms +[2025-09-02 18:24:41] [Rank 0] step:9481/10000 train_time:723114ms step_avg:76.27ms +[2025-09-02 18:24:43] [Rank 0] step:9501/10000 train_time:724698ms step_avg:76.28ms +[2025-09-02 18:24:43] [Rank 0] step:9501/10000 train_time:724698ms step_avg:76.28ms +[2025-09-02 18:24:44] [Rank 0] step:9521/10000 train_time:726326ms step_avg:76.29ms +[2025-09-02 18:24:44] [Rank 0] step:9521/10000 train_time:726326ms step_avg:76.29ms +[2025-09-02 18:24:46] [Rank 0] step:9541/10000 train_time:727955ms step_avg:76.30ms +[2025-09-02 18:24:46] [Rank 0] step:9541/10000 train_time:727955ms step_avg:76.30ms +[2025-09-02 18:24:48] [Rank 0] step:9561/10000 train_time:729584ms step_avg:76.31ms +[2025-09-02 18:24:48] [Rank 0] step:9561/10000 train_time:729584ms step_avg:76.31ms +[2025-09-02 18:24:49] [Rank 0] step:9581/10000 train_time:731216ms step_avg:76.32ms +[2025-09-02 18:24:49] [Rank 0] step:9581/10000 train_time:731216ms step_avg:76.32ms +[2025-09-02 18:24:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:24:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:25:03] [Rank 0] PRINT: step:9600/10000 val_loss:3.7756 svd_entropy: attn_qk:H=0.7678,top10E=0.25,eRank=186.1,q75/q25=86.64 attn_vo:H=0.7976,top10E=0.14,eRank=276.0,q75/q25=inf mlp_w1:H=0.7996,top10E=0.24,eRank=224.1,q75/q25=16.93 mlp_w2:H=0.8628,top10E=0.13,eRank=315.5,q75/q25=21.91 vo_prod:H=0.6764,top10E=0.22,eRank=130.3,q75/q25=inf train_time:733020ms step_avg:76.36ms +[2025-09-02 18:25:03] [Rank 0] PRINT: step:9600/10000 val_loss:3.7756 svd_entropy: attn_qk:H=0.7678,top10E=0.25,eRank=186.1,q75/q25=86.64 attn_vo:H=0.7976,top10E=0.14,eRank=276.0,q75/q25=inf mlp_w1:H=0.7996,top10E=0.24,eRank=224.1,q75/q25=16.93 mlp_w2:H=0.8628,top10E=0.13,eRank=315.5,q75/q25=21.91 vo_prod:H=0.6764,top10E=0.22,eRank=130.3,q75/q25=inf train_time:733020ms step_avg:76.36ms +[2025-09-02 18:25:03] [Rank 0] step:9601/10000 train_time:733032ms step_avg:76.35ms +[2025-09-02 18:25:03] [Rank 0] step:9601/10000 train_time:733032ms step_avg:76.35ms +[2025-09-02 18:25:05] [Rank 0] step:9621/10000 train_time:734509ms step_avg:76.34ms +[2025-09-02 18:25:05] [Rank 0] step:9621/10000 train_time:734509ms step_avg:76.34ms +[2025-09-02 18:25:06] [Rank 0] step:9641/10000 train_time:736140ms step_avg:76.36ms +[2025-09-02 18:25:06] [Rank 0] step:9641/10000 train_time:736140ms step_avg:76.36ms +[2025-09-02 18:25:08] [Rank 0] step:9661/10000 train_time:737797ms step_avg:76.37ms +[2025-09-02 18:25:08] [Rank 0] step:9661/10000 train_time:737797ms step_avg:76.37ms +[2025-09-02 18:25:09] [Rank 0] step:9681/10000 train_time:739446ms step_avg:76.38ms +[2025-09-02 18:25:09] [Rank 0] step:9681/10000 train_time:739446ms step_avg:76.38ms +[2025-09-02 18:25:11] [Rank 0] step:9701/10000 train_time:741113ms step_avg:76.40ms +[2025-09-02 18:25:11] [Rank 0] step:9701/10000 train_time:741113ms step_avg:76.40ms +[2025-09-02 18:25:13] [Rank 0] step:9721/10000 train_time:742758ms step_avg:76.41ms +[2025-09-02 18:25:13] [Rank 0] step:9721/10000 train_time:742758ms step_avg:76.41ms +[2025-09-02 18:25:14] [Rank 
0] step:9741/10000 train_time:744431ms step_avg:76.42ms +[2025-09-02 18:25:14] [Rank 0] step:9741/10000 train_time:744431ms step_avg:76.42ms +[2025-09-02 18:25:16] [Rank 0] step:9761/10000 train_time:746082ms step_avg:76.43ms +[2025-09-02 18:25:16] [Rank 0] step:9761/10000 train_time:746082ms step_avg:76.43ms +[2025-09-02 18:25:18] [Rank 0] step:9781/10000 train_time:747748ms step_avg:76.45ms +[2025-09-02 18:25:18] [Rank 0] step:9781/10000 train_time:747748ms step_avg:76.45ms +[2025-09-02 18:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:25:31] [Rank 0] PRINT: step:9800/10000 val_loss:3.7698 svd_entropy: attn_qk:H=0.7680,top10E=0.25,eRank=186.3,q75/q25=86.75 attn_vo:H=0.7978,top10E=0.14,eRank=276.3,q75/q25=inf mlp_w1:H=0.7999,top10E=0.24,eRank=224.5,q75/q25=16.91 mlp_w2:H=0.8630,top10E=0.13,eRank=316.0,q75/q25=21.86 vo_prod:H=0.6768,top10E=0.22,eRank=130.7,q75/q25=inf train_time:749585ms step_avg:76.49ms +[2025-09-02 18:25:31] [Rank 0] PRINT: step:9800/10000 val_loss:3.7698 svd_entropy: attn_qk:H=0.7680,top10E=0.25,eRank=186.3,q75/q25=86.75 attn_vo:H=0.7978,top10E=0.14,eRank=276.3,q75/q25=inf mlp_w1:H=0.7999,top10E=0.24,eRank=224.5,q75/q25=16.91 mlp_w2:H=0.8630,top10E=0.13,eRank=316.0,q75/q25=21.86 vo_prod:H=0.6768,top10E=0.22,eRank=130.7,q75/q25=inf train_time:749585ms step_avg:76.49ms +[2025-09-02 18:25:31] [Rank 0] step:9801/10000 train_time:749597ms step_avg:76.48ms +[2025-09-02 18:25:31] [Rank 0] step:9801/10000 train_time:749597ms step_avg:76.48ms +[2025-09-02 18:25:33] [Rank 0] step:9821/10000 train_time:751081ms step_avg:76.48ms +[2025-09-02 18:25:33] [Rank 0] step:9821/10000 train_time:751081ms step_avg:76.48ms +[2025-09-02 18:25:35] [Rank 0] step:9841/10000 train_time:752750ms step_avg:76.49ms +[2025-09-02 
18:25:35] [Rank 0] step:9841/10000 train_time:752750ms step_avg:76.49ms +[2025-09-02 18:25:36] [Rank 0] step:9861/10000 train_time:754395ms step_avg:76.50ms +[2025-09-02 18:25:36] [Rank 0] step:9861/10000 train_time:754395ms step_avg:76.50ms +[2025-09-02 18:25:38] [Rank 0] step:9881/10000 train_time:756038ms step_avg:76.51ms +[2025-09-02 18:25:38] [Rank 0] step:9881/10000 train_time:756038ms step_avg:76.51ms +[2025-09-02 18:25:40] [Rank 0] step:9901/10000 train_time:757698ms step_avg:76.53ms +[2025-09-02 18:25:40] [Rank 0] step:9901/10000 train_time:757698ms step_avg:76.53ms +[2025-09-02 18:25:41] [Rank 0] step:9921/10000 train_time:759349ms step_avg:76.54ms +[2025-09-02 18:25:41] [Rank 0] step:9921/10000 train_time:759349ms step_avg:76.54ms +[2025-09-02 18:25:43] [Rank 0] step:9941/10000 train_time:761010ms step_avg:76.55ms +[2025-09-02 18:25:43] [Rank 0] step:9941/10000 train_time:761010ms step_avg:76.55ms +[2025-09-02 18:25:45] [Rank 0] step:9961/10000 train_time:762663ms step_avg:76.56ms +[2025-09-02 18:25:45] [Rank 0] step:9961/10000 train_time:762663ms step_avg:76.56ms +[2025-09-02 18:25:46] [Rank 0] step:9981/10000 train_time:764334ms step_avg:76.58ms +[2025-09-02 18:25:46] [Rank 0] step:9981/10000 train_time:764334ms step_avg:76.58ms +[2025-09-02 18:25:48] [Rank 0] step:10000/10000 train_time:765910ms step_avg:76.59ms +[2025-09-02 18:25:48] [Rank 0] step:10000/10000 train_time:765910ms step_avg:76.59ms +[2025-09-02 18:25:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:25:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:26:00] [Rank 0] PRINT: step:10000/10000 val_loss:3.7646 svd_entropy: attn_qk:H=0.7682,top10E=0.25,eRank=186.4,q75/q25=86.54 attn_vo:H=0.7980,top10E=0.14,eRank=276.5,q75/q25=inf mlp_w1:H=0.8001,top10E=0.24,eRank=224.7,q75/q25=16.90 mlp_w2:H=0.8631,top10E=0.13,eRank=316.3,q75/q25=21.82 vo_prod:H=0.6770,top10E=0.21,eRank=130.9,q75/q25=inf train_time:766164ms step_avg:76.62ms +[2025-09-02 18:26:00] [Rank 0] PRINT: step:10000/10000 val_loss:3.7646 svd_entropy: attn_qk:H=0.7682,top10E=0.25,eRank=186.4,q75/q25=86.54 attn_vo:H=0.7980,top10E=0.14,eRank=276.5,q75/q25=inf mlp_w1:H=0.8001,top10E=0.24,eRank=224.7,q75/q25=16.90 mlp_w2:H=0.8631,top10E=0.13,eRank=316.3,q75/q25=21.82 vo_prod:H=0.6770,top10E=0.21,eRank=130.9,q75/q25=inf train_time:766164ms step_avg:76.62ms +[2025-09-02 18:26:00] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 18:26:00 2025 --- +[2025-09-02 18:26:00] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 18:26:00 2025 --- +[2025-09-02 18:26:00] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 18:26:00] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_50/config.json b/logs_svd_qkvo/mode_14_param_qkvo_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..86fe8d6d2109cd226d6429fe62f44e549beda948 --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_50/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 14, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "4653ff87-eb47-4bc5-9f95-75428b4404f4", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_14_param_qkvo_seed_50/training_log_4653ff87-eb47-4bc5-9f95-75428b4404f4.txt b/logs_svd_qkvo/mode_14_param_qkvo_seed_50/training_log_4653ff87-eb47-4bc5-9f95-75428b4404f4.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b3e03ff472803f4eb2e4c0a643c334955972ade --- /dev/null +++ b/logs_svd_qkvo/mode_14_param_qkvo_seed_50/training_log_4653ff87-eb47-4bc5-9f95-75428b4404f4.txt @@ -0,0 +1,2984 @@ +[2025-09-03 04:37:51] [Rank 0] PRINT: --- Script Start: Wed Sep 3 04:37:51 2025 --- +[2025-09-03 04:37:51] [Rank 0] PRINT: --- Script Start: Wed Sep 3 04:37:51 2025 --- +[2025-09-03 04:37:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 04:37:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=14, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 04:37:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 04:37:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 04:37:51] [Rank 0] PRINT: Using fixed seed: 50 +[2025-09-03 04:37:51] [Rank 0] PRINT: Using fixed seed: 50 +[2025-09-03 04:37:51] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_50 +[2025-09-03 04:37:51] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_14_param_qkvo_seed_50 +[2025-09-03 04:37:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 04:37:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 04:37:51] [Rank 0] PRINT: Constructing model... +[2025-09-03 04:37:51] [Rank 0] PRINT: Constructing model... +[2025-09-03 04:37:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 04:37:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 04:37:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 04:37:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 04:37:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 04:37:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 04:37:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-03 04:37:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 14 +[2025-09-03 04:37:53] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-03 04:37:53] [Rank 0] PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-03 04:37:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 04:37:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 04:37:53] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-03 04:37:53] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-03 04:37:53] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 04:37:53] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 04:37:53] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 04:37:53] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 04:37:53] [Rank 0] PRINT: Starting warmup... +[2025-09-03 04:37:53] [Rank 0] PRINT: Starting warmup... +[2025-09-03 04:43:32] [Rank 0] PRINT: Warmup complete. +[2025-09-03 04:43:32] [Rank 0] PRINT: Warmup complete. +[2025-09-03 04:43:33] [Rank 0] PRINT: Starting training... +[2025-09-03 04:43:33] [Rank 0] PRINT: Starting training... 
+[2025-09-03 04:43:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:43:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:44:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.25 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.6,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 04:44:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.25 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.6,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 04:44:44] [Rank 0] step:21/10000 train_time:1292ms step_avg:61.52ms +[2025-09-03 04:44:44] [Rank 0] step:21/10000 train_time:1292ms step_avg:61.52ms +[2025-09-03 04:44:45] [Rank 0] step:41/10000 train_time:2687ms step_avg:65.53ms +[2025-09-03 04:44:45] [Rank 0] step:41/10000 train_time:2687ms step_avg:65.53ms +[2025-09-03 04:44:47] [Rank 0] step:61/10000 train_time:4085ms step_avg:66.96ms +[2025-09-03 04:44:47] [Rank 0] step:61/10000 train_time:4085ms step_avg:66.96ms +[2025-09-03 04:44:48] [Rank 0] step:81/10000 train_time:5485ms step_avg:67.72ms +[2025-09-03 04:44:48] [Rank 0] step:81/10000 train_time:5485ms step_avg:67.72ms +[2025-09-03 04:44:50] [Rank 0] step:101/10000 train_time:6886ms step_avg:68.17ms +[2025-09-03 04:44:50] [Rank 0] step:101/10000 train_time:6886ms step_avg:68.17ms +[2025-09-03 04:44:51] [Rank 0] step:121/10000 train_time:8287ms step_avg:68.49ms +[2025-09-03 04:44:51] [Rank 0] step:121/10000 
train_time:8287ms step_avg:68.49ms +[2025-09-03 04:44:52] [Rank 0] step:141/10000 train_time:9690ms step_avg:68.72ms +[2025-09-03 04:44:52] [Rank 0] step:141/10000 train_time:9690ms step_avg:68.72ms +[2025-09-03 04:44:54] [Rank 0] step:161/10000 train_time:11094ms step_avg:68.90ms +[2025-09-03 04:44:54] [Rank 0] step:161/10000 train_time:11094ms step_avg:68.90ms +[2025-09-03 04:44:55] [Rank 0] step:181/10000 train_time:12498ms step_avg:69.05ms +[2025-09-03 04:44:55] [Rank 0] step:181/10000 train_time:12498ms step_avg:69.05ms +[2025-09-03 04:44:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:44:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:45:09] [Rank 0] PRINT: step:200/10000 val_loss:6.5494 svd_entropy: attn_qk:H=0.4588,top10E=0.77,eRank=67.9,q75/q25=12.19 attn_vo:H=0.5122,top10E=0.56,eRank=100.6,q75/q25=inf mlp_w1:H=0.4075,top10E=0.77,eRank=16.2,q75/q25=2.69 mlp_w2:H=0.1610,top10E=0.97,eRank=3.7,q75/q25=219.23 vo_prod:H=0.2077,top10E=0.87,eRank=6.7,q75/q25=inf train_time:14042ms step_avg:70.21ms +[2025-09-03 04:45:09] [Rank 0] PRINT: step:200/10000 val_loss:6.5494 svd_entropy: attn_qk:H=0.4588,top10E=0.77,eRank=67.9,q75/q25=12.19 attn_vo:H=0.5122,top10E=0.56,eRank=100.6,q75/q25=inf mlp_w1:H=0.4075,top10E=0.77,eRank=16.2,q75/q25=2.69 mlp_w2:H=0.1610,top10E=0.97,eRank=3.7,q75/q25=219.23 vo_prod:H=0.2077,top10E=0.87,eRank=6.7,q75/q25=inf train_time:14042ms step_avg:70.21ms +[2025-09-03 04:45:09] [Rank 0] step:201/10000 train_time:14054ms step_avg:69.92ms +[2025-09-03 04:45:09] [Rank 0] step:201/10000 train_time:14054ms step_avg:69.92ms +[2025-09-03 04:45:10] [Rank 0] step:221/10000 train_time:15320ms step_avg:69.32ms +[2025-09-03 04:45:10] [Rank 0] step:221/10000 train_time:15320ms step_avg:69.32ms +[2025-09-03 04:45:11] [Rank 0] step:241/10000 train_time:16723ms 
step_avg:69.39ms +[2025-09-03 04:45:11] [Rank 0] step:241/10000 train_time:16723ms step_avg:69.39ms +[2025-09-03 04:45:13] [Rank 0] step:261/10000 train_time:18128ms step_avg:69.45ms +[2025-09-03 04:45:13] [Rank 0] step:261/10000 train_time:18128ms step_avg:69.45ms +[2025-09-03 04:45:14] [Rank 0] step:281/10000 train_time:19531ms step_avg:69.51ms +[2025-09-03 04:45:14] [Rank 0] step:281/10000 train_time:19531ms step_avg:69.51ms +[2025-09-03 04:45:16] [Rank 0] step:301/10000 train_time:20936ms step_avg:69.56ms +[2025-09-03 04:45:16] [Rank 0] step:301/10000 train_time:20936ms step_avg:69.56ms +[2025-09-03 04:45:17] [Rank 0] step:321/10000 train_time:22342ms step_avg:69.60ms +[2025-09-03 04:45:17] [Rank 0] step:321/10000 train_time:22342ms step_avg:69.60ms +[2025-09-03 04:45:18] [Rank 0] step:341/10000 train_time:23747ms step_avg:69.64ms +[2025-09-03 04:45:18] [Rank 0] step:341/10000 train_time:23747ms step_avg:69.64ms +[2025-09-03 04:45:20] [Rank 0] step:361/10000 train_time:25153ms step_avg:69.67ms +[2025-09-03 04:45:20] [Rank 0] step:361/10000 train_time:25153ms step_avg:69.67ms +[2025-09-03 04:45:21] [Rank 0] step:381/10000 train_time:26557ms step_avg:69.70ms +[2025-09-03 04:45:21] [Rank 0] step:381/10000 train_time:26557ms step_avg:69.70ms +[2025-09-03 04:45:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:45:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:45:35] [Rank 0] PRINT: step:400/10000 val_loss:6.0218 svd_entropy: attn_qk:H=0.5140,top10E=0.68,eRank=75.7,q75/q25=13.26 attn_vo:H=0.5292,top10E=0.54,eRank=85.6,q75/q25=inf mlp_w1:H=0.4476,top10E=0.70,eRank=25.4,q75/q25=3.31 mlp_w2:H=0.5136,top10E=0.64,eRank=31.6,q75/q25=16.27 vo_prod:H=0.3239,top10E=0.81,eRank=13.0,q75/q25=inf train_time:28104ms step_avg:70.26ms +[2025-09-03 04:45:35] [Rank 0] PRINT: step:400/10000 val_loss:6.0218 svd_entropy: attn_qk:H=0.5140,top10E=0.68,eRank=75.7,q75/q25=13.26 attn_vo:H=0.5292,top10E=0.54,eRank=85.6,q75/q25=inf mlp_w1:H=0.4476,top10E=0.70,eRank=25.4,q75/q25=3.31 mlp_w2:H=0.5136,top10E=0.64,eRank=31.6,q75/q25=16.27 vo_prod:H=0.3239,top10E=0.81,eRank=13.0,q75/q25=inf train_time:28104ms step_avg:70.26ms +[2025-09-03 04:45:35] [Rank 0] step:401/10000 train_time:28116ms step_avg:70.11ms +[2025-09-03 04:45:35] [Rank 0] step:401/10000 train_time:28116ms step_avg:70.11ms +[2025-09-03 04:45:36] [Rank 0] step:421/10000 train_time:29390ms step_avg:69.81ms +[2025-09-03 04:45:36] [Rank 0] step:421/10000 train_time:29390ms step_avg:69.81ms +[2025-09-03 04:45:37] [Rank 0] step:441/10000 train_time:30794ms step_avg:69.83ms +[2025-09-03 04:45:37] [Rank 0] step:441/10000 train_time:30794ms step_avg:69.83ms +[2025-09-03 04:45:39] [Rank 0] step:461/10000 train_time:32199ms step_avg:69.85ms +[2025-09-03 04:45:39] [Rank 0] step:461/10000 train_time:32199ms step_avg:69.85ms +[2025-09-03 04:45:40] [Rank 0] step:481/10000 train_time:33605ms step_avg:69.86ms +[2025-09-03 04:45:40] [Rank 0] step:481/10000 train_time:33605ms step_avg:69.86ms +[2025-09-03 04:45:42] [Rank 0] step:501/10000 train_time:35010ms step_avg:69.88ms +[2025-09-03 04:45:42] [Rank 0] step:501/10000 train_time:35010ms step_avg:69.88ms +[2025-09-03 04:45:43] [Rank 0] step:521/10000 train_time:36416ms step_avg:69.90ms +[2025-09-03 04:45:43] [Rank 0] step:521/10000 train_time:36416ms step_avg:69.90ms +[2025-09-03 04:45:45] [Rank 0] step:541/10000 train_time:37823ms 
step_avg:69.91ms +[2025-09-03 04:45:45] [Rank 0] step:541/10000 train_time:37823ms step_avg:69.91ms +[2025-09-03 04:45:46] [Rank 0] step:561/10000 train_time:39228ms step_avg:69.93ms +[2025-09-03 04:45:46] [Rank 0] step:561/10000 train_time:39228ms step_avg:69.93ms +[2025-09-03 04:45:47] [Rank 0] step:581/10000 train_time:40635ms step_avg:69.94ms +[2025-09-03 04:45:47] [Rank 0] step:581/10000 train_time:40635ms step_avg:69.94ms +[2025-09-03 04:45:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:45:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:46:00] [Rank 0] PRINT: step:600/10000 val_loss:5.7250 svd_entropy: attn_qk:H=0.5501,top10E=0.61,eRank=82.3,q75/q25=14.78 attn_vo:H=0.5563,top10E=0.48,eRank=91.0,q75/q25=inf mlp_w1:H=0.4902,top10E=0.64,eRank=35.7,q75/q25=3.76 mlp_w2:H=0.6142,top10E=0.48,eRank=60.4,q75/q25=10.15 vo_prod:H=0.3864,top10E=0.72,eRank=18.6,q75/q25=inf train_time:42182ms step_avg:70.30ms +[2025-09-03 04:46:00] [Rank 0] PRINT: step:600/10000 val_loss:5.7250 svd_entropy: attn_qk:H=0.5501,top10E=0.61,eRank=82.3,q75/q25=14.78 attn_vo:H=0.5563,top10E=0.48,eRank=91.0,q75/q25=inf mlp_w1:H=0.4902,top10E=0.64,eRank=35.7,q75/q25=3.76 mlp_w2:H=0.6142,top10E=0.48,eRank=60.4,q75/q25=10.15 vo_prod:H=0.3864,top10E=0.72,eRank=18.6,q75/q25=inf train_time:42182ms step_avg:70.30ms +[2025-09-03 04:46:00] [Rank 0] step:601/10000 train_time:42193ms step_avg:70.21ms +[2025-09-03 04:46:00] [Rank 0] step:601/10000 train_time:42193ms step_avg:70.21ms +[2025-09-03 04:46:02] [Rank 0] step:621/10000 train_time:43462ms step_avg:69.99ms +[2025-09-03 04:46:02] [Rank 0] step:621/10000 train_time:43462ms step_avg:69.99ms +[2025-09-03 04:46:03] [Rank 0] step:641/10000 train_time:44868ms step_avg:70.00ms +[2025-09-03 04:46:03] [Rank 0] step:641/10000 train_time:44868ms step_avg:70.00ms 
+[2025-09-03 04:46:05] [Rank 0] step:661/10000 train_time:46273ms step_avg:70.00ms +[2025-09-03 04:46:05] [Rank 0] step:661/10000 train_time:46273ms step_avg:70.00ms +[2025-09-03 04:46:06] [Rank 0] step:681/10000 train_time:47681ms step_avg:70.02ms +[2025-09-03 04:46:06] [Rank 0] step:681/10000 train_time:47681ms step_avg:70.02ms +[2025-09-03 04:46:07] [Rank 0] step:701/10000 train_time:49087ms step_avg:70.02ms +[2025-09-03 04:46:07] [Rank 0] step:701/10000 train_time:49087ms step_avg:70.02ms +[2025-09-03 04:46:09] [Rank 0] step:721/10000 train_time:50494ms step_avg:70.03ms +[2025-09-03 04:46:09] [Rank 0] step:721/10000 train_time:50494ms step_avg:70.03ms +[2025-09-03 04:46:10] [Rank 0] step:741/10000 train_time:51901ms step_avg:70.04ms +[2025-09-03 04:46:10] [Rank 0] step:741/10000 train_time:51901ms step_avg:70.04ms +[2025-09-03 04:46:12] [Rank 0] step:761/10000 train_time:53320ms step_avg:70.07ms +[2025-09-03 04:46:12] [Rank 0] step:761/10000 train_time:53320ms step_avg:70.07ms +[2025-09-03 04:46:13] [Rank 0] step:781/10000 train_time:54741ms step_avg:70.09ms +[2025-09-03 04:46:13] [Rank 0] step:781/10000 train_time:54741ms step_avg:70.09ms +[2025-09-03 04:46:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:46:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:46:26] [Rank 0] PRINT: step:800/10000 val_loss:5.4963 svd_entropy: attn_qk:H=0.5774,top10E=0.56,eRank=88.1,q75/q25=16.48 attn_vo:H=0.5814,top10E=0.44,eRank=100.1,q75/q25=inf mlp_w1:H=0.5260,top10E=0.60,eRank=45.1,q75/q25=4.13 mlp_w2:H=0.6689,top10E=0.39,eRank=86.5,q75/q25=9.14 vo_prod:H=0.4268,top10E=0.64,eRank=23.9,q75/q25=inf train_time:56305ms step_avg:70.38ms +[2025-09-03 04:46:26] [Rank 0] PRINT: step:800/10000 val_loss:5.4963 svd_entropy: attn_qk:H=0.5774,top10E=0.56,eRank=88.1,q75/q25=16.48 attn_vo:H=0.5814,top10E=0.44,eRank=100.1,q75/q25=inf mlp_w1:H=0.5260,top10E=0.60,eRank=45.1,q75/q25=4.13 mlp_w2:H=0.6689,top10E=0.39,eRank=86.5,q75/q25=9.14 vo_prod:H=0.4268,top10E=0.64,eRank=23.9,q75/q25=inf train_time:56305ms step_avg:70.38ms +[2025-09-03 04:46:26] [Rank 0] step:801/10000 train_time:56317ms step_avg:70.31ms +[2025-09-03 04:46:26] [Rank 0] step:801/10000 train_time:56317ms step_avg:70.31ms +[2025-09-03 04:46:28] [Rank 0] step:821/10000 train_time:57618ms step_avg:70.18ms +[2025-09-03 04:46:28] [Rank 0] step:821/10000 train_time:57618ms step_avg:70.18ms +[2025-09-03 04:46:29] [Rank 0] step:841/10000 train_time:59038ms step_avg:70.20ms +[2025-09-03 04:46:29] [Rank 0] step:841/10000 train_time:59038ms step_avg:70.20ms +[2025-09-03 04:46:31] [Rank 0] step:861/10000 train_time:60457ms step_avg:70.22ms +[2025-09-03 04:46:31] [Rank 0] step:861/10000 train_time:60457ms step_avg:70.22ms +[2025-09-03 04:46:32] [Rank 0] step:881/10000 train_time:61878ms step_avg:70.24ms +[2025-09-03 04:46:32] [Rank 0] step:881/10000 train_time:61878ms step_avg:70.24ms +[2025-09-03 04:46:33] [Rank 0] step:901/10000 train_time:63298ms step_avg:70.25ms +[2025-09-03 04:46:33] [Rank 0] step:901/10000 train_time:63298ms step_avg:70.25ms +[2025-09-03 04:46:35] [Rank 0] step:921/10000 train_time:64718ms step_avg:70.27ms +[2025-09-03 04:46:35] [Rank 0] step:921/10000 train_time:64718ms step_avg:70.27ms +[2025-09-03 04:46:36] [Rank 0] step:941/10000 train_time:66140ms 
step_avg:70.29ms +[2025-09-03 04:46:36] [Rank 0] step:941/10000 train_time:66140ms step_avg:70.29ms +[2025-09-03 04:46:38] [Rank 0] step:961/10000 train_time:67560ms step_avg:70.30ms +[2025-09-03 04:46:38] [Rank 0] step:961/10000 train_time:67560ms step_avg:70.30ms +[2025-09-03 04:46:39] [Rank 0] step:981/10000 train_time:68982ms step_avg:70.32ms +[2025-09-03 04:46:39] [Rank 0] step:981/10000 train_time:68982ms step_avg:70.32ms +[2025-09-03 04:46:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:46:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:46:52] [Rank 0] PRINT: step:1000/10000 val_loss:5.3359 svd_entropy: attn_qk:H=0.5991,top10E=0.52,eRank=93.5,q75/q25=18.68 attn_vo:H=0.6032,top10E=0.41,eRank=110.5,q75/q25=inf mlp_w1:H=0.5588,top10E=0.56,eRank=54.5,q75/q25=4.51 mlp_w2:H=0.7066,top10E=0.33,eRank=110.8,q75/q25=9.08 vo_prod:H=0.4557,top10E=0.59,eRank=28.8,q75/q25=inf train_time:70546ms step_avg:70.55ms +[2025-09-03 04:46:52] [Rank 0] PRINT: step:1000/10000 val_loss:5.3359 svd_entropy: attn_qk:H=0.5991,top10E=0.52,eRank=93.5,q75/q25=18.68 attn_vo:H=0.6032,top10E=0.41,eRank=110.5,q75/q25=inf mlp_w1:H=0.5588,top10E=0.56,eRank=54.5,q75/q25=4.51 mlp_w2:H=0.7066,top10E=0.33,eRank=110.8,q75/q25=9.08 vo_prod:H=0.4557,top10E=0.59,eRank=28.8,q75/q25=inf train_time:70546ms step_avg:70.55ms +[2025-09-03 04:46:52] [Rank 0] step:1001/10000 train_time:70558ms step_avg:70.49ms +[2025-09-03 04:46:52] [Rank 0] step:1001/10000 train_time:70558ms step_avg:70.49ms +[2025-09-03 04:46:54] [Rank 0] step:1021/10000 train_time:71838ms step_avg:70.36ms +[2025-09-03 04:46:54] [Rank 0] step:1021/10000 train_time:71838ms step_avg:70.36ms +[2025-09-03 04:46:55] [Rank 0] step:1041/10000 train_time:73258ms step_avg:70.37ms +[2025-09-03 04:46:55] [Rank 0] step:1041/10000 train_time:73258ms 
step_avg:70.37ms +[2025-09-03 04:46:57] [Rank 0] step:1061/10000 train_time:74678ms step_avg:70.39ms +[2025-09-03 04:46:57] [Rank 0] step:1061/10000 train_time:74678ms step_avg:70.39ms +[2025-09-03 04:46:58] [Rank 0] step:1081/10000 train_time:76098ms step_avg:70.40ms +[2025-09-03 04:46:58] [Rank 0] step:1081/10000 train_time:76098ms step_avg:70.40ms +[2025-09-03 04:46:59] [Rank 0] step:1101/10000 train_time:77518ms step_avg:70.41ms +[2025-09-03 04:46:59] [Rank 0] step:1101/10000 train_time:77518ms step_avg:70.41ms +[2025-09-03 04:47:01] [Rank 0] step:1121/10000 train_time:78938ms step_avg:70.42ms +[2025-09-03 04:47:01] [Rank 0] step:1121/10000 train_time:78938ms step_avg:70.42ms +[2025-09-03 04:47:02] [Rank 0] step:1141/10000 train_time:80359ms step_avg:70.43ms +[2025-09-03 04:47:02] [Rank 0] step:1141/10000 train_time:80359ms step_avg:70.43ms +[2025-09-03 04:47:04] [Rank 0] step:1161/10000 train_time:81780ms step_avg:70.44ms +[2025-09-03 04:47:04] [Rank 0] step:1161/10000 train_time:81780ms step_avg:70.44ms +[2025-09-03 04:47:05] [Rank 0] step:1181/10000 train_time:83201ms step_avg:70.45ms +[2025-09-03 04:47:05] [Rank 0] step:1181/10000 train_time:83201ms step_avg:70.45ms +[2025-09-03 04:47:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:47:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:47:18] [Rank 0] PRINT: step:1200/10000 val_loss:5.1860 svd_entropy: attn_qk:H=0.6174,top10E=0.48,eRank=98.9,q75/q25=21.57 attn_vo:H=0.6230,top10E=0.38,eRank=122.0,q75/q25=inf mlp_w1:H=0.5821,top10E=0.53,eRank=62.2,q75/q25=4.93 mlp_w2:H=0.7296,top10E=0.29,eRank=129.1,q75/q25=10.04 vo_prod:H=0.4799,top10E=0.54,eRank=33.8,q75/q25=inf train_time:84763ms step_avg:70.64ms +[2025-09-03 04:47:18] [Rank 0] PRINT: step:1200/10000 val_loss:5.1860 svd_entropy: attn_qk:H=0.6174,top10E=0.48,eRank=98.9,q75/q25=21.57 attn_vo:H=0.6230,top10E=0.38,eRank=122.0,q75/q25=inf mlp_w1:H=0.5821,top10E=0.53,eRank=62.2,q75/q25=4.93 mlp_w2:H=0.7296,top10E=0.29,eRank=129.1,q75/q25=10.04 vo_prod:H=0.4799,top10E=0.54,eRank=33.8,q75/q25=inf train_time:84763ms step_avg:70.64ms +[2025-09-03 04:47:18] [Rank 0] step:1201/10000 train_time:84775ms step_avg:70.59ms +[2025-09-03 04:47:18] [Rank 0] step:1201/10000 train_time:84775ms step_avg:70.59ms +[2025-09-03 04:47:19] [Rank 0] step:1221/10000 train_time:86084ms step_avg:70.50ms +[2025-09-03 04:47:19] [Rank 0] step:1221/10000 train_time:86084ms step_avg:70.50ms +[2025-09-03 04:47:21] [Rank 0] step:1241/10000 train_time:87503ms step_avg:70.51ms +[2025-09-03 04:47:21] [Rank 0] step:1241/10000 train_time:87503ms step_avg:70.51ms +[2025-09-03 04:47:22] [Rank 0] step:1261/10000 train_time:88924ms step_avg:70.52ms +[2025-09-03 04:47:22] [Rank 0] step:1261/10000 train_time:88924ms step_avg:70.52ms +[2025-09-03 04:47:24] [Rank 0] step:1281/10000 train_time:90344ms step_avg:70.53ms +[2025-09-03 04:47:24] [Rank 0] step:1281/10000 train_time:90344ms step_avg:70.53ms +[2025-09-03 04:47:25] [Rank 0] step:1301/10000 train_time:91764ms step_avg:70.53ms +[2025-09-03 04:47:25] [Rank 0] step:1301/10000 train_time:91764ms step_avg:70.53ms +[2025-09-03 04:47:27] [Rank 0] step:1321/10000 train_time:93186ms step_avg:70.54ms +[2025-09-03 04:47:27] [Rank 0] step:1321/10000 train_time:93186ms step_avg:70.54ms +[2025-09-03 04:47:28] [Rank 0] step:1341/10000 
train_time:94606ms step_avg:70.55ms +[2025-09-03 04:47:28] [Rank 0] step:1341/10000 train_time:94606ms step_avg:70.55ms +[2025-09-03 04:47:29] [Rank 0] step:1361/10000 train_time:96027ms step_avg:70.56ms +[2025-09-03 04:47:29] [Rank 0] step:1361/10000 train_time:96027ms step_avg:70.56ms +[2025-09-03 04:47:31] [Rank 0] step:1381/10000 train_time:97449ms step_avg:70.56ms +[2025-09-03 04:47:31] [Rank 0] step:1381/10000 train_time:97449ms step_avg:70.56ms +[2025-09-03 04:47:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:47:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:47:44] [Rank 0] PRINT: step:1400/10000 val_loss:5.0503 svd_entropy: attn_qk:H=0.6329,top10E=0.45,eRank=104.2,q75/q25=25.37 attn_vo:H=0.6410,top10E=0.36,eRank=133.8,q75/q25=inf mlp_w1:H=0.6023,top10E=0.50,eRank=69.6,q75/q25=5.38 mlp_w2:H=0.7470,top10E=0.27,eRank=144.7,q75/q25=11.12 vo_prod:H=0.5011,top10E=0.50,eRank=38.9,q75/q25=inf train_time:99012ms step_avg:70.72ms +[2025-09-03 04:47:44] [Rank 0] PRINT: step:1400/10000 val_loss:5.0503 svd_entropy: attn_qk:H=0.6329,top10E=0.45,eRank=104.2,q75/q25=25.37 attn_vo:H=0.6410,top10E=0.36,eRank=133.8,q75/q25=inf mlp_w1:H=0.6023,top10E=0.50,eRank=69.6,q75/q25=5.38 mlp_w2:H=0.7470,top10E=0.27,eRank=144.7,q75/q25=11.12 vo_prod:H=0.5011,top10E=0.50,eRank=38.9,q75/q25=inf train_time:99012ms step_avg:70.72ms +[2025-09-03 04:47:44] [Rank 0] step:1401/10000 train_time:99024ms step_avg:70.68ms +[2025-09-03 04:47:44] [Rank 0] step:1401/10000 train_time:99024ms step_avg:70.68ms +[2025-09-03 04:47:45] [Rank 0] step:1421/10000 train_time:100304ms step_avg:70.59ms +[2025-09-03 04:47:45] [Rank 0] step:1421/10000 train_time:100304ms step_avg:70.59ms +[2025-09-03 04:47:47] [Rank 0] step:1441/10000 train_time:101723ms step_avg:70.59ms +[2025-09-03 04:47:47] [Rank 0] step:1441/10000 
train_time:101723ms step_avg:70.59ms +[2025-09-03 04:47:48] [Rank 0] step:1461/10000 train_time:103142ms step_avg:70.60ms +[2025-09-03 04:47:48] [Rank 0] step:1461/10000 train_time:103142ms step_avg:70.60ms +[2025-09-03 04:47:50] [Rank 0] step:1481/10000 train_time:104562ms step_avg:70.60ms +[2025-09-03 04:47:50] [Rank 0] step:1481/10000 train_time:104562ms step_avg:70.60ms +[2025-09-03 04:47:51] [Rank 0] step:1501/10000 train_time:105993ms step_avg:70.61ms +[2025-09-03 04:47:51] [Rank 0] step:1501/10000 train_time:105993ms step_avg:70.61ms +[2025-09-03 04:47:52] [Rank 0] step:1521/10000 train_time:107424ms step_avg:70.63ms +[2025-09-03 04:47:52] [Rank 0] step:1521/10000 train_time:107424ms step_avg:70.63ms +[2025-09-03 04:47:54] [Rank 0] step:1541/10000 train_time:108858ms step_avg:70.64ms +[2025-09-03 04:47:54] [Rank 0] step:1541/10000 train_time:108858ms step_avg:70.64ms +[2025-09-03 04:47:55] [Rank 0] step:1561/10000 train_time:110290ms step_avg:70.65ms +[2025-09-03 04:47:55] [Rank 0] step:1561/10000 train_time:110290ms step_avg:70.65ms +[2025-09-03 04:47:57] [Rank 0] step:1581/10000 train_time:111722ms step_avg:70.67ms +[2025-09-03 04:47:57] [Rank 0] step:1581/10000 train_time:111722ms step_avg:70.67ms +[2025-09-03 04:47:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:47:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:48:10] [Rank 0] PRINT: step:1600/10000 val_loss:4.8968 svd_entropy: attn_qk:H=0.6456,top10E=0.43,eRank=108.8,q75/q25=30.18 attn_vo:H=0.6569,top10E=0.33,eRank=144.9,q75/q25=inf mlp_w1:H=0.6203,top10E=0.48,eRank=77.0,q75/q25=5.91 mlp_w2:H=0.7619,top10E=0.25,eRank=160.0,q75/q25=12.74 vo_prod:H=0.5188,top10E=0.47,eRank=43.6,q75/q25=inf train_time:113298ms step_avg:70.81ms +[2025-09-03 04:48:10] [Rank 0] PRINT: step:1600/10000 val_loss:4.8968 svd_entropy: attn_qk:H=0.6456,top10E=0.43,eRank=108.8,q75/q25=30.18 attn_vo:H=0.6569,top10E=0.33,eRank=144.9,q75/q25=inf mlp_w1:H=0.6203,top10E=0.48,eRank=77.0,q75/q25=5.91 mlp_w2:H=0.7619,top10E=0.25,eRank=160.0,q75/q25=12.74 vo_prod:H=0.5188,top10E=0.47,eRank=43.6,q75/q25=inf train_time:113298ms step_avg:70.81ms +[2025-09-03 04:48:10] [Rank 0] step:1601/10000 train_time:113310ms step_avg:70.77ms +[2025-09-03 04:48:10] [Rank 0] step:1601/10000 train_time:113310ms step_avg:70.77ms +[2025-09-03 04:48:11] [Rank 0] step:1621/10000 train_time:114606ms step_avg:70.70ms +[2025-09-03 04:48:11] [Rank 0] step:1621/10000 train_time:114606ms step_avg:70.70ms +[2025-09-03 04:48:13] [Rank 0] step:1641/10000 train_time:116037ms step_avg:70.71ms +[2025-09-03 04:48:13] [Rank 0] step:1641/10000 train_time:116037ms step_avg:70.71ms +[2025-09-03 04:48:14] [Rank 0] step:1661/10000 train_time:117469ms step_avg:70.72ms +[2025-09-03 04:48:14] [Rank 0] step:1661/10000 train_time:117469ms step_avg:70.72ms +[2025-09-03 04:48:16] [Rank 0] step:1681/10000 train_time:118900ms step_avg:70.73ms +[2025-09-03 04:48:16] [Rank 0] step:1681/10000 train_time:118900ms step_avg:70.73ms +[2025-09-03 04:48:17] [Rank 0] step:1701/10000 train_time:120331ms step_avg:70.74ms +[2025-09-03 04:48:17] [Rank 0] step:1701/10000 train_time:120331ms step_avg:70.74ms +[2025-09-03 04:48:19] [Rank 0] step:1721/10000 train_time:121763ms step_avg:70.75ms +[2025-09-03 04:48:19] [Rank 0] step:1721/10000 train_time:121763ms step_avg:70.75ms +[2025-09-03 04:48:20] [Rank 0] 
step:1741/10000 train_time:123195ms step_avg:70.76ms +[2025-09-03 04:48:20] [Rank 0] step:1741/10000 train_time:123195ms step_avg:70.76ms +[2025-09-03 04:48:21] [Rank 0] step:1761/10000 train_time:124627ms step_avg:70.77ms +[2025-09-03 04:48:21] [Rank 0] step:1761/10000 train_time:124627ms step_avg:70.77ms +[2025-09-03 04:48:23] [Rank 0] step:1781/10000 train_time:126060ms step_avg:70.78ms +[2025-09-03 04:48:23] [Rank 0] step:1781/10000 train_time:126060ms step_avg:70.78ms +[2025-09-03 04:48:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:48:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:48:36] [Rank 0] PRINT: step:1800/10000 val_loss:4.7696 svd_entropy: attn_qk:H=0.6562,top10E=0.41,eRank=113.0,q75/q25=35.06 attn_vo:H=0.6702,top10E=0.32,eRank=154.4,q75/q25=inf mlp_w1:H=0.6374,top10E=0.46,eRank=84.7,q75/q25=6.50 mlp_w2:H=0.7737,top10E=0.23,eRank=173.3,q75/q25=14.40 vo_prod:H=0.5325,top10E=0.44,eRank=47.9,q75/q25=inf train_time:127637ms step_avg:70.91ms +[2025-09-03 04:48:36] [Rank 0] PRINT: step:1800/10000 val_loss:4.7696 svd_entropy: attn_qk:H=0.6562,top10E=0.41,eRank=113.0,q75/q25=35.06 attn_vo:H=0.6702,top10E=0.32,eRank=154.4,q75/q25=inf mlp_w1:H=0.6374,top10E=0.46,eRank=84.7,q75/q25=6.50 mlp_w2:H=0.7737,top10E=0.23,eRank=173.3,q75/q25=14.40 vo_prod:H=0.5325,top10E=0.44,eRank=47.9,q75/q25=inf train_time:127637ms step_avg:70.91ms +[2025-09-03 04:48:36] [Rank 0] step:1801/10000 train_time:127649ms step_avg:70.88ms +[2025-09-03 04:48:36] [Rank 0] step:1801/10000 train_time:127649ms step_avg:70.88ms +[2025-09-03 04:48:38] [Rank 0] step:1821/10000 train_time:128941ms step_avg:70.81ms +[2025-09-03 04:48:38] [Rank 0] step:1821/10000 train_time:128941ms step_avg:70.81ms +[2025-09-03 04:48:39] [Rank 0] step:1841/10000 train_time:130370ms step_avg:70.81ms +[2025-09-03 04:48:39] 
[Rank 0] step:1841/10000 train_time:130370ms step_avg:70.81ms +[2025-09-03 04:48:41] [Rank 0] step:1861/10000 train_time:131800ms step_avg:70.82ms +[2025-09-03 04:48:41] [Rank 0] step:1861/10000 train_time:131800ms step_avg:70.82ms +[2025-09-03 04:48:42] [Rank 0] step:1881/10000 train_time:133231ms step_avg:70.83ms +[2025-09-03 04:48:42] [Rank 0] step:1881/10000 train_time:133231ms step_avg:70.83ms +[2025-09-03 04:48:43] [Rank 0] step:1901/10000 train_time:134662ms step_avg:70.84ms +[2025-09-03 04:48:43] [Rank 0] step:1901/10000 train_time:134662ms step_avg:70.84ms +[2025-09-03 04:48:45] [Rank 0] step:1921/10000 train_time:136097ms step_avg:70.85ms +[2025-09-03 04:48:45] [Rank 0] step:1921/10000 train_time:136097ms step_avg:70.85ms +[2025-09-03 04:48:46] [Rank 0] step:1941/10000 train_time:137529ms step_avg:70.85ms +[2025-09-03 04:48:46] [Rank 0] step:1941/10000 train_time:137529ms step_avg:70.85ms +[2025-09-03 04:48:48] [Rank 0] step:1961/10000 train_time:138960ms step_avg:70.86ms +[2025-09-03 04:48:48] [Rank 0] step:1961/10000 train_time:138960ms step_avg:70.86ms +[2025-09-03 04:48:49] [Rank 0] step:1981/10000 train_time:140393ms step_avg:70.87ms +[2025-09-03 04:48:49] [Rank 0] step:1981/10000 train_time:140393ms step_avg:70.87ms +[2025-09-03 04:48:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:48:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:49:02] [Rank 0] PRINT: step:2000/10000 val_loss:4.6855 svd_entropy: attn_qk:H=0.6652,top10E=0.40,eRank=116.8,q75/q25=40.28 attn_vo:H=0.6816,top10E=0.30,eRank=162.7,q75/q25=inf mlp_w1:H=0.6524,top10E=0.44,eRank=91.9,q75/q25=7.12 mlp_w2:H=0.7833,top10E=0.22,eRank=185.0,q75/q25=15.88 vo_prod:H=0.5448,top10E=0.42,eRank=52.1,q75/q25=inf train_time:141969ms step_avg:70.98ms +[2025-09-03 04:49:02] [Rank 0] PRINT: step:2000/10000 val_loss:4.6855 svd_entropy: attn_qk:H=0.6652,top10E=0.40,eRank=116.8,q75/q25=40.28 attn_vo:H=0.6816,top10E=0.30,eRank=162.7,q75/q25=inf mlp_w1:H=0.6524,top10E=0.44,eRank=91.9,q75/q25=7.12 mlp_w2:H=0.7833,top10E=0.22,eRank=185.0,q75/q25=15.88 vo_prod:H=0.5448,top10E=0.42,eRank=52.1,q75/q25=inf train_time:141969ms step_avg:70.98ms +[2025-09-03 04:49:02] [Rank 0] step:2001/10000 train_time:141980ms step_avg:70.95ms +[2025-09-03 04:49:02] [Rank 0] step:2001/10000 train_time:141980ms step_avg:70.95ms +[2025-09-03 04:49:04] [Rank 0] step:2021/10000 train_time:143293ms step_avg:70.90ms +[2025-09-03 04:49:04] [Rank 0] step:2021/10000 train_time:143293ms step_avg:70.90ms +[2025-09-03 04:49:05] [Rank 0] step:2041/10000 train_time:144847ms step_avg:70.97ms +[2025-09-03 04:49:05] [Rank 0] step:2041/10000 train_time:144847ms step_avg:70.97ms +[2025-09-03 04:49:07] [Rank 0] step:2061/10000 train_time:146277ms step_avg:70.97ms +[2025-09-03 04:49:07] [Rank 0] step:2061/10000 train_time:146277ms step_avg:70.97ms +[2025-09-03 04:49:08] [Rank 0] step:2081/10000 train_time:147708ms step_avg:70.98ms +[2025-09-03 04:49:08] [Rank 0] step:2081/10000 train_time:147708ms step_avg:70.98ms +[2025-09-03 04:49:10] [Rank 0] step:2101/10000 train_time:149139ms step_avg:70.98ms +[2025-09-03 04:49:10] [Rank 0] step:2101/10000 train_time:149139ms step_avg:70.98ms +[2025-09-03 04:49:11] [Rank 0] step:2121/10000 train_time:150571ms step_avg:70.99ms +[2025-09-03 04:49:11] [Rank 0] step:2121/10000 train_time:150571ms step_avg:70.99ms +[2025-09-03 04:49:13] [Rank 0] 
step:2141/10000 train_time:152003ms step_avg:71.00ms +[2025-09-03 04:49:13] [Rank 0] step:2141/10000 train_time:152003ms step_avg:71.00ms +[2025-09-03 04:49:14] [Rank 0] step:2161/10000 train_time:153434ms step_avg:71.00ms +[2025-09-03 04:49:14] [Rank 0] step:2161/10000 train_time:153434ms step_avg:71.00ms +[2025-09-03 04:49:16] [Rank 0] step:2181/10000 train_time:154881ms step_avg:71.01ms +[2025-09-03 04:49:16] [Rank 0] step:2181/10000 train_time:154881ms step_avg:71.01ms +[2025-09-03 04:49:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:49:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:49:29] [Rank 0] PRINT: step:2200/10000 val_loss:4.5997 svd_entropy: attn_qk:H=0.6727,top10E=0.39,eRank=120.3,q75/q25=44.81 attn_vo:H=0.6910,top10E=0.29,eRank=169.8,q75/q25=inf mlp_w1:H=0.6655,top10E=0.42,eRank=99.0,q75/q25=7.70 mlp_w2:H=0.7913,top10E=0.21,eRank=195.4,q75/q25=17.25 vo_prod:H=0.5546,top10E=0.40,eRank=55.9,q75/q25=inf train_time:156457ms step_avg:71.12ms +[2025-09-03 04:49:29] [Rank 0] PRINT: step:2200/10000 val_loss:4.5997 svd_entropy: attn_qk:H=0.6727,top10E=0.39,eRank=120.3,q75/q25=44.81 attn_vo:H=0.6910,top10E=0.29,eRank=169.8,q75/q25=inf mlp_w1:H=0.6655,top10E=0.42,eRank=99.0,q75/q25=7.70 mlp_w2:H=0.7913,top10E=0.21,eRank=195.4,q75/q25=17.25 vo_prod:H=0.5546,top10E=0.40,eRank=55.9,q75/q25=inf train_time:156457ms step_avg:71.12ms +[2025-09-03 04:49:29] [Rank 0] step:2201/10000 train_time:156469ms step_avg:71.09ms +[2025-09-03 04:49:29] [Rank 0] step:2201/10000 train_time:156469ms step_avg:71.09ms +[2025-09-03 04:49:30] [Rank 0] step:2221/10000 train_time:157781ms step_avg:71.04ms +[2025-09-03 04:49:30] [Rank 0] step:2221/10000 train_time:157781ms step_avg:71.04ms +[2025-09-03 04:49:32] [Rank 0] step:2241/10000 train_time:159244ms step_avg:71.06ms +[2025-09-03 04:49:32] 
[Rank 0] step:2241/10000 train_time:159244ms step_avg:71.06ms +[2025-09-03 04:49:33] [Rank 0] step:2261/10000 train_time:160718ms step_avg:71.08ms +[2025-09-03 04:49:33] [Rank 0] step:2261/10000 train_time:160718ms step_avg:71.08ms +[2025-09-03 04:49:35] [Rank 0] step:2281/10000 train_time:162192ms step_avg:71.11ms +[2025-09-03 04:49:35] [Rank 0] step:2281/10000 train_time:162192ms step_avg:71.11ms +[2025-09-03 04:49:36] [Rank 0] step:2301/10000 train_time:163666ms step_avg:71.13ms +[2025-09-03 04:49:36] [Rank 0] step:2301/10000 train_time:163666ms step_avg:71.13ms +[2025-09-03 04:49:38] [Rank 0] step:2321/10000 train_time:165139ms step_avg:71.15ms +[2025-09-03 04:49:38] [Rank 0] step:2321/10000 train_time:165139ms step_avg:71.15ms +[2025-09-03 04:49:39] [Rank 0] step:2341/10000 train_time:166613ms step_avg:71.17ms +[2025-09-03 04:49:39] [Rank 0] step:2341/10000 train_time:166613ms step_avg:71.17ms +[2025-09-03 04:49:41] [Rank 0] step:2361/10000 train_time:168087ms step_avg:71.19ms +[2025-09-03 04:49:41] [Rank 0] step:2361/10000 train_time:168087ms step_avg:71.19ms +[2025-09-03 04:49:42] [Rank 0] step:2381/10000 train_time:169561ms step_avg:71.21ms +[2025-09-03 04:49:42] [Rank 0] step:2381/10000 train_time:169561ms step_avg:71.21ms +[2025-09-03 04:49:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:49:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:49:56] [Rank 0] PRINT: step:2400/10000 val_loss:4.5123 svd_entropy: attn_qk:H=0.6789,top10E=0.38,eRank=123.1,q75/q25=49.43 attn_vo:H=0.6995,top10E=0.27,eRank=176.2,q75/q25=inf mlp_w1:H=0.6773,top10E=0.41,eRank=105.8,q75/q25=8.31 mlp_w2:H=0.7986,top10E=0.20,eRank=205.3,q75/q25=18.29 vo_prod:H=0.5638,top10E=0.38,eRank=59.6,q75/q25=inf train_time:171184ms step_avg:71.33ms +[2025-09-03 04:49:56] [Rank 0] PRINT: step:2400/10000 val_loss:4.5123 svd_entropy: attn_qk:H=0.6789,top10E=0.38,eRank=123.1,q75/q25=49.43 attn_vo:H=0.6995,top10E=0.27,eRank=176.2,q75/q25=inf mlp_w1:H=0.6773,top10E=0.41,eRank=105.8,q75/q25=8.31 mlp_w2:H=0.7986,top10E=0.20,eRank=205.3,q75/q25=18.29 vo_prod:H=0.5638,top10E=0.38,eRank=59.6,q75/q25=inf train_time:171184ms step_avg:71.33ms +[2025-09-03 04:49:56] [Rank 0] step:2401/10000 train_time:171196ms step_avg:71.30ms +[2025-09-03 04:49:56] [Rank 0] step:2401/10000 train_time:171196ms step_avg:71.30ms +[2025-09-03 04:49:57] [Rank 0] step:2421/10000 train_time:172536ms step_avg:71.27ms +[2025-09-03 04:49:57] [Rank 0] step:2421/10000 train_time:172536ms step_avg:71.27ms +[2025-09-03 04:49:59] [Rank 0] step:2441/10000 train_time:174010ms step_avg:71.29ms +[2025-09-03 04:49:59] [Rank 0] step:2441/10000 train_time:174010ms step_avg:71.29ms +[2025-09-03 04:50:00] [Rank 0] step:2461/10000 train_time:175484ms step_avg:71.31ms +[2025-09-03 04:50:00] [Rank 0] step:2461/10000 train_time:175484ms step_avg:71.31ms +[2025-09-03 04:50:02] [Rank 0] step:2481/10000 train_time:176958ms step_avg:71.33ms +[2025-09-03 04:50:02] [Rank 0] step:2481/10000 train_time:176958ms step_avg:71.33ms +[2025-09-03 04:50:03] [Rank 0] step:2501/10000 train_time:178433ms step_avg:71.34ms +[2025-09-03 04:50:03] [Rank 0] step:2501/10000 train_time:178433ms step_avg:71.34ms +[2025-09-03 04:50:04] [Rank 0] step:2521/10000 train_time:179913ms step_avg:71.37ms +[2025-09-03 04:50:04] [Rank 0] step:2521/10000 train_time:179913ms step_avg:71.37ms +[2025-09-03 04:50:06] [Rank 0] 
step:2541/10000 train_time:181387ms step_avg:71.38ms +[2025-09-03 04:50:06] [Rank 0] step:2541/10000 train_time:181387ms step_avg:71.38ms +[2025-09-03 04:50:07] [Rank 0] step:2561/10000 train_time:182862ms step_avg:71.40ms +[2025-09-03 04:50:07] [Rank 0] step:2561/10000 train_time:182862ms step_avg:71.40ms +[2025-09-03 04:50:09] [Rank 0] step:2581/10000 train_time:184340ms step_avg:71.42ms +[2025-09-03 04:50:09] [Rank 0] step:2581/10000 train_time:184340ms step_avg:71.42ms +[2025-09-03 04:50:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:50:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:50:22] [Rank 0] PRINT: step:2600/10000 val_loss:4.4479 svd_entropy: attn_qk:H=0.6851,top10E=0.37,eRank=126.3,q75/q25=53.59 attn_vo:H=0.7071,top10E=0.26,eRank=182.1,q75/q25=inf mlp_w1:H=0.6878,top10E=0.39,eRank=112.5,q75/q25=8.83 mlp_w2:H=0.8049,top10E=0.19,eRank=214.4,q75/q25=19.20 vo_prod:H=0.5720,top10E=0.37,eRank=63.1,q75/q25=inf train_time:185964ms step_avg:71.52ms +[2025-09-03 04:50:22] [Rank 0] PRINT: step:2600/10000 val_loss:4.4479 svd_entropy: attn_qk:H=0.6851,top10E=0.37,eRank=126.3,q75/q25=53.59 attn_vo:H=0.7071,top10E=0.26,eRank=182.1,q75/q25=inf mlp_w1:H=0.6878,top10E=0.39,eRank=112.5,q75/q25=8.83 mlp_w2:H=0.8049,top10E=0.19,eRank=214.4,q75/q25=19.20 vo_prod:H=0.5720,top10E=0.37,eRank=63.1,q75/q25=inf train_time:185964ms step_avg:71.52ms +[2025-09-03 04:50:22] [Rank 0] step:2601/10000 train_time:185975ms step_avg:71.50ms +[2025-09-03 04:50:22] [Rank 0] step:2601/10000 train_time:185975ms step_avg:71.50ms +[2025-09-03 04:50:24] [Rank 0] step:2621/10000 train_time:187321ms step_avg:71.47ms +[2025-09-03 04:50:24] [Rank 0] step:2621/10000 train_time:187321ms step_avg:71.47ms +[2025-09-03 04:50:25] [Rank 0] step:2641/10000 train_time:188795ms step_avg:71.49ms +[2025-09-03 
04:50:25] [Rank 0] step:2641/10000 train_time:188795ms step_avg:71.49ms +[2025-09-03 04:50:27] [Rank 0] step:2661/10000 train_time:190269ms step_avg:71.50ms +[2025-09-03 04:50:27] [Rank 0] step:2661/10000 train_time:190269ms step_avg:71.50ms +[2025-09-03 04:50:28] [Rank 0] step:2681/10000 train_time:191743ms step_avg:71.52ms +[2025-09-03 04:50:28] [Rank 0] step:2681/10000 train_time:191743ms step_avg:71.52ms +[2025-09-03 04:50:30] [Rank 0] step:2701/10000 train_time:193217ms step_avg:71.54ms +[2025-09-03 04:50:30] [Rank 0] step:2701/10000 train_time:193217ms step_avg:71.54ms +[2025-09-03 04:50:31] [Rank 0] step:2721/10000 train_time:194692ms step_avg:71.55ms +[2025-09-03 04:50:31] [Rank 0] step:2721/10000 train_time:194692ms step_avg:71.55ms +[2025-09-03 04:50:32] [Rank 0] step:2741/10000 train_time:196167ms step_avg:71.57ms +[2025-09-03 04:50:32] [Rank 0] step:2741/10000 train_time:196167ms step_avg:71.57ms +[2025-09-03 04:50:34] [Rank 0] step:2761/10000 train_time:197643ms step_avg:71.58ms +[2025-09-03 04:50:34] [Rank 0] step:2761/10000 train_time:197643ms step_avg:71.58ms +[2025-09-03 04:50:35] [Rank 0] step:2781/10000 train_time:199118ms step_avg:71.60ms +[2025-09-03 04:50:35] [Rank 0] step:2781/10000 train_time:199118ms step_avg:71.60ms +[2025-09-03 04:50:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:50:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:50:49] [Rank 0] PRINT: step:2800/10000 val_loss:4.4047 svd_entropy: attn_qk:H=0.6908,top10E=0.36,eRank=129.2,q75/q25=57.77 attn_vo:H=0.7139,top10E=0.25,eRank=187.6,q75/q25=inf mlp_w1:H=0.6974,top10E=0.38,eRank=119.1,q75/q25=9.38 mlp_w2:H=0.8104,top10E=0.18,eRank=222.6,q75/q25=20.03 vo_prod:H=0.5797,top10E=0.35,eRank=66.5,q75/q25=inf train_time:200741ms step_avg:71.69ms +[2025-09-03 04:50:49] [Rank 0] PRINT: step:2800/10000 val_loss:4.4047 svd_entropy: attn_qk:H=0.6908,top10E=0.36,eRank=129.2,q75/q25=57.77 attn_vo:H=0.7139,top10E=0.25,eRank=187.6,q75/q25=inf mlp_w1:H=0.6974,top10E=0.38,eRank=119.1,q75/q25=9.38 mlp_w2:H=0.8104,top10E=0.18,eRank=222.6,q75/q25=20.03 vo_prod:H=0.5797,top10E=0.35,eRank=66.5,q75/q25=inf train_time:200741ms step_avg:71.69ms +[2025-09-03 04:50:49] [Rank 0] step:2801/10000 train_time:200753ms step_avg:71.67ms +[2025-09-03 04:50:49] [Rank 0] step:2801/10000 train_time:200753ms step_avg:71.67ms +[2025-09-03 04:50:50] [Rank 0] step:2821/10000 train_time:202090ms step_avg:71.64ms +[2025-09-03 04:50:50] [Rank 0] step:2821/10000 train_time:202090ms step_avg:71.64ms +[2025-09-03 04:50:52] [Rank 0] step:2841/10000 train_time:203563ms step_avg:71.65ms +[2025-09-03 04:50:52] [Rank 0] step:2841/10000 train_time:203563ms step_avg:71.65ms +[2025-09-03 04:50:53] [Rank 0] step:2861/10000 train_time:205036ms step_avg:71.67ms +[2025-09-03 04:50:53] [Rank 0] step:2861/10000 train_time:205036ms step_avg:71.67ms +[2025-09-03 04:50:55] [Rank 0] step:2881/10000 train_time:206509ms step_avg:71.68ms +[2025-09-03 04:50:55] [Rank 0] step:2881/10000 train_time:206509ms step_avg:71.68ms +[2025-09-03 04:50:56] [Rank 0] step:2901/10000 train_time:207982ms step_avg:71.69ms +[2025-09-03 04:50:56] [Rank 0] step:2901/10000 train_time:207982ms step_avg:71.69ms +[2025-09-03 04:50:58] [Rank 0] step:2921/10000 train_time:209456ms step_avg:71.71ms +[2025-09-03 04:50:58] [Rank 0] step:2921/10000 train_time:209456ms step_avg:71.71ms +[2025-09-03 04:50:59] [Rank 0] 
step:2941/10000 train_time:210930ms step_avg:71.72ms +[2025-09-03 04:50:59] [Rank 0] step:2941/10000 train_time:210930ms step_avg:71.72ms +[2025-09-03 04:51:01] [Rank 0] step:2961/10000 train_time:212404ms step_avg:71.73ms +[2025-09-03 04:51:01] [Rank 0] step:2961/10000 train_time:212404ms step_avg:71.73ms +[2025-09-03 04:51:02] [Rank 0] step:2981/10000 train_time:213884ms step_avg:71.75ms +[2025-09-03 04:51:02] [Rank 0] step:2981/10000 train_time:213884ms step_avg:71.75ms +[2025-09-03 04:51:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:51:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:51:16] [Rank 0] PRINT: step:3000/10000 val_loss:4.3586 svd_entropy: attn_qk:H=0.6958,top10E=0.35,eRank=131.9,q75/q25=61.17 attn_vo:H=0.7200,top10E=0.24,eRank=192.6,q75/q25=inf mlp_w1:H=0.7058,top10E=0.37,eRank=125.2,q75/q25=9.88 mlp_w2:H=0.8152,top10E=0.18,eRank=230.1,q75/q25=20.83 vo_prod:H=0.5865,top10E=0.34,eRank=69.7,q75/q25=inf train_time:215517ms step_avg:71.84ms +[2025-09-03 04:51:16] [Rank 0] PRINT: step:3000/10000 val_loss:4.3586 svd_entropy: attn_qk:H=0.6958,top10E=0.35,eRank=131.9,q75/q25=61.17 attn_vo:H=0.7200,top10E=0.24,eRank=192.6,q75/q25=inf mlp_w1:H=0.7058,top10E=0.37,eRank=125.2,q75/q25=9.88 mlp_w2:H=0.8152,top10E=0.18,eRank=230.1,q75/q25=20.83 vo_prod:H=0.5865,top10E=0.34,eRank=69.7,q75/q25=inf train_time:215517ms step_avg:71.84ms +[2025-09-03 04:51:16] [Rank 0] step:3001/10000 train_time:215529ms step_avg:71.82ms +[2025-09-03 04:51:16] [Rank 0] step:3001/10000 train_time:215529ms step_avg:71.82ms +[2025-09-03 04:51:17] [Rank 0] step:3021/10000 train_time:216883ms step_avg:71.79ms +[2025-09-03 04:51:17] [Rank 0] step:3021/10000 train_time:216883ms step_avg:71.79ms +[2025-09-03 04:51:19] [Rank 0] step:3041/10000 train_time:218369ms step_avg:71.81ms +[2025-09-03 
04:51:19] [Rank 0] step:3041/10000 train_time:218369ms step_avg:71.81ms +[2025-09-03 04:51:20] [Rank 0] step:3061/10000 train_time:219852ms step_avg:71.82ms +[2025-09-03 04:51:20] [Rank 0] step:3061/10000 train_time:219852ms step_avg:71.82ms +[2025-09-03 04:51:22] [Rank 0] step:3081/10000 train_time:221385ms step_avg:71.85ms +[2025-09-03 04:51:22] [Rank 0] step:3081/10000 train_time:221385ms step_avg:71.85ms +[2025-09-03 04:51:23] [Rank 0] step:3101/10000 train_time:222867ms step_avg:71.87ms +[2025-09-03 04:51:23] [Rank 0] step:3101/10000 train_time:222867ms step_avg:71.87ms +[2025-09-03 04:51:25] [Rank 0] step:3121/10000 train_time:224349ms step_avg:71.88ms +[2025-09-03 04:51:25] [Rank 0] step:3121/10000 train_time:224349ms step_avg:71.88ms +[2025-09-03 04:51:26] [Rank 0] step:3141/10000 train_time:225831ms step_avg:71.90ms +[2025-09-03 04:51:26] [Rank 0] step:3141/10000 train_time:225831ms step_avg:71.90ms +[2025-09-03 04:51:28] [Rank 0] step:3161/10000 train_time:227315ms step_avg:71.91ms +[2025-09-03 04:51:28] [Rank 0] step:3161/10000 train_time:227315ms step_avg:71.91ms +[2025-09-03 04:51:29] [Rank 0] step:3181/10000 train_time:228796ms step_avg:71.93ms +[2025-09-03 04:51:29] [Rank 0] step:3181/10000 train_time:228796ms step_avg:71.93ms +[2025-09-03 04:51:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:51:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:51:42] [Rank 0] PRINT: step:3200/10000 val_loss:4.3166 svd_entropy: attn_qk:H=0.7002,top10E=0.34,eRank=134.4,q75/q25=64.64 attn_vo:H=0.7253,top10E=0.24,eRank=197.2,q75/q25=inf mlp_w1:H=0.7137,top10E=0.36,eRank=131.2,q75/q25=10.42 mlp_w2:H=0.8193,top10E=0.17,eRank=236.7,q75/q25=21.66 vo_prod:H=0.5924,top10E=0.33,eRank=72.7,q75/q25=inf train_time:230428ms step_avg:72.01ms +[2025-09-03 04:51:42] [Rank 0] PRINT: step:3200/10000 val_loss:4.3166 svd_entropy: attn_qk:H=0.7002,top10E=0.34,eRank=134.4,q75/q25=64.64 attn_vo:H=0.7253,top10E=0.24,eRank=197.2,q75/q25=inf mlp_w1:H=0.7137,top10E=0.36,eRank=131.2,q75/q25=10.42 mlp_w2:H=0.8193,top10E=0.17,eRank=236.7,q75/q25=21.66 vo_prod:H=0.5924,top10E=0.33,eRank=72.7,q75/q25=inf train_time:230428ms step_avg:72.01ms +[2025-09-03 04:51:43] [Rank 0] step:3201/10000 train_time:230440ms step_avg:71.99ms +[2025-09-03 04:51:43] [Rank 0] step:3201/10000 train_time:230440ms step_avg:71.99ms +[2025-09-03 04:51:44] [Rank 0] step:3221/10000 train_time:231786ms step_avg:71.96ms +[2025-09-03 04:51:44] [Rank 0] step:3221/10000 train_time:231786ms step_avg:71.96ms +[2025-09-03 04:51:46] [Rank 0] step:3241/10000 train_time:233268ms step_avg:71.97ms +[2025-09-03 04:51:46] [Rank 0] step:3241/10000 train_time:233268ms step_avg:71.97ms +[2025-09-03 04:51:47] [Rank 0] step:3261/10000 train_time:234750ms step_avg:71.99ms +[2025-09-03 04:51:47] [Rank 0] step:3261/10000 train_time:234750ms step_avg:71.99ms +[2025-09-03 04:51:49] [Rank 0] step:3281/10000 train_time:236233ms step_avg:72.00ms +[2025-09-03 04:51:49] [Rank 0] step:3281/10000 train_time:236233ms step_avg:72.00ms +[2025-09-03 04:51:50] [Rank 0] step:3301/10000 train_time:237715ms step_avg:72.01ms +[2025-09-03 04:51:50] [Rank 0] step:3301/10000 train_time:237715ms step_avg:72.01ms +[2025-09-03 04:51:51] [Rank 0] step:3321/10000 train_time:239197ms step_avg:72.03ms +[2025-09-03 04:51:51] [Rank 0] step:3321/10000 train_time:239197ms step_avg:72.03ms +[2025-09-03 04:51:53] [Rank 0] 
step:3341/10000 train_time:240679ms step_avg:72.04ms +[2025-09-03 04:51:53] [Rank 0] step:3341/10000 train_time:240679ms step_avg:72.04ms +[2025-09-03 04:51:54] [Rank 0] step:3361/10000 train_time:242162ms step_avg:72.05ms +[2025-09-03 04:51:54] [Rank 0] step:3361/10000 train_time:242162ms step_avg:72.05ms +[2025-09-03 04:51:56] [Rank 0] step:3381/10000 train_time:243646ms step_avg:72.06ms +[2025-09-03 04:51:56] [Rank 0] step:3381/10000 train_time:243646ms step_avg:72.06ms +[2025-09-03 04:51:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:51:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:52:09] [Rank 0] PRINT: step:3400/10000 val_loss:4.2732 svd_entropy: attn_qk:H=0.7046,top10E=0.34,eRank=136.9,q75/q25=67.83 attn_vo:H=0.7305,top10E=0.23,eRank=201.8,q75/q25=inf mlp_w1:H=0.7209,top10E=0.35,eRank=137.1,q75/q25=10.98 mlp_w2:H=0.8229,top10E=0.17,eRank=242.6,q75/q25=22.58 vo_prod:H=0.5986,top10E=0.32,eRank=75.9,q75/q25=inf train_time:245279ms step_avg:72.14ms +[2025-09-03 04:52:09] [Rank 0] PRINT: step:3400/10000 val_loss:4.2732 svd_entropy: attn_qk:H=0.7046,top10E=0.34,eRank=136.9,q75/q25=67.83 attn_vo:H=0.7305,top10E=0.23,eRank=201.8,q75/q25=inf mlp_w1:H=0.7209,top10E=0.35,eRank=137.1,q75/q25=10.98 mlp_w2:H=0.8229,top10E=0.17,eRank=242.6,q75/q25=22.58 vo_prod:H=0.5986,top10E=0.32,eRank=75.9,q75/q25=inf train_time:245279ms step_avg:72.14ms +[2025-09-03 04:52:09] [Rank 0] step:3401/10000 train_time:245291ms step_avg:72.12ms +[2025-09-03 04:52:09] [Rank 0] step:3401/10000 train_time:245291ms step_avg:72.12ms +[2025-09-03 04:52:11] [Rank 0] step:3421/10000 train_time:246635ms step_avg:72.09ms +[2025-09-03 04:52:11] [Rank 0] step:3421/10000 train_time:246635ms step_avg:72.09ms +[2025-09-03 04:52:12] [Rank 0] step:3441/10000 train_time:248117ms step_avg:72.11ms +[2025-09-03 
04:52:12] [Rank 0] step:3441/10000 train_time:248117ms step_avg:72.11ms +[2025-09-03 04:52:14] [Rank 0] step:3461/10000 train_time:249600ms step_avg:72.12ms +[2025-09-03 04:52:14] [Rank 0] step:3461/10000 train_time:249600ms step_avg:72.12ms +[2025-09-03 04:52:15] [Rank 0] step:3481/10000 train_time:251081ms step_avg:72.13ms +[2025-09-03 04:52:15] [Rank 0] step:3481/10000 train_time:251081ms step_avg:72.13ms +[2025-09-03 04:52:17] [Rank 0] step:3501/10000 train_time:252565ms step_avg:72.14ms +[2025-09-03 04:52:17] [Rank 0] step:3501/10000 train_time:252565ms step_avg:72.14ms +[2025-09-03 04:52:18] [Rank 0] step:3521/10000 train_time:254049ms step_avg:72.15ms +[2025-09-03 04:52:18] [Rank 0] step:3521/10000 train_time:254049ms step_avg:72.15ms +[2025-09-03 04:52:20] [Rank 0] step:3541/10000 train_time:255533ms step_avg:72.16ms +[2025-09-03 04:52:20] [Rank 0] step:3541/10000 train_time:255533ms step_avg:72.16ms +[2025-09-03 04:52:21] [Rank 0] step:3561/10000 train_time:257018ms step_avg:72.18ms +[2025-09-03 04:52:21] [Rank 0] step:3561/10000 train_time:257018ms step_avg:72.18ms +[2025-09-03 04:52:23] [Rank 0] step:3581/10000 train_time:258501ms step_avg:72.19ms +[2025-09-03 04:52:23] [Rank 0] step:3581/10000 train_time:258501ms step_avg:72.19ms +[2025-09-03 04:52:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:52:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:52:36] [Rank 0] PRINT: step:3600/10000 val_loss:4.2547 svd_entropy: attn_qk:H=0.7086,top10E=0.33,eRank=139.3,q75/q25=70.49 attn_vo:H=0.7351,top10E=0.22,eRank=206.0,q75/q25=inf mlp_w1:H=0.7280,top10E=0.34,eRank=143.0,q75/q25=11.38 mlp_w2:H=0.8267,top10E=0.16,eRank=248.9,q75/q25=22.96 vo_prod:H=0.6038,top10E=0.31,eRank=78.8,q75/q25=inf train_time:260134ms step_avg:72.26ms +[2025-09-03 04:52:36] [Rank 0] PRINT: step:3600/10000 val_loss:4.2547 svd_entropy: attn_qk:H=0.7086,top10E=0.33,eRank=139.3,q75/q25=70.49 attn_vo:H=0.7351,top10E=0.22,eRank=206.0,q75/q25=inf mlp_w1:H=0.7280,top10E=0.34,eRank=143.0,q75/q25=11.38 mlp_w2:H=0.8267,top10E=0.16,eRank=248.9,q75/q25=22.96 vo_prod:H=0.6038,top10E=0.31,eRank=78.8,q75/q25=inf train_time:260134ms step_avg:72.26ms +[2025-09-03 04:52:36] [Rank 0] step:3601/10000 train_time:260146ms step_avg:72.24ms +[2025-09-03 04:52:36] [Rank 0] step:3601/10000 train_time:260146ms step_avg:72.24ms +[2025-09-03 04:52:38] [Rank 0] step:3621/10000 train_time:261490ms step_avg:72.21ms +[2025-09-03 04:52:38] [Rank 0] step:3621/10000 train_time:261490ms step_avg:72.21ms +[2025-09-03 04:52:39] [Rank 0] step:3641/10000 train_time:262971ms step_avg:72.22ms +[2025-09-03 04:52:39] [Rank 0] step:3641/10000 train_time:262971ms step_avg:72.22ms +[2025-09-03 04:52:41] [Rank 0] step:3661/10000 train_time:264454ms step_avg:72.24ms +[2025-09-03 04:52:41] [Rank 0] step:3661/10000 train_time:264454ms step_avg:72.24ms +[2025-09-03 04:52:42] [Rank 0] step:3681/10000 train_time:265937ms step_avg:72.25ms +[2025-09-03 04:52:42] [Rank 0] step:3681/10000 train_time:265937ms step_avg:72.25ms +[2025-09-03 04:52:44] [Rank 0] step:3701/10000 train_time:267420ms step_avg:72.26ms +[2025-09-03 04:52:44] [Rank 0] step:3701/10000 train_time:267420ms step_avg:72.26ms +[2025-09-03 04:52:45] [Rank 0] step:3721/10000 train_time:268930ms step_avg:72.27ms +[2025-09-03 04:52:45] [Rank 0] step:3721/10000 train_time:268930ms step_avg:72.27ms +[2025-09-03 04:52:47] [Rank 0] 
step:3741/10000 train_time:270448ms step_avg:72.29ms +[2025-09-03 04:52:47] [Rank 0] step:3741/10000 train_time:270448ms step_avg:72.29ms +[2025-09-03 04:52:48] [Rank 0] step:3761/10000 train_time:271966ms step_avg:72.31ms +[2025-09-03 04:52:48] [Rank 0] step:3761/10000 train_time:271966ms step_avg:72.31ms +[2025-09-03 04:52:50] [Rank 0] step:3781/10000 train_time:273486ms step_avg:72.33ms +[2025-09-03 04:52:50] [Rank 0] step:3781/10000 train_time:273486ms step_avg:72.33ms +[2025-09-03 04:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:53:03] [Rank 0] PRINT: step:3800/10000 val_loss:4.2005 svd_entropy: attn_qk:H=0.7122,top10E=0.32,eRank=141.5,q75/q25=72.95 attn_vo:H=0.7394,top10E=0.22,eRank=210.0,q75/q25=inf mlp_w1:H=0.7341,top10E=0.33,eRank=148.5,q75/q25=11.81 mlp_w2:H=0.8299,top10E=0.16,eRank=254.4,q75/q25=23.55 vo_prod:H=0.6088,top10E=0.30,eRank=81.6,q75/q25=inf train_time:275160ms step_avg:72.41ms +[2025-09-03 04:53:03] [Rank 0] PRINT: step:3800/10000 val_loss:4.2005 svd_entropy: attn_qk:H=0.7122,top10E=0.32,eRank=141.5,q75/q25=72.95 attn_vo:H=0.7394,top10E=0.22,eRank=210.0,q75/q25=inf mlp_w1:H=0.7341,top10E=0.33,eRank=148.5,q75/q25=11.81 mlp_w2:H=0.8299,top10E=0.16,eRank=254.4,q75/q25=23.55 vo_prod:H=0.6088,top10E=0.30,eRank=81.6,q75/q25=inf train_time:275160ms step_avg:72.41ms +[2025-09-03 04:53:03] [Rank 0] step:3801/10000 train_time:275172ms step_avg:72.39ms +[2025-09-03 04:53:03] [Rank 0] step:3801/10000 train_time:275172ms step_avg:72.39ms +[2025-09-03 04:53:05] [Rank 0] step:3821/10000 train_time:276555ms step_avg:72.38ms +[2025-09-03 04:53:05] [Rank 0] step:3821/10000 train_time:276555ms step_avg:72.38ms +[2025-09-03 04:53:06] [Rank 0] step:3841/10000 train_time:278075ms step_avg:72.40ms +[2025-09-03 
04:53:06] [Rank 0] step:3841/10000 train_time:278075ms step_avg:72.40ms +[2025-09-03 04:53:08] [Rank 0] step:3861/10000 train_time:279595ms step_avg:72.42ms +[2025-09-03 04:53:08] [Rank 0] step:3861/10000 train_time:279595ms step_avg:72.42ms +[2025-09-03 04:53:09] [Rank 0] step:3881/10000 train_time:281114ms step_avg:72.43ms +[2025-09-03 04:53:09] [Rank 0] step:3881/10000 train_time:281114ms step_avg:72.43ms +[2025-09-03 04:53:11] [Rank 0] step:3901/10000 train_time:282634ms step_avg:72.45ms +[2025-09-03 04:53:11] [Rank 0] step:3901/10000 train_time:282634ms step_avg:72.45ms +[2025-09-03 04:53:12] [Rank 0] step:3921/10000 train_time:284150ms step_avg:72.47ms +[2025-09-03 04:53:12] [Rank 0] step:3921/10000 train_time:284150ms step_avg:72.47ms +[2025-09-03 04:53:14] [Rank 0] step:3941/10000 train_time:285669ms step_avg:72.49ms +[2025-09-03 04:53:14] [Rank 0] step:3941/10000 train_time:285669ms step_avg:72.49ms +[2025-09-03 04:53:15] [Rank 0] step:3961/10000 train_time:287186ms step_avg:72.50ms +[2025-09-03 04:53:15] [Rank 0] step:3961/10000 train_time:287186ms step_avg:72.50ms +[2025-09-03 04:53:17] [Rank 0] step:3981/10000 train_time:288705ms step_avg:72.52ms +[2025-09-03 04:53:17] [Rank 0] step:3981/10000 train_time:288705ms step_avg:72.52ms +[2025-09-03 04:53:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:53:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:53:30] [Rank 0] PRINT: step:4000/10000 val_loss:4.1730 svd_entropy: attn_qk:H=0.7157,top10E=0.32,eRank=143.8,q75/q25=74.81 attn_vo:H=0.7432,top10E=0.21,eRank=213.6,q75/q25=inf mlp_w1:H=0.7399,top10E=0.32,eRank=154.0,q75/q25=12.25 mlp_w2:H=0.8328,top10E=0.16,eRank=259.3,q75/q25=24.01 vo_prod:H=0.6131,top10E=0.30,eRank=84.1,q75/q25=inf train_time:290376ms step_avg:72.59ms +[2025-09-03 04:53:30] [Rank 0] PRINT: step:4000/10000 val_loss:4.1730 svd_entropy: attn_qk:H=0.7157,top10E=0.32,eRank=143.8,q75/q25=74.81 attn_vo:H=0.7432,top10E=0.21,eRank=213.6,q75/q25=inf mlp_w1:H=0.7399,top10E=0.32,eRank=154.0,q75/q25=12.25 mlp_w2:H=0.8328,top10E=0.16,eRank=259.3,q75/q25=24.01 vo_prod:H=0.6131,top10E=0.30,eRank=84.1,q75/q25=inf train_time:290376ms step_avg:72.59ms +[2025-09-03 04:53:30] [Rank 0] step:4001/10000 train_time:290388ms step_avg:72.58ms +[2025-09-03 04:53:30] [Rank 0] step:4001/10000 train_time:290388ms step_avg:72.58ms +[2025-09-03 04:53:32] [Rank 0] step:4021/10000 train_time:291779ms step_avg:72.56ms +[2025-09-03 04:53:32] [Rank 0] step:4021/10000 train_time:291779ms step_avg:72.56ms +[2025-09-03 04:53:33] [Rank 0] step:4041/10000 train_time:293297ms step_avg:72.58ms +[2025-09-03 04:53:33] [Rank 0] step:4041/10000 train_time:293297ms step_avg:72.58ms +[2025-09-03 04:53:35] [Rank 0] step:4061/10000 train_time:294815ms step_avg:72.60ms +[2025-09-03 04:53:35] [Rank 0] step:4061/10000 train_time:294815ms step_avg:72.60ms +[2025-09-03 04:53:36] [Rank 0] step:4081/10000 train_time:296443ms step_avg:72.64ms +[2025-09-03 04:53:36] [Rank 0] step:4081/10000 train_time:296443ms step_avg:72.64ms +[2025-09-03 04:53:38] [Rank 0] step:4101/10000 train_time:297961ms step_avg:72.66ms +[2025-09-03 04:53:38] [Rank 0] step:4101/10000 train_time:297961ms step_avg:72.66ms +[2025-09-03 04:53:39] [Rank 0] step:4121/10000 train_time:299481ms step_avg:72.67ms +[2025-09-03 04:53:39] [Rank 0] step:4121/10000 train_time:299481ms step_avg:72.67ms +[2025-09-03 04:53:41] [Rank 0] 
step:4141/10000 train_time:301001ms step_avg:72.69ms +[2025-09-03 04:53:41] [Rank 0] step:4141/10000 train_time:301001ms step_avg:72.69ms +[2025-09-03 04:53:43] [Rank 0] step:4161/10000 train_time:302519ms step_avg:72.70ms +[2025-09-03 04:53:43] [Rank 0] step:4161/10000 train_time:302519ms step_avg:72.70ms +[2025-09-03 04:53:44] [Rank 0] step:4181/10000 train_time:304041ms step_avg:72.72ms +[2025-09-03 04:53:44] [Rank 0] step:4181/10000 train_time:304041ms step_avg:72.72ms +[2025-09-03 04:53:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:53:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:53:57] [Rank 0] PRINT: step:4200/10000 val_loss:4.1555 svd_entropy: attn_qk:H=0.7190,top10E=0.31,eRank=145.9,q75/q25=76.93 attn_vo:H=0.7468,top10E=0.21,eRank=217.1,q75/q25=inf mlp_w1:H=0.7453,top10E=0.31,eRank=159.2,q75/q25=12.79 mlp_w2:H=0.8353,top10E=0.15,eRank=263.8,q75/q25=24.67 vo_prod:H=0.6173,top10E=0.29,eRank=86.7,q75/q25=inf train_time:305714ms step_avg:72.79ms +[2025-09-03 04:53:57] [Rank 0] PRINT: step:4200/10000 val_loss:4.1555 svd_entropy: attn_qk:H=0.7190,top10E=0.31,eRank=145.9,q75/q25=76.93 attn_vo:H=0.7468,top10E=0.21,eRank=217.1,q75/q25=inf mlp_w1:H=0.7453,top10E=0.31,eRank=159.2,q75/q25=12.79 mlp_w2:H=0.8353,top10E=0.15,eRank=263.8,q75/q25=24.67 vo_prod:H=0.6173,top10E=0.29,eRank=86.7,q75/q25=inf train_time:305714ms step_avg:72.79ms +[2025-09-03 04:53:57] [Rank 0] step:4201/10000 train_time:305725ms step_avg:72.77ms +[2025-09-03 04:53:57] [Rank 0] step:4201/10000 train_time:305725ms step_avg:72.77ms +[2025-09-03 04:53:59] [Rank 0] step:4221/10000 train_time:307112ms step_avg:72.76ms +[2025-09-03 04:53:59] [Rank 0] step:4221/10000 train_time:307112ms step_avg:72.76ms +[2025-09-03 04:54:00] [Rank 0] step:4241/10000 train_time:308631ms step_avg:72.77ms +[2025-09-03 
04:54:00] [Rank 0] step:4241/10000 train_time:308631ms step_avg:72.77ms +[2025-09-03 04:54:02] [Rank 0] step:4261/10000 train_time:310151ms step_avg:72.79ms +[2025-09-03 04:54:02] [Rank 0] step:4261/10000 train_time:310151ms step_avg:72.79ms +[2025-09-03 04:54:03] [Rank 0] step:4281/10000 train_time:311669ms step_avg:72.80ms +[2025-09-03 04:54:03] [Rank 0] step:4281/10000 train_time:311669ms step_avg:72.80ms +[2025-09-03 04:54:05] [Rank 0] step:4301/10000 train_time:313189ms step_avg:72.82ms +[2025-09-03 04:54:05] [Rank 0] step:4301/10000 train_time:313189ms step_avg:72.82ms +[2025-09-03 04:54:06] [Rank 0] step:4321/10000 train_time:314711ms step_avg:72.83ms +[2025-09-03 04:54:06] [Rank 0] step:4321/10000 train_time:314711ms step_avg:72.83ms +[2025-09-03 04:54:08] [Rank 0] step:4341/10000 train_time:316231ms step_avg:72.85ms +[2025-09-03 04:54:08] [Rank 0] step:4341/10000 train_time:316231ms step_avg:72.85ms +[2025-09-03 04:54:09] [Rank 0] step:4361/10000 train_time:317752ms step_avg:72.86ms +[2025-09-03 04:54:09] [Rank 0] step:4361/10000 train_time:317752ms step_avg:72.86ms +[2025-09-03 04:54:11] [Rank 0] step:4381/10000 train_time:319274ms step_avg:72.88ms +[2025-09-03 04:54:11] [Rank 0] step:4381/10000 train_time:319274ms step_avg:72.88ms +[2025-09-03 04:54:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:54:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:54:24] [Rank 0] PRINT: step:4400/10000 val_loss:4.1289 svd_entropy: attn_qk:H=0.7222,top10E=0.31,eRank=148.0,q75/q25=78.51 attn_vo:H=0.7501,top10E=0.20,eRank=220.4,q75/q25=inf mlp_w1:H=0.7504,top10E=0.31,eRank=164.2,q75/q25=13.32 mlp_w2:H=0.8376,top10E=0.15,eRank=267.9,q75/q25=25.25 vo_prod:H=0.6212,top10E=0.28,eRank=89.1,q75/q25=inf train_time:320946ms step_avg:72.94ms +[2025-09-03 04:54:24] [Rank 0] PRINT: step:4400/10000 val_loss:4.1289 svd_entropy: attn_qk:H=0.7222,top10E=0.31,eRank=148.0,q75/q25=78.51 attn_vo:H=0.7501,top10E=0.20,eRank=220.4,q75/q25=inf mlp_w1:H=0.7504,top10E=0.31,eRank=164.2,q75/q25=13.32 mlp_w2:H=0.8376,top10E=0.15,eRank=267.9,q75/q25=25.25 vo_prod:H=0.6212,top10E=0.28,eRank=89.1,q75/q25=inf train_time:320946ms step_avg:72.94ms +[2025-09-03 04:54:24] [Rank 0] step:4401/10000 train_time:320958ms step_avg:72.93ms +[2025-09-03 04:54:24] [Rank 0] step:4401/10000 train_time:320958ms step_avg:72.93ms +[2025-09-03 04:54:26] [Rank 0] step:4421/10000 train_time:322347ms step_avg:72.91ms +[2025-09-03 04:54:26] [Rank 0] step:4421/10000 train_time:322347ms step_avg:72.91ms +[2025-09-03 04:54:27] [Rank 0] step:4441/10000 train_time:323864ms step_avg:72.93ms +[2025-09-03 04:54:27] [Rank 0] step:4441/10000 train_time:323864ms step_avg:72.93ms +[2025-09-03 04:54:29] [Rank 0] step:4461/10000 train_time:325386ms step_avg:72.94ms +[2025-09-03 04:54:29] [Rank 0] step:4461/10000 train_time:325386ms step_avg:72.94ms +[2025-09-03 04:54:30] [Rank 0] step:4481/10000 train_time:326910ms step_avg:72.95ms +[2025-09-03 04:54:30] [Rank 0] step:4481/10000 train_time:326910ms step_avg:72.95ms +[2025-09-03 04:54:32] [Rank 0] step:4501/10000 train_time:328433ms step_avg:72.97ms +[2025-09-03 04:54:32] [Rank 0] step:4501/10000 train_time:328433ms step_avg:72.97ms +[2025-09-03 04:54:33] [Rank 0] step:4521/10000 train_time:329956ms step_avg:72.98ms +[2025-09-03 04:54:33] [Rank 0] step:4521/10000 train_time:329956ms step_avg:72.98ms +[2025-09-03 04:54:35] [Rank 0] 
step:4541/10000 train_time:331480ms step_avg:73.00ms +[2025-09-03 04:54:35] [Rank 0] step:4541/10000 train_time:331480ms step_avg:73.00ms +[2025-09-03 04:54:37] [Rank 0] step:4561/10000 train_time:333006ms step_avg:73.01ms +[2025-09-03 04:54:37] [Rank 0] step:4561/10000 train_time:333006ms step_avg:73.01ms +[2025-09-03 04:54:38] [Rank 0] step:4581/10000 train_time:334530ms step_avg:73.03ms +[2025-09-03 04:54:38] [Rank 0] step:4581/10000 train_time:334530ms step_avg:73.03ms +[2025-09-03 04:54:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:54:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:54:51] [Rank 0] PRINT: step:4600/10000 val_loss:4.0968 svd_entropy: attn_qk:H=0.7252,top10E=0.30,eRank=150.0,q75/q25=80.17 attn_vo:H=0.7533,top10E=0.20,eRank=223.8,q75/q25=inf mlp_w1:H=0.7551,top10E=0.30,eRank=169.2,q75/q25=13.73 mlp_w2:H=0.8397,top10E=0.15,eRank=271.7,q75/q25=25.91 vo_prod:H=0.6249,top10E=0.28,eRank=91.5,q75/q25=inf train_time:336208ms step_avg:73.09ms +[2025-09-03 04:54:51] [Rank 0] PRINT: step:4600/10000 val_loss:4.0968 svd_entropy: attn_qk:H=0.7252,top10E=0.30,eRank=150.0,q75/q25=80.17 attn_vo:H=0.7533,top10E=0.20,eRank=223.8,q75/q25=inf mlp_w1:H=0.7551,top10E=0.30,eRank=169.2,q75/q25=13.73 mlp_w2:H=0.8397,top10E=0.15,eRank=271.7,q75/q25=25.91 vo_prod:H=0.6249,top10E=0.28,eRank=91.5,q75/q25=inf train_time:336208ms step_avg:73.09ms +[2025-09-03 04:54:51] [Rank 0] step:4601/10000 train_time:336220ms step_avg:73.08ms +[2025-09-03 04:54:51] [Rank 0] step:4601/10000 train_time:336220ms step_avg:73.08ms +[2025-09-03 04:54:53] [Rank 0] step:4621/10000 train_time:337618ms step_avg:73.06ms +[2025-09-03 04:54:53] [Rank 0] step:4621/10000 train_time:337618ms step_avg:73.06ms +[2025-09-03 04:54:54] [Rank 0] step:4641/10000 train_time:339146ms step_avg:73.08ms +[2025-09-03 
04:54:54] [Rank 0] step:4641/10000 train_time:339146ms step_avg:73.08ms +[2025-09-03 04:54:56] [Rank 0] step:4661/10000 train_time:340669ms step_avg:73.09ms +[2025-09-03 04:54:56] [Rank 0] step:4661/10000 train_time:340669ms step_avg:73.09ms +[2025-09-03 04:54:58] [Rank 0] step:4681/10000 train_time:342193ms step_avg:73.10ms +[2025-09-03 04:54:58] [Rank 0] step:4681/10000 train_time:342193ms step_avg:73.10ms +[2025-09-03 04:54:59] [Rank 0] step:4701/10000 train_time:343719ms step_avg:73.12ms +[2025-09-03 04:54:59] [Rank 0] step:4701/10000 train_time:343719ms step_avg:73.12ms +[2025-09-03 04:55:01] [Rank 0] step:4721/10000 train_time:345244ms step_avg:73.13ms +[2025-09-03 04:55:01] [Rank 0] step:4721/10000 train_time:345244ms step_avg:73.13ms +[2025-09-03 04:55:02] [Rank 0] step:4741/10000 train_time:346770ms step_avg:73.14ms +[2025-09-03 04:55:02] [Rank 0] step:4741/10000 train_time:346770ms step_avg:73.14ms +[2025-09-03 04:55:04] [Rank 0] step:4761/10000 train_time:348296ms step_avg:73.16ms +[2025-09-03 04:55:04] [Rank 0] step:4761/10000 train_time:348296ms step_avg:73.16ms +[2025-09-03 04:55:05] [Rank 0] step:4781/10000 train_time:349820ms step_avg:73.17ms +[2025-09-03 04:55:05] [Rank 0] step:4781/10000 train_time:349820ms step_avg:73.17ms +[2025-09-03 04:55:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:55:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:55:18] [Rank 0] PRINT: step:4800/10000 val_loss:4.0805 svd_entropy: attn_qk:H=0.7281,top10E=0.30,eRank=152.0,q75/q25=81.68 attn_vo:H=0.7564,top10E=0.19,eRank=227.0,q75/q25=inf mlp_w1:H=0.7594,top10E=0.29,eRank=173.8,q75/q25=14.23 mlp_w2:H=0.8416,top10E=0.15,eRank=275.3,q75/q25=26.62 vo_prod:H=0.6287,top10E=0.27,eRank=93.9,q75/q25=inf train_time:351500ms step_avg:73.23ms +[2025-09-03 04:55:18] [Rank 0] PRINT: step:4800/10000 val_loss:4.0805 svd_entropy: attn_qk:H=0.7281,top10E=0.30,eRank=152.0,q75/q25=81.68 attn_vo:H=0.7564,top10E=0.19,eRank=227.0,q75/q25=inf mlp_w1:H=0.7594,top10E=0.29,eRank=173.8,q75/q25=14.23 mlp_w2:H=0.8416,top10E=0.15,eRank=275.3,q75/q25=26.62 vo_prod:H=0.6287,top10E=0.27,eRank=93.9,q75/q25=inf train_time:351500ms step_avg:73.23ms +[2025-09-03 04:55:18] [Rank 0] step:4801/10000 train_time:351512ms step_avg:73.22ms +[2025-09-03 04:55:18] [Rank 0] step:4801/10000 train_time:351512ms step_avg:73.22ms +[2025-09-03 04:55:20] [Rank 0] step:4821/10000 train_time:352908ms step_avg:73.20ms +[2025-09-03 04:55:20] [Rank 0] step:4821/10000 train_time:352908ms step_avg:73.20ms +[2025-09-03 04:55:21] [Rank 0] step:4841/10000 train_time:354431ms step_avg:73.21ms +[2025-09-03 04:55:21] [Rank 0] step:4841/10000 train_time:354431ms step_avg:73.21ms +[2025-09-03 04:55:23] [Rank 0] step:4861/10000 train_time:355956ms step_avg:73.23ms +[2025-09-03 04:55:23] [Rank 0] step:4861/10000 train_time:355956ms step_avg:73.23ms +[2025-09-03 04:55:25] [Rank 0] step:4881/10000 train_time:357478ms step_avg:73.24ms +[2025-09-03 04:55:25] [Rank 0] step:4881/10000 train_time:357478ms step_avg:73.24ms +[2025-09-03 04:55:26] [Rank 0] step:4901/10000 train_time:358999ms step_avg:73.25ms +[2025-09-03 04:55:26] [Rank 0] step:4901/10000 train_time:358999ms step_avg:73.25ms +[2025-09-03 04:55:28] [Rank 0] step:4921/10000 train_time:360525ms step_avg:73.26ms +[2025-09-03 04:55:28] [Rank 0] step:4921/10000 train_time:360525ms step_avg:73.26ms +[2025-09-03 04:55:29] [Rank 0] 
step:4941/10000 train_time:362054ms step_avg:73.28ms +[2025-09-03 04:55:29] [Rank 0] step:4941/10000 train_time:362054ms step_avg:73.28ms +[2025-09-03 04:55:31] [Rank 0] step:4961/10000 train_time:363578ms step_avg:73.29ms +[2025-09-03 04:55:31] [Rank 0] step:4961/10000 train_time:363578ms step_avg:73.29ms +[2025-09-03 04:55:32] [Rank 0] step:4981/10000 train_time:365104ms step_avg:73.30ms +[2025-09-03 04:55:32] [Rank 0] step:4981/10000 train_time:365104ms step_avg:73.30ms +[2025-09-03 04:55:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:55:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:55:45] [Rank 0] PRINT: step:5000/10000 val_loss:4.0622 svd_entropy: attn_qk:H=0.7308,top10E=0.30,eRank=153.9,q75/q25=82.89 attn_vo:H=0.7592,top10E=0.19,eRank=230.0,q75/q25=inf mlp_w1:H=0.7635,top10E=0.29,eRank=178.2,q75/q25=14.78 mlp_w2:H=0.8433,top10E=0.15,eRank=278.5,q75/q25=27.17 vo_prod:H=0.6318,top10E=0.27,eRank=96.0,q75/q25=inf train_time:366782ms step_avg:73.36ms +[2025-09-03 04:55:45] [Rank 0] PRINT: step:5000/10000 val_loss:4.0622 svd_entropy: attn_qk:H=0.7308,top10E=0.30,eRank=153.9,q75/q25=82.89 attn_vo:H=0.7592,top10E=0.19,eRank=230.0,q75/q25=inf mlp_w1:H=0.7635,top10E=0.29,eRank=178.2,q75/q25=14.78 mlp_w2:H=0.8433,top10E=0.15,eRank=278.5,q75/q25=27.17 vo_prod:H=0.6318,top10E=0.27,eRank=96.0,q75/q25=inf train_time:366782ms step_avg:73.36ms +[2025-09-03 04:55:45] [Rank 0] step:5001/10000 train_time:366795ms step_avg:73.34ms +[2025-09-03 04:55:45] [Rank 0] step:5001/10000 train_time:366795ms step_avg:73.34ms +[2025-09-03 04:55:47] [Rank 0] step:5021/10000 train_time:368189ms step_avg:73.33ms +[2025-09-03 04:55:47] [Rank 0] step:5021/10000 train_time:368189ms step_avg:73.33ms +[2025-09-03 04:55:49] [Rank 0] step:5041/10000 train_time:369712ms step_avg:73.34ms +[2025-09-03 
04:55:49] [Rank 0] step:5041/10000 train_time:369712ms step_avg:73.34ms +[2025-09-03 04:55:50] [Rank 0] step:5061/10000 train_time:371233ms step_avg:73.35ms +[2025-09-03 04:55:50] [Rank 0] step:5061/10000 train_time:371233ms step_avg:73.35ms +[2025-09-03 04:55:52] [Rank 0] step:5081/10000 train_time:372758ms step_avg:73.36ms +[2025-09-03 04:55:52] [Rank 0] step:5081/10000 train_time:372758ms step_avg:73.36ms +[2025-09-03 04:55:53] [Rank 0] step:5101/10000 train_time:374283ms step_avg:73.37ms +[2025-09-03 04:55:53] [Rank 0] step:5101/10000 train_time:374283ms step_avg:73.37ms +[2025-09-03 04:55:55] [Rank 0] step:5121/10000 train_time:375810ms step_avg:73.39ms +[2025-09-03 04:55:55] [Rank 0] step:5121/10000 train_time:375810ms step_avg:73.39ms +[2025-09-03 04:55:56] [Rank 0] step:5141/10000 train_time:377337ms step_avg:73.40ms +[2025-09-03 04:55:56] [Rank 0] step:5141/10000 train_time:377337ms step_avg:73.40ms +[2025-09-03 04:55:58] [Rank 0] step:5161/10000 train_time:378863ms step_avg:73.41ms +[2025-09-03 04:55:58] [Rank 0] step:5161/10000 train_time:378863ms step_avg:73.41ms +[2025-09-03 04:55:59] [Rank 0] step:5181/10000 train_time:380390ms step_avg:73.42ms +[2025-09-03 04:55:59] [Rank 0] step:5181/10000 train_time:380390ms step_avg:73.42ms +[2025-09-03 04:56:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:56:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:56:12] [Rank 0] PRINT: step:5200/10000 val_loss:4.0394 svd_entropy: attn_qk:H=0.7333,top10E=0.29,eRank=155.8,q75/q25=84.05 attn_vo:H=0.7618,top10E=0.19,eRank=232.8,q75/q25=inf mlp_w1:H=0.7675,top10E=0.28,eRank=182.6,q75/q25=15.22 mlp_w2:H=0.8450,top10E=0.14,eRank=281.7,q75/q25=27.76 vo_prod:H=0.6351,top10E=0.26,eRank=98.3,q75/q25=inf train_time:382096ms step_avg:73.48ms +[2025-09-03 04:56:12] [Rank 0] PRINT: step:5200/10000 val_loss:4.0394 svd_entropy: attn_qk:H=0.7333,top10E=0.29,eRank=155.8,q75/q25=84.05 attn_vo:H=0.7618,top10E=0.19,eRank=232.8,q75/q25=inf mlp_w1:H=0.7675,top10E=0.28,eRank=182.6,q75/q25=15.22 mlp_w2:H=0.8450,top10E=0.14,eRank=281.7,q75/q25=27.76 vo_prod:H=0.6351,top10E=0.26,eRank=98.3,q75/q25=inf train_time:382096ms step_avg:73.48ms +[2025-09-03 04:56:12] [Rank 0] step:5201/10000 train_time:382108ms step_avg:73.47ms +[2025-09-03 04:56:12] [Rank 0] step:5201/10000 train_time:382108ms step_avg:73.47ms +[2025-09-03 04:56:14] [Rank 0] step:5221/10000 train_time:383534ms step_avg:73.46ms +[2025-09-03 04:56:14] [Rank 0] step:5221/10000 train_time:383534ms step_avg:73.46ms +[2025-09-03 04:56:16] [Rank 0] step:5241/10000 train_time:385088ms step_avg:73.48ms +[2025-09-03 04:56:16] [Rank 0] step:5241/10000 train_time:385088ms step_avg:73.48ms +[2025-09-03 04:56:17] [Rank 0] step:5261/10000 train_time:386646ms step_avg:73.49ms +[2025-09-03 04:56:17] [Rank 0] step:5261/10000 train_time:386646ms step_avg:73.49ms +[2025-09-03 04:56:19] [Rank 0] step:5281/10000 train_time:388205ms step_avg:73.51ms +[2025-09-03 04:56:19] [Rank 0] step:5281/10000 train_time:388205ms step_avg:73.51ms +[2025-09-03 04:56:20] [Rank 0] step:5301/10000 train_time:389773ms step_avg:73.53ms +[2025-09-03 04:56:20] [Rank 0] step:5301/10000 train_time:389773ms step_avg:73.53ms +[2025-09-03 04:56:22] [Rank 0] step:5321/10000 train_time:391331ms step_avg:73.54ms +[2025-09-03 04:56:22] [Rank 0] step:5321/10000 train_time:391331ms step_avg:73.54ms +[2025-09-03 04:56:23] [Rank 0] 
step:5341/10000 train_time:392887ms step_avg:73.56ms +[2025-09-03 04:56:23] [Rank 0] step:5341/10000 train_time:392887ms step_avg:73.56ms +[2025-09-03 04:56:25] [Rank 0] step:5361/10000 train_time:394449ms step_avg:73.58ms +[2025-09-03 04:56:25] [Rank 0] step:5361/10000 train_time:394449ms step_avg:73.58ms +[2025-09-03 04:56:27] [Rank 0] step:5381/10000 train_time:396012ms step_avg:73.59ms +[2025-09-03 04:56:27] [Rank 0] step:5381/10000 train_time:396012ms step_avg:73.59ms +[2025-09-03 04:56:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:56:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:56:40] [Rank 0] PRINT: step:5400/10000 val_loss:4.0221 svd_entropy: attn_qk:H=0.7358,top10E=0.29,eRank=157.6,q75/q25=84.73 attn_vo:H=0.7642,top10E=0.18,eRank=235.5,q75/q25=inf mlp_w1:H=0.7713,top10E=0.28,eRank=187.0,q75/q25=15.84 mlp_w2:H=0.8465,top10E=0.14,eRank=284.6,q75/q25=28.37 vo_prod:H=0.6379,top10E=0.26,eRank=100.3,q75/q25=inf train_time:397726ms step_avg:73.65ms +[2025-09-03 04:56:40] [Rank 0] PRINT: step:5400/10000 val_loss:4.0221 svd_entropy: attn_qk:H=0.7358,top10E=0.29,eRank=157.6,q75/q25=84.73 attn_vo:H=0.7642,top10E=0.18,eRank=235.5,q75/q25=inf mlp_w1:H=0.7713,top10E=0.28,eRank=187.0,q75/q25=15.84 mlp_w2:H=0.8465,top10E=0.14,eRank=284.6,q75/q25=28.37 vo_prod:H=0.6379,top10E=0.26,eRank=100.3,q75/q25=inf train_time:397726ms step_avg:73.65ms +[2025-09-03 04:56:40] [Rank 0] step:5401/10000 train_time:397738ms step_avg:73.64ms +[2025-09-03 04:56:40] [Rank 0] step:5401/10000 train_time:397738ms step_avg:73.64ms +[2025-09-03 04:56:41] [Rank 0] step:5421/10000 train_time:399141ms step_avg:73.63ms +[2025-09-03 04:56:41] [Rank 0] step:5421/10000 train_time:399141ms step_avg:73.63ms +[2025-09-03 04:56:43] [Rank 0] step:5441/10000 train_time:400694ms step_avg:73.64ms +[2025-09-03 
04:56:43] [Rank 0] step:5441/10000 train_time:400694ms step_avg:73.64ms +[2025-09-03 04:56:45] [Rank 0] step:5461/10000 train_time:402252ms step_avg:73.66ms +[2025-09-03 04:56:45] [Rank 0] step:5461/10000 train_time:402252ms step_avg:73.66ms +[2025-09-03 04:56:46] [Rank 0] step:5481/10000 train_time:403814ms step_avg:73.68ms +[2025-09-03 04:56:46] [Rank 0] step:5481/10000 train_time:403814ms step_avg:73.68ms +[2025-09-03 04:56:48] [Rank 0] step:5501/10000 train_time:405377ms step_avg:73.69ms +[2025-09-03 04:56:48] [Rank 0] step:5501/10000 train_time:405377ms step_avg:73.69ms +[2025-09-03 04:56:49] [Rank 0] step:5521/10000 train_time:406941ms step_avg:73.71ms +[2025-09-03 04:56:49] [Rank 0] step:5521/10000 train_time:406941ms step_avg:73.71ms +[2025-09-03 04:56:51] [Rank 0] step:5541/10000 train_time:408501ms step_avg:73.72ms +[2025-09-03 04:56:51] [Rank 0] step:5541/10000 train_time:408501ms step_avg:73.72ms +[2025-09-03 04:56:52] [Rank 0] step:5561/10000 train_time:410062ms step_avg:73.74ms +[2025-09-03 04:56:52] [Rank 0] step:5561/10000 train_time:410062ms step_avg:73.74ms +[2025-09-03 04:56:54] [Rank 0] step:5581/10000 train_time:411621ms step_avg:73.75ms +[2025-09-03 04:56:54] [Rank 0] step:5581/10000 train_time:411621ms step_avg:73.75ms +[2025-09-03 04:56:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:56:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:57:07] [Rank 0] PRINT: step:5600/10000 val_loss:4.0065 svd_entropy: attn_qk:H=0.7379,top10E=0.29,eRank=159.2,q75/q25=85.24 attn_vo:H=0.7665,top10E=0.18,eRank=238.1,q75/q25=inf mlp_w1:H=0.7746,top10E=0.27,eRank=191.0,q75/q25=16.25 mlp_w2:H=0.8479,top10E=0.14,eRank=287.3,q75/q25=28.90 vo_prod:H=0.6405,top10E=0.26,eRank=102.2,q75/q25=inf train_time:413337ms step_avg:73.81ms +[2025-09-03 04:57:07] [Rank 0] PRINT: step:5600/10000 val_loss:4.0065 svd_entropy: attn_qk:H=0.7379,top10E=0.29,eRank=159.2,q75/q25=85.24 attn_vo:H=0.7665,top10E=0.18,eRank=238.1,q75/q25=inf mlp_w1:H=0.7746,top10E=0.27,eRank=191.0,q75/q25=16.25 mlp_w2:H=0.8479,top10E=0.14,eRank=287.3,q75/q25=28.90 vo_prod:H=0.6405,top10E=0.26,eRank=102.2,q75/q25=inf train_time:413337ms step_avg:73.81ms +[2025-09-03 04:57:07] [Rank 0] step:5601/10000 train_time:413348ms step_avg:73.80ms +[2025-09-03 04:57:07] [Rank 0] step:5601/10000 train_time:413348ms step_avg:73.80ms +[2025-09-03 04:57:09] [Rank 0] step:5621/10000 train_time:414764ms step_avg:73.79ms +[2025-09-03 04:57:09] [Rank 0] step:5621/10000 train_time:414764ms step_avg:73.79ms +[2025-09-03 04:57:10] [Rank 0] step:5641/10000 train_time:416322ms step_avg:73.80ms +[2025-09-03 04:57:10] [Rank 0] step:5641/10000 train_time:416322ms step_avg:73.80ms +[2025-09-03 04:57:12] [Rank 0] step:5661/10000 train_time:417878ms step_avg:73.82ms +[2025-09-03 04:57:12] [Rank 0] step:5661/10000 train_time:417878ms step_avg:73.82ms +[2025-09-03 04:57:13] [Rank 0] step:5681/10000 train_time:419440ms step_avg:73.83ms +[2025-09-03 04:57:13] [Rank 0] step:5681/10000 train_time:419440ms step_avg:73.83ms +[2025-09-03 04:57:15] [Rank 0] step:5701/10000 train_time:420996ms step_avg:73.85ms +[2025-09-03 04:57:15] [Rank 0] step:5701/10000 train_time:420996ms step_avg:73.85ms +[2025-09-03 04:57:17] [Rank 0] step:5721/10000 train_time:422557ms step_avg:73.86ms +[2025-09-03 04:57:17] [Rank 0] step:5721/10000 train_time:422557ms step_avg:73.86ms +[2025-09-03 04:57:18] [Rank 
0] step:5741/10000 train_time:424115ms step_avg:73.87ms +[2025-09-03 04:57:18] [Rank 0] step:5741/10000 train_time:424115ms step_avg:73.87ms +[2025-09-03 04:57:20] [Rank 0] step:5761/10000 train_time:425673ms step_avg:73.89ms +[2025-09-03 04:57:20] [Rank 0] step:5761/10000 train_time:425673ms step_avg:73.89ms +[2025-09-03 04:57:21] [Rank 0] step:5781/10000 train_time:427232ms step_avg:73.90ms +[2025-09-03 04:57:21] [Rank 0] step:5781/10000 train_time:427232ms step_avg:73.90ms +[2025-09-03 04:57:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:57:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:57:34] [Rank 0] PRINT: step:5800/10000 val_loss:3.9956 svd_entropy: attn_qk:H=0.7402,top10E=0.28,eRank=161.0,q75/q25=85.83 attn_vo:H=0.7687,top10E=0.18,eRank=240.5,q75/q25=inf mlp_w1:H=0.7778,top10E=0.27,eRank=194.8,q75/q25=16.70 mlp_w2:H=0.8493,top10E=0.14,eRank=290.0,q75/q25=29.35 vo_prod:H=0.6430,top10E=0.25,eRank=104.0,q75/q25=inf train_time:428951ms step_avg:73.96ms +[2025-09-03 04:57:34] [Rank 0] PRINT: step:5800/10000 val_loss:3.9956 svd_entropy: attn_qk:H=0.7402,top10E=0.28,eRank=161.0,q75/q25=85.83 attn_vo:H=0.7687,top10E=0.18,eRank=240.5,q75/q25=inf mlp_w1:H=0.7778,top10E=0.27,eRank=194.8,q75/q25=16.70 mlp_w2:H=0.8493,top10E=0.14,eRank=290.0,q75/q25=29.35 vo_prod:H=0.6430,top10E=0.25,eRank=104.0,q75/q25=inf train_time:428951ms step_avg:73.96ms +[2025-09-03 04:57:34] [Rank 0] step:5801/10000 train_time:428963ms step_avg:73.95ms +[2025-09-03 04:57:34] [Rank 0] step:5801/10000 train_time:428963ms step_avg:73.95ms +[2025-09-03 04:57:36] [Rank 0] step:5821/10000 train_time:430387ms step_avg:73.94ms +[2025-09-03 04:57:36] [Rank 0] step:5821/10000 train_time:430387ms step_avg:73.94ms +[2025-09-03 04:57:38] [Rank 0] step:5841/10000 train_time:431941ms step_avg:73.95ms +[2025-09-03 
04:57:38] [Rank 0] step:5841/10000 train_time:431941ms step_avg:73.95ms +[2025-09-03 04:57:39] [Rank 0] step:5861/10000 train_time:433500ms step_avg:73.96ms +[2025-09-03 04:57:39] [Rank 0] step:5861/10000 train_time:433500ms step_avg:73.96ms +[2025-09-03 04:57:41] [Rank 0] step:5881/10000 train_time:435057ms step_avg:73.98ms +[2025-09-03 04:57:41] [Rank 0] step:5881/10000 train_time:435057ms step_avg:73.98ms +[2025-09-03 04:57:42] [Rank 0] step:5901/10000 train_time:436614ms step_avg:73.99ms +[2025-09-03 04:57:42] [Rank 0] step:5901/10000 train_time:436614ms step_avg:73.99ms +[2025-09-03 04:57:44] [Rank 0] step:5921/10000 train_time:438174ms step_avg:74.00ms +[2025-09-03 04:57:44] [Rank 0] step:5921/10000 train_time:438174ms step_avg:74.00ms +[2025-09-03 04:57:45] [Rank 0] step:5941/10000 train_time:439738ms step_avg:74.02ms +[2025-09-03 04:57:45] [Rank 0] step:5941/10000 train_time:439738ms step_avg:74.02ms +[2025-09-03 04:57:47] [Rank 0] step:5961/10000 train_time:441304ms step_avg:74.03ms +[2025-09-03 04:57:47] [Rank 0] step:5961/10000 train_time:441304ms step_avg:74.03ms +[2025-09-03 04:57:48] [Rank 0] step:5981/10000 train_time:442865ms step_avg:74.05ms +[2025-09-03 04:57:48] [Rank 0] step:5981/10000 train_time:442865ms step_avg:74.05ms +[2025-09-03 04:57:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:57:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:58:02] [Rank 0] PRINT: step:6000/10000 val_loss:3.9725 svd_entropy: attn_qk:H=0.7425,top10E=0.28,eRank=162.7,q75/q25=85.84 attn_vo:H=0.7708,top10E=0.17,eRank=243.0,q75/q25=inf mlp_w1:H=0.7810,top10E=0.26,eRank=198.5,q75/q25=17.12 mlp_w2:H=0.8507,top10E=0.14,eRank=292.7,q75/q25=29.72 vo_prod:H=0.6455,top10E=0.25,eRank=105.8,q75/q25=inf train_time:444579ms step_avg:74.10ms +[2025-09-03 04:58:02] [Rank 0] PRINT: step:6000/10000 val_loss:3.9725 svd_entropy: attn_qk:H=0.7425,top10E=0.28,eRank=162.7,q75/q25=85.84 attn_vo:H=0.7708,top10E=0.17,eRank=243.0,q75/q25=inf mlp_w1:H=0.7810,top10E=0.26,eRank=198.5,q75/q25=17.12 mlp_w2:H=0.8507,top10E=0.14,eRank=292.7,q75/q25=29.72 vo_prod:H=0.6455,top10E=0.25,eRank=105.8,q75/q25=inf train_time:444579ms step_avg:74.10ms +[2025-09-03 04:58:02] [Rank 0] step:6001/10000 train_time:444590ms step_avg:74.09ms +[2025-09-03 04:58:02] [Rank 0] step:6001/10000 train_time:444590ms step_avg:74.09ms +[2025-09-03 04:58:03] [Rank 0] step:6021/10000 train_time:446002ms step_avg:74.07ms +[2025-09-03 04:58:03] [Rank 0] step:6021/10000 train_time:446002ms step_avg:74.07ms +[2025-09-03 04:58:05] [Rank 0] step:6041/10000 train_time:447560ms step_avg:74.09ms +[2025-09-03 04:58:05] [Rank 0] step:6041/10000 train_time:447560ms step_avg:74.09ms +[2025-09-03 04:58:06] [Rank 0] step:6061/10000 train_time:449126ms step_avg:74.10ms +[2025-09-03 04:58:06] [Rank 0] step:6061/10000 train_time:449126ms step_avg:74.10ms +[2025-09-03 04:58:08] [Rank 0] step:6081/10000 train_time:450688ms step_avg:74.11ms +[2025-09-03 04:58:08] [Rank 0] step:6081/10000 train_time:450688ms step_avg:74.11ms +[2025-09-03 04:58:10] [Rank 0] step:6101/10000 train_time:452251ms step_avg:74.13ms +[2025-09-03 04:58:10] [Rank 0] step:6101/10000 train_time:452251ms step_avg:74.13ms +[2025-09-03 04:58:11] [Rank 0] step:6121/10000 train_time:454081ms step_avg:74.18ms +[2025-09-03 04:58:11] [Rank 0] step:6121/10000 train_time:454081ms step_avg:74.18ms +[2025-09-03 04:58:13] [Rank 
0] step:6141/10000 train_time:455651ms step_avg:74.20ms +[2025-09-03 04:58:13] [Rank 0] step:6141/10000 train_time:455651ms step_avg:74.20ms +[2025-09-03 04:58:15] [Rank 0] step:6161/10000 train_time:457212ms step_avg:74.21ms +[2025-09-03 04:58:15] [Rank 0] step:6161/10000 train_time:457212ms step_avg:74.21ms +[2025-09-03 04:58:16] [Rank 0] step:6181/10000 train_time:458773ms step_avg:74.22ms +[2025-09-03 04:58:16] [Rank 0] step:6181/10000 train_time:458773ms step_avg:74.22ms +[2025-09-03 04:58:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:58:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:58:29] [Rank 0] PRINT: step:6200/10000 val_loss:3.9563 svd_entropy: attn_qk:H=0.7444,top10E=0.28,eRank=164.2,q75/q25=86.84 attn_vo:H=0.7727,top10E=0.17,eRank=245.3,q75/q25=inf mlp_w1:H=0.7837,top10E=0.26,eRank=201.9,q75/q25=17.46 mlp_w2:H=0.8519,top10E=0.14,eRank=295.1,q75/q25=30.10 vo_prod:H=0.6477,top10E=0.25,eRank=107.5,q75/q25=inf train_time:460494ms step_avg:74.27ms +[2025-09-03 04:58:29] [Rank 0] PRINT: step:6200/10000 val_loss:3.9563 svd_entropy: attn_qk:H=0.7444,top10E=0.28,eRank=164.2,q75/q25=86.84 attn_vo:H=0.7727,top10E=0.17,eRank=245.3,q75/q25=inf mlp_w1:H=0.7837,top10E=0.26,eRank=201.9,q75/q25=17.46 mlp_w2:H=0.8519,top10E=0.14,eRank=295.1,q75/q25=30.10 vo_prod:H=0.6477,top10E=0.25,eRank=107.5,q75/q25=inf train_time:460494ms step_avg:74.27ms +[2025-09-03 04:58:29] [Rank 0] step:6201/10000 train_time:460505ms step_avg:74.26ms +[2025-09-03 04:58:29] [Rank 0] step:6201/10000 train_time:460505ms step_avg:74.26ms +[2025-09-03 04:58:31] [Rank 0] step:6221/10000 train_time:461915ms step_avg:74.25ms +[2025-09-03 04:58:31] [Rank 0] step:6221/10000 train_time:461915ms step_avg:74.25ms +[2025-09-03 04:58:33] [Rank 0] step:6241/10000 train_time:463473ms step_avg:74.26ms +[2025-09-03 
04:58:33] [Rank 0] step:6241/10000 train_time:463473ms step_avg:74.26ms +[2025-09-03 04:58:34] [Rank 0] step:6261/10000 train_time:465034ms step_avg:74.27ms +[2025-09-03 04:58:34] [Rank 0] step:6261/10000 train_time:465034ms step_avg:74.27ms +[2025-09-03 04:58:36] [Rank 0] step:6281/10000 train_time:466602ms step_avg:74.29ms +[2025-09-03 04:58:36] [Rank 0] step:6281/10000 train_time:466602ms step_avg:74.29ms +[2025-09-03 04:58:37] [Rank 0] step:6301/10000 train_time:468167ms step_avg:74.30ms +[2025-09-03 04:58:37] [Rank 0] step:6301/10000 train_time:468167ms step_avg:74.30ms +[2025-09-03 04:58:39] [Rank 0] step:6321/10000 train_time:469729ms step_avg:74.31ms +[2025-09-03 04:58:39] [Rank 0] step:6321/10000 train_time:469729ms step_avg:74.31ms +[2025-09-03 04:58:40] [Rank 0] step:6341/10000 train_time:471296ms step_avg:74.33ms +[2025-09-03 04:58:40] [Rank 0] step:6341/10000 train_time:471296ms step_avg:74.33ms +[2025-09-03 04:58:42] [Rank 0] step:6361/10000 train_time:472867ms step_avg:74.34ms +[2025-09-03 04:58:42] [Rank 0] step:6361/10000 train_time:472867ms step_avg:74.34ms +[2025-09-03 04:58:44] [Rank 0] step:6381/10000 train_time:474437ms step_avg:74.35ms +[2025-09-03 04:58:44] [Rank 0] step:6381/10000 train_time:474437ms step_avg:74.35ms +[2025-09-03 04:58:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:58:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:58:57] [Rank 0] PRINT: step:6400/10000 val_loss:3.9388 svd_entropy: attn_qk:H=0.7461,top10E=0.27,eRank=165.6,q75/q25=87.58 attn_vo:H=0.7744,top10E=0.17,eRank=247.3,q75/q25=inf mlp_w1:H=0.7863,top10E=0.26,eRank=205.1,q75/q25=17.86 mlp_w2:H=0.8530,top10E=0.14,eRank=297.3,q75/q25=30.23 vo_prod:H=0.6499,top10E=0.25,eRank=109.2,q75/q25=inf train_time:476159ms step_avg:74.40ms +[2025-09-03 04:58:57] [Rank 0] PRINT: step:6400/10000 val_loss:3.9388 svd_entropy: attn_qk:H=0.7461,top10E=0.27,eRank=165.6,q75/q25=87.58 attn_vo:H=0.7744,top10E=0.17,eRank=247.3,q75/q25=inf mlp_w1:H=0.7863,top10E=0.26,eRank=205.1,q75/q25=17.86 mlp_w2:H=0.8530,top10E=0.14,eRank=297.3,q75/q25=30.23 vo_prod:H=0.6499,top10E=0.25,eRank=109.2,q75/q25=inf train_time:476159ms step_avg:74.40ms +[2025-09-03 04:58:57] [Rank 0] step:6401/10000 train_time:476170ms step_avg:74.39ms +[2025-09-03 04:58:57] [Rank 0] step:6401/10000 train_time:476170ms step_avg:74.39ms +[2025-09-03 04:58:58] [Rank 0] step:6421/10000 train_time:477582ms step_avg:74.38ms +[2025-09-03 04:58:58] [Rank 0] step:6421/10000 train_time:477582ms step_avg:74.38ms +[2025-09-03 04:59:00] [Rank 0] step:6441/10000 train_time:479144ms step_avg:74.39ms +[2025-09-03 04:59:00] [Rank 0] step:6441/10000 train_time:479144ms step_avg:74.39ms +[2025-09-03 04:59:02] [Rank 0] step:6461/10000 train_time:480709ms step_avg:74.40ms +[2025-09-03 04:59:02] [Rank 0] step:6461/10000 train_time:480709ms step_avg:74.40ms +[2025-09-03 04:59:03] [Rank 0] step:6481/10000 train_time:482283ms step_avg:74.41ms +[2025-09-03 04:59:03] [Rank 0] step:6481/10000 train_time:482283ms step_avg:74.41ms +[2025-09-03 04:59:05] [Rank 0] step:6501/10000 train_time:483844ms step_avg:74.43ms +[2025-09-03 04:59:05] [Rank 0] step:6501/10000 train_time:483844ms step_avg:74.43ms +[2025-09-03 04:59:06] [Rank 0] step:6521/10000 train_time:485403ms step_avg:74.44ms +[2025-09-03 04:59:06] [Rank 0] step:6521/10000 train_time:485403ms step_avg:74.44ms +[2025-09-03 04:59:08] [Rank 
0] step:6541/10000 train_time:486967ms step_avg:74.45ms +[2025-09-03 04:59:08] [Rank 0] step:6541/10000 train_time:486967ms step_avg:74.45ms +[2025-09-03 04:59:09] [Rank 0] step:6561/10000 train_time:488536ms step_avg:74.46ms +[2025-09-03 04:59:09] [Rank 0] step:6561/10000 train_time:488536ms step_avg:74.46ms +[2025-09-03 04:59:11] [Rank 0] step:6581/10000 train_time:490097ms step_avg:74.47ms +[2025-09-03 04:59:11] [Rank 0] step:6581/10000 train_time:490097ms step_avg:74.47ms +[2025-09-03 04:59:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:59:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:59:24] [Rank 0] PRINT: step:6600/10000 val_loss:3.9273 svd_entropy: attn_qk:H=0.7478,top10E=0.27,eRank=167.0,q75/q25=87.38 attn_vo:H=0.7760,top10E=0.17,eRank=249.2,q75/q25=inf mlp_w1:H=0.7885,top10E=0.25,eRank=207.9,q75/q25=18.20 mlp_w2:H=0.8541,top10E=0.13,eRank=299.5,q75/q25=30.84 vo_prod:H=0.6518,top10E=0.24,eRank=110.7,q75/q25=inf train_time:491819ms step_avg:74.52ms +[2025-09-03 04:59:24] [Rank 0] PRINT: step:6600/10000 val_loss:3.9273 svd_entropy: attn_qk:H=0.7478,top10E=0.27,eRank=167.0,q75/q25=87.38 attn_vo:H=0.7760,top10E=0.17,eRank=249.2,q75/q25=inf mlp_w1:H=0.7885,top10E=0.25,eRank=207.9,q75/q25=18.20 mlp_w2:H=0.8541,top10E=0.13,eRank=299.5,q75/q25=30.84 vo_prod:H=0.6518,top10E=0.24,eRank=110.7,q75/q25=inf train_time:491819ms step_avg:74.52ms +[2025-09-03 04:59:24] [Rank 0] step:6601/10000 train_time:491830ms step_avg:74.51ms +[2025-09-03 04:59:24] [Rank 0] step:6601/10000 train_time:491830ms step_avg:74.51ms +[2025-09-03 04:59:26] [Rank 0] step:6621/10000 train_time:493246ms step_avg:74.50ms +[2025-09-03 04:59:26] [Rank 0] step:6621/10000 train_time:493246ms step_avg:74.50ms +[2025-09-03 04:59:27] [Rank 0] step:6641/10000 train_time:494812ms step_avg:74.51ms +[2025-09-03 
04:59:27] [Rank 0] step:6641/10000 train_time:494812ms step_avg:74.51ms +[2025-09-03 04:59:29] [Rank 0] step:6661/10000 train_time:496374ms step_avg:74.52ms +[2025-09-03 04:59:29] [Rank 0] step:6661/10000 train_time:496374ms step_avg:74.52ms +[2025-09-03 04:59:31] [Rank 0] step:6681/10000 train_time:497953ms step_avg:74.53ms +[2025-09-03 04:59:31] [Rank 0] step:6681/10000 train_time:497953ms step_avg:74.53ms +[2025-09-03 04:59:32] [Rank 0] step:6701/10000 train_time:499548ms step_avg:74.55ms +[2025-09-03 04:59:32] [Rank 0] step:6701/10000 train_time:499548ms step_avg:74.55ms +[2025-09-03 04:59:34] [Rank 0] step:6721/10000 train_time:501139ms step_avg:74.56ms +[2025-09-03 04:59:34] [Rank 0] step:6721/10000 train_time:501139ms step_avg:74.56ms +[2025-09-03 04:59:35] [Rank 0] step:6741/10000 train_time:502726ms step_avg:74.58ms +[2025-09-03 04:59:35] [Rank 0] step:6741/10000 train_time:502726ms step_avg:74.58ms +[2025-09-03 04:59:37] [Rank 0] step:6761/10000 train_time:504318ms step_avg:74.59ms +[2025-09-03 04:59:37] [Rank 0] step:6761/10000 train_time:504318ms step_avg:74.59ms +[2025-09-03 04:59:39] [Rank 0] step:6781/10000 train_time:505914ms step_avg:74.61ms +[2025-09-03 04:59:39] [Rank 0] step:6781/10000 train_time:505914ms step_avg:74.61ms +[2025-09-03 04:59:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 04:59:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 04:59:52] [Rank 0] PRINT: step:6800/10000 val_loss:3.9136 svd_entropy: attn_qk:H=0.7492,top10E=0.27,eRank=168.2,q75/q25=87.29 attn_vo:H=0.7775,top10E=0.17,eRank=251.0,q75/q25=inf mlp_w1:H=0.7905,top10E=0.25,eRank=210.5,q75/q25=18.50 mlp_w2:H=0.8551,top10E=0.13,eRank=301.5,q75/q25=31.10 vo_prod:H=0.6538,top10E=0.24,eRank=112.3,q75/q25=inf train_time:507669ms step_avg:74.66ms +[2025-09-03 04:59:52] [Rank 0] PRINT: step:6800/10000 val_loss:3.9136 svd_entropy: attn_qk:H=0.7492,top10E=0.27,eRank=168.2,q75/q25=87.29 attn_vo:H=0.7775,top10E=0.17,eRank=251.0,q75/q25=inf mlp_w1:H=0.7905,top10E=0.25,eRank=210.5,q75/q25=18.50 mlp_w2:H=0.8551,top10E=0.13,eRank=301.5,q75/q25=31.10 vo_prod:H=0.6538,top10E=0.24,eRank=112.3,q75/q25=inf train_time:507669ms step_avg:74.66ms +[2025-09-03 04:59:52] [Rank 0] step:6801/10000 train_time:507681ms step_avg:74.65ms +[2025-09-03 04:59:52] [Rank 0] step:6801/10000 train_time:507681ms step_avg:74.65ms +[2025-09-03 04:59:53] [Rank 0] step:6821/10000 train_time:509126ms step_avg:74.64ms +[2025-09-03 04:59:53] [Rank 0] step:6821/10000 train_time:509126ms step_avg:74.64ms +[2025-09-03 04:59:55] [Rank 0] step:6841/10000 train_time:510711ms step_avg:74.65ms +[2025-09-03 04:59:55] [Rank 0] step:6841/10000 train_time:510711ms step_avg:74.65ms +[2025-09-03 04:59:57] [Rank 0] step:6861/10000 train_time:512303ms step_avg:74.67ms +[2025-09-03 04:59:57] [Rank 0] step:6861/10000 train_time:512303ms step_avg:74.67ms +[2025-09-03 04:59:58] [Rank 0] step:6881/10000 train_time:513893ms step_avg:74.68ms +[2025-09-03 04:59:58] [Rank 0] step:6881/10000 train_time:513893ms step_avg:74.68ms +[2025-09-03 05:00:00] [Rank 0] step:6901/10000 train_time:515482ms step_avg:74.70ms +[2025-09-03 05:00:00] [Rank 0] step:6901/10000 train_time:515482ms step_avg:74.70ms +[2025-09-03 05:00:01] [Rank 0] step:6921/10000 train_time:517071ms step_avg:74.71ms +[2025-09-03 05:00:01] [Rank 0] step:6921/10000 train_time:517071ms step_avg:74.71ms +[2025-09-03 05:00:03] [Rank 
0] step:6941/10000 train_time:518669ms step_avg:74.73ms +[2025-09-03 05:00:03] [Rank 0] step:6941/10000 train_time:518669ms step_avg:74.73ms +[2025-09-03 05:00:05] [Rank 0] step:6961/10000 train_time:520276ms step_avg:74.74ms +[2025-09-03 05:00:05] [Rank 0] step:6961/10000 train_time:520276ms step_avg:74.74ms +[2025-09-03 05:00:06] [Rank 0] step:6981/10000 train_time:521872ms step_avg:74.76ms +[2025-09-03 05:00:06] [Rank 0] step:6981/10000 train_time:521872ms step_avg:74.76ms +[2025-09-03 05:00:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:00:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:00:20] [Rank 0] PRINT: step:7000/10000 val_loss:3.9000 svd_entropy: attn_qk:H=0.7506,top10E=0.27,eRank=169.3,q75/q25=87.61 attn_vo:H=0.7788,top10E=0.16,eRank=252.6,q75/q25=inf mlp_w1:H=0.7923,top10E=0.25,eRank=212.9,q75/q25=18.78 mlp_w2:H=0.8560,top10E=0.13,eRank=303.4,q75/q25=31.48 vo_prod:H=0.6555,top10E=0.24,eRank=113.6,q75/q25=inf train_time:523627ms step_avg:74.80ms +[2025-09-03 05:00:20] [Rank 0] PRINT: step:7000/10000 val_loss:3.9000 svd_entropy: attn_qk:H=0.7506,top10E=0.27,eRank=169.3,q75/q25=87.61 attn_vo:H=0.7788,top10E=0.16,eRank=252.6,q75/q25=inf mlp_w1:H=0.7923,top10E=0.25,eRank=212.9,q75/q25=18.78 mlp_w2:H=0.8560,top10E=0.13,eRank=303.4,q75/q25=31.48 vo_prod:H=0.6555,top10E=0.24,eRank=113.6,q75/q25=inf train_time:523627ms step_avg:74.80ms +[2025-09-03 05:00:20] [Rank 0] step:7001/10000 train_time:523639ms step_avg:74.79ms +[2025-09-03 05:00:20] [Rank 0] step:7001/10000 train_time:523639ms step_avg:74.79ms +[2025-09-03 05:00:21] [Rank 0] step:7021/10000 train_time:525090ms step_avg:74.79ms +[2025-09-03 05:00:21] [Rank 0] step:7021/10000 train_time:525090ms step_avg:74.79ms +[2025-09-03 05:00:23] [Rank 0] step:7041/10000 train_time:526680ms step_avg:74.80ms +[2025-09-03 
05:00:23] [Rank 0] step:7041/10000 train_time:526680ms step_avg:74.80ms +[2025-09-03 05:00:24] [Rank 0] step:7061/10000 train_time:528268ms step_avg:74.81ms +[2025-09-03 05:00:24] [Rank 0] step:7061/10000 train_time:528268ms step_avg:74.81ms +[2025-09-03 05:00:26] [Rank 0] step:7081/10000 train_time:529858ms step_avg:74.83ms +[2025-09-03 05:00:26] [Rank 0] step:7081/10000 train_time:529858ms step_avg:74.83ms +[2025-09-03 05:00:28] [Rank 0] step:7101/10000 train_time:531449ms step_avg:74.84ms +[2025-09-03 05:00:28] [Rank 0] step:7101/10000 train_time:531449ms step_avg:74.84ms +[2025-09-03 05:00:29] [Rank 0] step:7121/10000 train_time:533042ms step_avg:74.85ms +[2025-09-03 05:00:29] [Rank 0] step:7121/10000 train_time:533042ms step_avg:74.85ms +[2025-09-03 05:00:31] [Rank 0] step:7141/10000 train_time:534636ms step_avg:74.87ms +[2025-09-03 05:00:31] [Rank 0] step:7141/10000 train_time:534636ms step_avg:74.87ms +[2025-09-03 05:00:32] [Rank 0] step:7161/10000 train_time:536229ms step_avg:74.88ms +[2025-09-03 05:00:32] [Rank 0] step:7161/10000 train_time:536229ms step_avg:74.88ms +[2025-09-03 05:00:34] [Rank 0] step:7181/10000 train_time:537819ms step_avg:74.89ms +[2025-09-03 05:00:34] [Rank 0] step:7181/10000 train_time:537819ms step_avg:74.89ms +[2025-09-03 05:00:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:00:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:00:47] [Rank 0] PRINT: step:7200/10000 val_loss:3.8879 svd_entropy: attn_qk:H=0.7518,top10E=0.27,eRank=170.4,q75/q25=87.52 attn_vo:H=0.7800,top10E=0.16,eRank=254.1,q75/q25=inf mlp_w1:H=0.7940,top10E=0.25,eRank=215.0,q75/q25=18.99 mlp_w2:H=0.8569,top10E=0.13,eRank=305.2,q75/q25=31.48 vo_prod:H=0.6571,top10E=0.24,eRank=115.0,q75/q25=inf train_time:539573ms step_avg:74.94ms +[2025-09-03 05:00:47] [Rank 0] PRINT: step:7200/10000 val_loss:3.8879 svd_entropy: attn_qk:H=0.7518,top10E=0.27,eRank=170.4,q75/q25=87.52 attn_vo:H=0.7800,top10E=0.16,eRank=254.1,q75/q25=inf mlp_w1:H=0.7940,top10E=0.25,eRank=215.0,q75/q25=18.99 mlp_w2:H=0.8569,top10E=0.13,eRank=305.2,q75/q25=31.48 vo_prod:H=0.6571,top10E=0.24,eRank=115.0,q75/q25=inf train_time:539573ms step_avg:74.94ms +[2025-09-03 05:00:47] [Rank 0] step:7201/10000 train_time:539584ms step_avg:74.93ms +[2025-09-03 05:00:47] [Rank 0] step:7201/10000 train_time:539584ms step_avg:74.93ms +[2025-09-03 05:00:49] [Rank 0] step:7221/10000 train_time:541043ms step_avg:74.93ms +[2025-09-03 05:00:49] [Rank 0] step:7221/10000 train_time:541043ms step_avg:74.93ms +[2025-09-03 05:00:51] [Rank 0] step:7241/10000 train_time:542631ms step_avg:74.94ms +[2025-09-03 05:00:51] [Rank 0] step:7241/10000 train_time:542631ms step_avg:74.94ms +[2025-09-03 05:00:52] [Rank 0] step:7261/10000 train_time:544219ms step_avg:74.95ms +[2025-09-03 05:00:52] [Rank 0] step:7261/10000 train_time:544219ms step_avg:74.95ms +[2025-09-03 05:00:54] [Rank 0] step:7281/10000 train_time:545818ms step_avg:74.96ms +[2025-09-03 05:00:54] [Rank 0] step:7281/10000 train_time:545818ms step_avg:74.96ms +[2025-09-03 05:00:55] [Rank 0] step:7301/10000 train_time:547407ms step_avg:74.98ms +[2025-09-03 05:00:55] [Rank 0] step:7301/10000 train_time:547407ms step_avg:74.98ms +[2025-09-03 05:00:57] [Rank 0] step:7321/10000 train_time:549006ms step_avg:74.99ms +[2025-09-03 05:00:57] [Rank 0] step:7321/10000 train_time:549006ms step_avg:74.99ms +[2025-09-03 05:00:59] [Rank 
0] step:7341/10000 train_time:550597ms step_avg:75.00ms +[2025-09-03 05:00:59] [Rank 0] step:7341/10000 train_time:550597ms step_avg:75.00ms +[2025-09-03 05:01:00] [Rank 0] step:7361/10000 train_time:552193ms step_avg:75.02ms +[2025-09-03 05:01:00] [Rank 0] step:7361/10000 train_time:552193ms step_avg:75.02ms +[2025-09-03 05:01:02] [Rank 0] step:7381/10000 train_time:553792ms step_avg:75.03ms +[2025-09-03 05:01:02] [Rank 0] step:7381/10000 train_time:553792ms step_avg:75.03ms +[2025-09-03 05:01:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:01:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:01:15] [Rank 0] PRINT: step:7400/10000 val_loss:3.8679 svd_entropy: attn_qk:H=0.7530,top10E=0.27,eRank=171.4,q75/q25=87.43 attn_vo:H=0.7811,top10E=0.16,eRank=255.4,q75/q25=inf mlp_w1:H=0.7954,top10E=0.25,eRank=216.9,q75/q25=19.17 mlp_w2:H=0.8577,top10E=0.13,eRank=306.8,q75/q25=31.66 vo_prod:H=0.6585,top10E=0.23,eRank=116.1,q75/q25=inf train_time:555533ms step_avg:75.07ms +[2025-09-03 05:01:15] [Rank 0] PRINT: step:7400/10000 val_loss:3.8679 svd_entropy: attn_qk:H=0.7530,top10E=0.27,eRank=171.4,q75/q25=87.43 attn_vo:H=0.7811,top10E=0.16,eRank=255.4,q75/q25=inf mlp_w1:H=0.7954,top10E=0.25,eRank=216.9,q75/q25=19.17 mlp_w2:H=0.8577,top10E=0.13,eRank=306.8,q75/q25=31.66 vo_prod:H=0.6585,top10E=0.23,eRank=116.1,q75/q25=inf train_time:555533ms step_avg:75.07ms +[2025-09-03 05:01:15] [Rank 0] step:7401/10000 train_time:555545ms step_avg:75.06ms +[2025-09-03 05:01:15] [Rank 0] step:7401/10000 train_time:555545ms step_avg:75.06ms +[2025-09-03 05:01:17] [Rank 0] step:7421/10000 train_time:556996ms step_avg:75.06ms +[2025-09-03 05:01:17] [Rank 0] step:7421/10000 train_time:556996ms step_avg:75.06ms +[2025-09-03 05:01:18] [Rank 0] step:7441/10000 train_time:558587ms step_avg:75.07ms +[2025-09-03 
05:01:18] [Rank 0] step:7441/10000 train_time:558587ms step_avg:75.07ms +[2025-09-03 05:01:20] [Rank 0] step:7461/10000 train_time:560177ms step_avg:75.08ms +[2025-09-03 05:01:20] [Rank 0] step:7461/10000 train_time:560177ms step_avg:75.08ms +[2025-09-03 05:01:21] [Rank 0] step:7481/10000 train_time:561778ms step_avg:75.09ms +[2025-09-03 05:01:21] [Rank 0] step:7481/10000 train_time:561778ms step_avg:75.09ms +[2025-09-03 05:01:23] [Rank 0] step:7501/10000 train_time:563378ms step_avg:75.11ms +[2025-09-03 05:01:23] [Rank 0] step:7501/10000 train_time:563378ms step_avg:75.11ms +[2025-09-03 05:01:25] [Rank 0] step:7521/10000 train_time:564976ms step_avg:75.12ms +[2025-09-03 05:01:25] [Rank 0] step:7521/10000 train_time:564976ms step_avg:75.12ms +[2025-09-03 05:01:26] [Rank 0] step:7541/10000 train_time:566588ms step_avg:75.13ms +[2025-09-03 05:01:26] [Rank 0] step:7541/10000 train_time:566588ms step_avg:75.13ms +[2025-09-03 05:01:28] [Rank 0] step:7561/10000 train_time:568176ms step_avg:75.15ms +[2025-09-03 05:01:28] [Rank 0] step:7561/10000 train_time:568176ms step_avg:75.15ms +[2025-09-03 05:01:29] [Rank 0] step:7581/10000 train_time:569782ms step_avg:75.16ms +[2025-09-03 05:01:29] [Rank 0] step:7581/10000 train_time:569782ms step_avg:75.16ms +[2025-09-03 05:01:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:01:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:01:43] [Rank 0] PRINT: step:7600/10000 val_loss:3.8651 svd_entropy: attn_qk:H=0.7540,top10E=0.26,eRank=172.3,q75/q25=87.52 attn_vo:H=0.7820,top10E=0.16,eRank=256.6,q75/q25=inf mlp_w1:H=0.7967,top10E=0.24,eRank=218.6,q75/q25=19.35 mlp_w2:H=0.8585,top10E=0.13,eRank=308.4,q75/q25=31.71 vo_prod:H=0.6598,top10E=0.23,eRank=117.2,q75/q25=inf train_time:571551ms step_avg:75.20ms +[2025-09-03 05:01:43] [Rank 0] PRINT: step:7600/10000 val_loss:3.8651 svd_entropy: attn_qk:H=0.7540,top10E=0.26,eRank=172.3,q75/q25=87.52 attn_vo:H=0.7820,top10E=0.16,eRank=256.6,q75/q25=inf mlp_w1:H=0.7967,top10E=0.24,eRank=218.6,q75/q25=19.35 mlp_w2:H=0.8585,top10E=0.13,eRank=308.4,q75/q25=31.71 vo_prod:H=0.6598,top10E=0.23,eRank=117.2,q75/q25=inf train_time:571551ms step_avg:75.20ms +[2025-09-03 05:01:43] [Rank 0] step:7601/10000 train_time:571562ms step_avg:75.20ms +[2025-09-03 05:01:43] [Rank 0] step:7601/10000 train_time:571562ms step_avg:75.20ms +[2025-09-03 05:01:44] [Rank 0] step:7621/10000 train_time:572999ms step_avg:75.19ms +[2025-09-03 05:01:44] [Rank 0] step:7621/10000 train_time:572999ms step_avg:75.19ms +[2025-09-03 05:01:46] [Rank 0] step:7641/10000 train_time:574594ms step_avg:75.20ms +[2025-09-03 05:01:46] [Rank 0] step:7641/10000 train_time:574594ms step_avg:75.20ms +[2025-09-03 05:01:48] [Rank 0] step:7661/10000 train_time:576191ms step_avg:75.21ms +[2025-09-03 05:01:48] [Rank 0] step:7661/10000 train_time:576191ms step_avg:75.21ms +[2025-09-03 05:01:49] [Rank 0] step:7681/10000 train_time:577782ms step_avg:75.22ms +[2025-09-03 05:01:49] [Rank 0] step:7681/10000 train_time:577782ms step_avg:75.22ms +[2025-09-03 05:01:51] [Rank 0] step:7701/10000 train_time:579373ms step_avg:75.23ms +[2025-09-03 05:01:51] [Rank 0] step:7701/10000 train_time:579373ms step_avg:75.23ms +[2025-09-03 05:01:52] [Rank 0] step:7721/10000 train_time:581011ms step_avg:75.25ms +[2025-09-03 05:01:52] [Rank 0] step:7721/10000 train_time:581011ms step_avg:75.25ms +[2025-09-03 05:01:54] [Rank 
0] step:7741/10000 train_time:582607ms step_avg:75.26ms +[2025-09-03 05:01:54] [Rank 0] step:7741/10000 train_time:582607ms step_avg:75.26ms +[2025-09-03 05:01:56] [Rank 0] step:7761/10000 train_time:584207ms step_avg:75.27ms +[2025-09-03 05:01:56] [Rank 0] step:7761/10000 train_time:584207ms step_avg:75.27ms +[2025-09-03 05:01:57] [Rank 0] step:7781/10000 train_time:585814ms step_avg:75.29ms +[2025-09-03 05:01:57] [Rank 0] step:7781/10000 train_time:585814ms step_avg:75.29ms +[2025-09-03 05:01:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:01:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:02:10] [Rank 0] PRINT: step:7800/10000 val_loss:3.8504 svd_entropy: attn_qk:H=0.7550,top10E=0.26,eRank=173.1,q75/q25=87.36 attn_vo:H=0.7829,top10E=0.16,eRank=257.8,q75/q25=inf mlp_w1:H=0.7979,top10E=0.24,eRank=220.3,q75/q25=19.55 mlp_w2:H=0.8592,top10E=0.13,eRank=309.9,q75/q25=31.88 vo_prod:H=0.6610,top10E=0.23,eRank=118.3,q75/q25=inf train_time:587580ms step_avg:75.33ms +[2025-09-03 05:02:10] [Rank 0] PRINT: step:7800/10000 val_loss:3.8504 svd_entropy: attn_qk:H=0.7550,top10E=0.26,eRank=173.1,q75/q25=87.36 attn_vo:H=0.7829,top10E=0.16,eRank=257.8,q75/q25=inf mlp_w1:H=0.7979,top10E=0.24,eRank=220.3,q75/q25=19.55 mlp_w2:H=0.8592,top10E=0.13,eRank=309.9,q75/q25=31.88 vo_prod:H=0.6610,top10E=0.23,eRank=118.3,q75/q25=inf train_time:587580ms step_avg:75.33ms +[2025-09-03 05:02:11] [Rank 0] step:7801/10000 train_time:587591ms step_avg:75.32ms +[2025-09-03 05:02:11] [Rank 0] step:7801/10000 train_time:587591ms step_avg:75.32ms +[2025-09-03 05:02:12] [Rank 0] step:7821/10000 train_time:589030ms step_avg:75.31ms +[2025-09-03 05:02:12] [Rank 0] step:7821/10000 train_time:589030ms step_avg:75.31ms +[2025-09-03 05:02:14] [Rank 0] step:7841/10000 train_time:590622ms step_avg:75.32ms +[2025-09-03 
05:02:14] [Rank 0] step:7841/10000 train_time:590622ms step_avg:75.32ms +[2025-09-03 05:02:15] [Rank 0] step:7861/10000 train_time:592221ms step_avg:75.34ms +[2025-09-03 05:02:15] [Rank 0] step:7861/10000 train_time:592221ms step_avg:75.34ms +[2025-09-03 05:02:17] [Rank 0] step:7881/10000 train_time:593825ms step_avg:75.35ms +[2025-09-03 05:02:17] [Rank 0] step:7881/10000 train_time:593825ms step_avg:75.35ms +[2025-09-03 05:02:19] [Rank 0] step:7901/10000 train_time:595422ms step_avg:75.36ms +[2025-09-03 05:02:19] [Rank 0] step:7901/10000 train_time:595422ms step_avg:75.36ms +[2025-09-03 05:02:20] [Rank 0] step:7921/10000 train_time:597019ms step_avg:75.37ms +[2025-09-03 05:02:20] [Rank 0] step:7921/10000 train_time:597019ms step_avg:75.37ms +[2025-09-03 05:02:22] [Rank 0] step:7941/10000 train_time:598624ms step_avg:75.38ms +[2025-09-03 05:02:22] [Rank 0] step:7941/10000 train_time:598624ms step_avg:75.38ms +[2025-09-03 05:02:23] [Rank 0] step:7961/10000 train_time:600229ms step_avg:75.40ms +[2025-09-03 05:02:23] [Rank 0] step:7961/10000 train_time:600229ms step_avg:75.40ms +[2025-09-03 05:02:25] [Rank 0] step:7981/10000 train_time:601824ms step_avg:75.41ms +[2025-09-03 05:02:25] [Rank 0] step:7981/10000 train_time:601824ms step_avg:75.41ms +[2025-09-03 05:02:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:02:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:02:38] [Rank 0] PRINT: step:8000/10000 val_loss:3.8347 svd_entropy: attn_qk:H=0.7558,top10E=0.26,eRank=173.8,q75/q25=87.41 attn_vo:H=0.7837,top10E=0.16,eRank=258.8,q75/q25=inf mlp_w1:H=0.7990,top10E=0.24,eRank=221.6,q75/q25=19.60 mlp_w2:H=0.8598,top10E=0.13,eRank=311.3,q75/q25=31.88 vo_prod:H=0.6623,top10E=0.23,eRank=119.3,q75/q25=inf train_time:603580ms step_avg:75.45ms +[2025-09-03 05:02:38] [Rank 0] PRINT: step:8000/10000 val_loss:3.8347 svd_entropy: attn_qk:H=0.7558,top10E=0.26,eRank=173.8,q75/q25=87.41 attn_vo:H=0.7837,top10E=0.16,eRank=258.8,q75/q25=inf mlp_w1:H=0.7990,top10E=0.24,eRank=221.6,q75/q25=19.60 mlp_w2:H=0.8598,top10E=0.13,eRank=311.3,q75/q25=31.88 vo_prod:H=0.6623,top10E=0.23,eRank=119.3,q75/q25=inf train_time:603580ms step_avg:75.45ms +[2025-09-03 05:02:38] [Rank 0] step:8001/10000 train_time:603591ms step_avg:75.44ms +[2025-09-03 05:02:38] [Rank 0] step:8001/10000 train_time:603591ms step_avg:75.44ms +[2025-09-03 05:02:40] [Rank 0] step:8021/10000 train_time:605044ms step_avg:75.43ms +[2025-09-03 05:02:40] [Rank 0] step:8021/10000 train_time:605044ms step_avg:75.43ms +[2025-09-03 05:02:42] [Rank 0] step:8041/10000 train_time:606651ms step_avg:75.44ms +[2025-09-03 05:02:42] [Rank 0] step:8041/10000 train_time:606651ms step_avg:75.44ms +[2025-09-03 05:02:43] [Rank 0] step:8061/10000 train_time:608245ms step_avg:75.46ms +[2025-09-03 05:02:43] [Rank 0] step:8061/10000 train_time:608245ms step_avg:75.46ms +[2025-09-03 05:02:45] [Rank 0] step:8081/10000 train_time:609834ms step_avg:75.47ms +[2025-09-03 05:02:45] [Rank 0] step:8081/10000 train_time:609834ms step_avg:75.47ms +[2025-09-03 05:02:46] [Rank 0] step:8101/10000 train_time:611439ms step_avg:75.48ms +[2025-09-03 05:02:46] [Rank 0] step:8101/10000 train_time:611439ms step_avg:75.48ms +[2025-09-03 05:02:48] [Rank 0] step:8121/10000 train_time:613035ms step_avg:75.49ms +[2025-09-03 05:02:48] [Rank 0] step:8121/10000 train_time:613035ms step_avg:75.49ms +[2025-09-03 05:02:50] [Rank 
0] step:8141/10000 train_time:614737ms step_avg:75.51ms +[2025-09-03 05:02:50] [Rank 0] step:8141/10000 train_time:614737ms step_avg:75.51ms +[2025-09-03 05:02:51] [Rank 0] step:8161/10000 train_time:616350ms step_avg:75.52ms +[2025-09-03 05:02:51] [Rank 0] step:8161/10000 train_time:616350ms step_avg:75.52ms +[2025-09-03 05:02:53] [Rank 0] step:8181/10000 train_time:617974ms step_avg:75.54ms +[2025-09-03 05:02:53] [Rank 0] step:8181/10000 train_time:617974ms step_avg:75.54ms +[2025-09-03 05:02:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:02:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:03:06] [Rank 0] PRINT: step:8200/10000 val_loss:3.8260 svd_entropy: attn_qk:H=0.7566,top10E=0.26,eRank=174.5,q75/q25=87.52 attn_vo:H=0.7844,top10E=0.16,eRank=259.7,q75/q25=inf mlp_w1:H=0.7999,top10E=0.24,eRank=222.9,q75/q25=19.69 mlp_w2:H=0.8604,top10E=0.13,eRank=312.5,q75/q25=31.99 vo_prod:H=0.6634,top10E=0.23,eRank=120.2,q75/q25=inf train_time:619788ms step_avg:75.58ms +[2025-09-03 05:03:06] [Rank 0] PRINT: step:8200/10000 val_loss:3.8260 svd_entropy: attn_qk:H=0.7566,top10E=0.26,eRank=174.5,q75/q25=87.52 attn_vo:H=0.7844,top10E=0.16,eRank=259.7,q75/q25=inf mlp_w1:H=0.7999,top10E=0.24,eRank=222.9,q75/q25=19.69 mlp_w2:H=0.8604,top10E=0.13,eRank=312.5,q75/q25=31.99 vo_prod:H=0.6634,top10E=0.23,eRank=120.2,q75/q25=inf train_time:619788ms step_avg:75.58ms +[2025-09-03 05:03:06] [Rank 0] step:8201/10000 train_time:619799ms step_avg:75.58ms +[2025-09-03 05:03:06] [Rank 0] step:8201/10000 train_time:619799ms step_avg:75.58ms +[2025-09-03 05:03:08] [Rank 0] step:8221/10000 train_time:621277ms step_avg:75.57ms +[2025-09-03 05:03:08] [Rank 0] step:8221/10000 train_time:621277ms step_avg:75.57ms +[2025-09-03 05:03:09] [Rank 0] step:8241/10000 train_time:622914ms step_avg:75.59ms +[2025-09-03 
05:03:09] [Rank 0] step:8241/10000 train_time:622914ms step_avg:75.59ms +[2025-09-03 05:03:11] [Rank 0] step:8261/10000 train_time:624537ms step_avg:75.60ms +[2025-09-03 05:03:11] [Rank 0] step:8261/10000 train_time:624537ms step_avg:75.60ms +[2025-09-03 05:03:13] [Rank 0] step:8281/10000 train_time:626169ms step_avg:75.62ms +[2025-09-03 05:03:13] [Rank 0] step:8281/10000 train_time:626169ms step_avg:75.62ms +[2025-09-03 05:03:14] [Rank 0] step:8301/10000 train_time:627796ms step_avg:75.63ms +[2025-09-03 05:03:14] [Rank 0] step:8301/10000 train_time:627796ms step_avg:75.63ms +[2025-09-03 05:03:16] [Rank 0] step:8321/10000 train_time:629412ms step_avg:75.64ms +[2025-09-03 05:03:16] [Rank 0] step:8321/10000 train_time:629412ms step_avg:75.64ms +[2025-09-03 05:03:17] [Rank 0] step:8341/10000 train_time:631044ms step_avg:75.66ms +[2025-09-03 05:03:17] [Rank 0] step:8341/10000 train_time:631044ms step_avg:75.66ms +[2025-09-03 05:03:19] [Rank 0] step:8361/10000 train_time:632672ms step_avg:75.67ms +[2025-09-03 05:03:19] [Rank 0] step:8361/10000 train_time:632672ms step_avg:75.67ms +[2025-09-03 05:03:21] [Rank 0] step:8381/10000 train_time:634298ms step_avg:75.68ms +[2025-09-03 05:03:21] [Rank 0] step:8381/10000 train_time:634298ms step_avg:75.68ms +[2025-09-03 05:03:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:03:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:03:34] [Rank 0] PRINT: step:8400/10000 val_loss:3.8177 svd_entropy: attn_qk:H=0.7573,top10E=0.26,eRank=175.1,q75/q25=87.23 attn_vo:H=0.7851,top10E=0.16,eRank=260.6,q75/q25=inf mlp_w1:H=0.8007,top10E=0.24,eRank=224.0,q75/q25=19.73 mlp_w2:H=0.8610,top10E=0.13,eRank=313.7,q75/q25=32.03 vo_prod:H=0.6643,top10E=0.23,eRank=121.0,q75/q25=inf train_time:636087ms step_avg:75.72ms +[2025-09-03 05:03:34] [Rank 0] PRINT: step:8400/10000 val_loss:3.8177 svd_entropy: attn_qk:H=0.7573,top10E=0.26,eRank=175.1,q75/q25=87.23 attn_vo:H=0.7851,top10E=0.16,eRank=260.6,q75/q25=inf mlp_w1:H=0.8007,top10E=0.24,eRank=224.0,q75/q25=19.73 mlp_w2:H=0.8610,top10E=0.13,eRank=313.7,q75/q25=32.03 vo_prod:H=0.6643,top10E=0.23,eRank=121.0,q75/q25=inf train_time:636087ms step_avg:75.72ms +[2025-09-03 05:03:34] [Rank 0] step:8401/10000 train_time:636098ms step_avg:75.72ms +[2025-09-03 05:03:34] [Rank 0] step:8401/10000 train_time:636098ms step_avg:75.72ms +[2025-09-03 05:03:36] [Rank 0] step:8421/10000 train_time:637558ms step_avg:75.71ms +[2025-09-03 05:03:36] [Rank 0] step:8421/10000 train_time:637558ms step_avg:75.71ms +[2025-09-03 05:03:37] [Rank 0] step:8441/10000 train_time:639183ms step_avg:75.72ms +[2025-09-03 05:03:37] [Rank 0] step:8441/10000 train_time:639183ms step_avg:75.72ms +[2025-09-03 05:03:39] [Rank 0] step:8461/10000 train_time:640798ms step_avg:75.74ms +[2025-09-03 05:03:39] [Rank 0] step:8461/10000 train_time:640798ms step_avg:75.74ms +[2025-09-03 05:03:41] [Rank 0] step:8481/10000 train_time:642433ms step_avg:75.75ms +[2025-09-03 05:03:41] [Rank 0] step:8481/10000 train_time:642433ms step_avg:75.75ms +[2025-09-03 05:03:42] [Rank 0] step:8501/10000 train_time:644080ms step_avg:75.77ms +[2025-09-03 05:03:42] [Rank 0] step:8501/10000 train_time:644080ms step_avg:75.77ms +[2025-09-03 05:03:44] [Rank 0] step:8521/10000 train_time:645715ms step_avg:75.78ms +[2025-09-03 05:03:44] [Rank 0] step:8521/10000 train_time:645715ms step_avg:75.78ms +[2025-09-03 05:03:46] [Rank 
0] step:8541/10000 train_time:647354ms step_avg:75.79ms +[2025-09-03 05:03:46] [Rank 0] step:8541/10000 train_time:647354ms step_avg:75.79ms +[2025-09-03 05:03:47] [Rank 0] step:8561/10000 train_time:648984ms step_avg:75.81ms +[2025-09-03 05:03:47] [Rank 0] step:8561/10000 train_time:648984ms step_avg:75.81ms +[2025-09-03 05:03:49] [Rank 0] step:8581/10000 train_time:650616ms step_avg:75.82ms +[2025-09-03 05:03:49] [Rank 0] step:8581/10000 train_time:650616ms step_avg:75.82ms +[2025-09-03 05:03:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:03:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:04:02] [Rank 0] PRINT: step:8600/10000 val_loss:3.8089 svd_entropy: attn_qk:H=0.7579,top10E=0.26,eRank=175.7,q75/q25=87.43 attn_vo:H=0.7856,top10E=0.16,eRank=261.3,q75/q25=inf mlp_w1:H=0.8014,top10E=0.24,eRank=225.0,q75/q25=19.82 mlp_w2:H=0.8615,top10E=0.13,eRank=314.7,q75/q25=32.19 vo_prod:H=0.6652,top10E=0.23,eRank=121.8,q75/q25=inf train_time:652405ms step_avg:75.86ms +[2025-09-03 05:04:02] [Rank 0] PRINT: step:8600/10000 val_loss:3.8089 svd_entropy: attn_qk:H=0.7579,top10E=0.26,eRank=175.7,q75/q25=87.43 attn_vo:H=0.7856,top10E=0.16,eRank=261.3,q75/q25=inf mlp_w1:H=0.8014,top10E=0.24,eRank=225.0,q75/q25=19.82 mlp_w2:H=0.8615,top10E=0.13,eRank=314.7,q75/q25=32.19 vo_prod:H=0.6652,top10E=0.23,eRank=121.8,q75/q25=inf train_time:652405ms step_avg:75.86ms +[2025-09-03 05:04:02] [Rank 0] step:8601/10000 train_time:652416ms step_avg:75.85ms +[2025-09-03 05:04:02] [Rank 0] step:8601/10000 train_time:652416ms step_avg:75.85ms +[2025-09-03 05:04:04] [Rank 0] step:8621/10000 train_time:653890ms step_avg:75.85ms +[2025-09-03 05:04:04] [Rank 0] step:8621/10000 train_time:653890ms step_avg:75.85ms +[2025-09-03 05:04:05] [Rank 0] step:8641/10000 train_time:655515ms step_avg:75.86ms +[2025-09-03 
05:04:05] [Rank 0] step:8641/10000 train_time:655515ms step_avg:75.86ms +[2025-09-03 05:04:07] [Rank 0] step:8661/10000 train_time:657139ms step_avg:75.87ms +[2025-09-03 05:04:07] [Rank 0] step:8661/10000 train_time:657139ms step_avg:75.87ms +[2025-09-03 05:04:09] [Rank 0] step:8681/10000 train_time:658757ms step_avg:75.88ms +[2025-09-03 05:04:09] [Rank 0] step:8681/10000 train_time:658757ms step_avg:75.88ms +[2025-09-03 05:04:10] [Rank 0] step:8701/10000 train_time:660374ms step_avg:75.90ms +[2025-09-03 05:04:10] [Rank 0] step:8701/10000 train_time:660374ms step_avg:75.90ms +[2025-09-03 05:04:12] [Rank 0] step:8721/10000 train_time:662005ms step_avg:75.91ms +[2025-09-03 05:04:12] [Rank 0] step:8721/10000 train_time:662005ms step_avg:75.91ms +[2025-09-03 05:04:14] [Rank 0] step:8741/10000 train_time:663620ms step_avg:75.92ms +[2025-09-03 05:04:14] [Rank 0] step:8741/10000 train_time:663620ms step_avg:75.92ms +[2025-09-03 05:04:15] [Rank 0] step:8761/10000 train_time:665241ms step_avg:75.93ms +[2025-09-03 05:04:15] [Rank 0] step:8761/10000 train_time:665241ms step_avg:75.93ms +[2025-09-03 05:04:17] [Rank 0] step:8781/10000 train_time:666875ms step_avg:75.95ms +[2025-09-03 05:04:17] [Rank 0] step:8781/10000 train_time:666875ms step_avg:75.95ms +[2025-09-03 05:04:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:04:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:04:30] [Rank 0] PRINT: step:8800/10000 val_loss:3.7989 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=176.1,q75/q25=87.52 attn_vo:H=0.7861,top10E=0.16,eRank=262.0,q75/q25=inf mlp_w1:H=0.8021,top10E=0.24,eRank=225.9,q75/q25=19.99 mlp_w2:H=0.8620,top10E=0.13,eRank=315.7,q75/q25=32.18 vo_prod:H=0.6659,top10E=0.23,eRank=122.5,q75/q25=inf train_time:668668ms step_avg:75.98ms +[2025-09-03 05:04:30] [Rank 0] PRINT: step:8800/10000 val_loss:3.7989 svd_entropy: attn_qk:H=0.7584,top10E=0.26,eRank=176.1,q75/q25=87.52 attn_vo:H=0.7861,top10E=0.16,eRank=262.0,q75/q25=inf mlp_w1:H=0.8021,top10E=0.24,eRank=225.9,q75/q25=19.99 mlp_w2:H=0.8620,top10E=0.13,eRank=315.7,q75/q25=32.18 vo_prod:H=0.6659,top10E=0.23,eRank=122.5,q75/q25=inf train_time:668668ms step_avg:75.98ms +[2025-09-03 05:04:30] [Rank 0] step:8801/10000 train_time:668679ms step_avg:75.98ms +[2025-09-03 05:04:30] [Rank 0] step:8801/10000 train_time:668679ms step_avg:75.98ms +[2025-09-03 05:04:32] [Rank 0] step:8821/10000 train_time:670142ms step_avg:75.97ms +[2025-09-03 05:04:32] [Rank 0] step:8821/10000 train_time:670142ms step_avg:75.97ms +[2025-09-03 05:04:34] [Rank 0] step:8841/10000 train_time:671787ms step_avg:75.99ms +[2025-09-03 05:04:34] [Rank 0] step:8841/10000 train_time:671787ms step_avg:75.99ms +[2025-09-03 05:04:35] [Rank 0] step:8861/10000 train_time:673408ms step_avg:76.00ms +[2025-09-03 05:04:35] [Rank 0] step:8861/10000 train_time:673408ms step_avg:76.00ms +[2025-09-03 05:04:37] [Rank 0] step:8881/10000 train_time:675032ms step_avg:76.01ms +[2025-09-03 05:04:37] [Rank 0] step:8881/10000 train_time:675032ms step_avg:76.01ms +[2025-09-03 05:04:38] [Rank 0] step:8901/10000 train_time:676663ms step_avg:76.02ms +[2025-09-03 05:04:38] [Rank 0] step:8901/10000 train_time:676663ms step_avg:76.02ms +[2025-09-03 05:04:40] [Rank 0] step:8921/10000 train_time:678296ms step_avg:76.03ms +[2025-09-03 05:04:40] [Rank 0] step:8921/10000 train_time:678296ms step_avg:76.03ms +[2025-09-03 05:04:42] [Rank 
0] step:8941/10000 train_time:679937ms step_avg:76.05ms +[2025-09-03 05:04:42] [Rank 0] step:8941/10000 train_time:679937ms step_avg:76.05ms +[2025-09-03 05:04:43] [Rank 0] step:8961/10000 train_time:681560ms step_avg:76.06ms +[2025-09-03 05:04:43] [Rank 0] step:8961/10000 train_time:681560ms step_avg:76.06ms +[2025-09-03 05:04:45] [Rank 0] step:8981/10000 train_time:683183ms step_avg:76.07ms +[2025-09-03 05:04:45] [Rank 0] step:8981/10000 train_time:683183ms step_avg:76.07ms +[2025-09-03 05:04:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:04:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:04:58] [Rank 0] PRINT: step:9000/10000 val_loss:3.7900 svd_entropy: attn_qk:H=0.7589,top10E=0.26,eRank=176.5,q75/q25=86.90 attn_vo:H=0.7866,top10E=0.16,eRank=262.5,q75/q25=inf mlp_w1:H=0.8026,top10E=0.24,eRank=226.6,q75/q25=20.04 mlp_w2:H=0.8624,top10E=0.13,eRank=316.6,q75/q25=32.16 vo_prod:H=0.6666,top10E=0.23,eRank=123.1,q75/q25=inf train_time:684967ms step_avg:76.11ms +[2025-09-03 05:04:58] [Rank 0] PRINT: step:9000/10000 val_loss:3.7900 svd_entropy: attn_qk:H=0.7589,top10E=0.26,eRank=176.5,q75/q25=86.90 attn_vo:H=0.7866,top10E=0.16,eRank=262.5,q75/q25=inf mlp_w1:H=0.8026,top10E=0.24,eRank=226.6,q75/q25=20.04 mlp_w2:H=0.8624,top10E=0.13,eRank=316.6,q75/q25=32.16 vo_prod:H=0.6666,top10E=0.23,eRank=123.1,q75/q25=inf train_time:684967ms step_avg:76.11ms +[2025-09-03 05:04:58] [Rank 0] step:9001/10000 train_time:684979ms step_avg:76.10ms +[2025-09-03 05:04:58] [Rank 0] step:9001/10000 train_time:684979ms step_avg:76.10ms +[2025-09-03 05:05:00] [Rank 0] step:9021/10000 train_time:686442ms step_avg:76.09ms +[2025-09-03 05:05:00] [Rank 0] step:9021/10000 train_time:686442ms step_avg:76.09ms +[2025-09-03 05:05:02] [Rank 0] step:9041/10000 train_time:688063ms step_avg:76.10ms +[2025-09-03 
05:05:02] [Rank 0] step:9041/10000 train_time:688063ms step_avg:76.10ms +[2025-09-03 05:05:03] [Rank 0] step:9061/10000 train_time:689700ms step_avg:76.12ms +[2025-09-03 05:05:03] [Rank 0] step:9061/10000 train_time:689700ms step_avg:76.12ms +[2025-09-03 05:05:05] [Rank 0] step:9081/10000 train_time:691334ms step_avg:76.13ms +[2025-09-03 05:05:05] [Rank 0] step:9081/10000 train_time:691334ms step_avg:76.13ms +[2025-09-03 05:05:06] [Rank 0] step:9101/10000 train_time:692981ms step_avg:76.14ms +[2025-09-03 05:05:06] [Rank 0] step:9101/10000 train_time:692981ms step_avg:76.14ms +[2025-09-03 05:05:08] [Rank 0] step:9121/10000 train_time:694608ms step_avg:76.15ms +[2025-09-03 05:05:08] [Rank 0] step:9121/10000 train_time:694608ms step_avg:76.15ms +[2025-09-03 05:05:10] [Rank 0] step:9141/10000 train_time:696236ms step_avg:76.17ms +[2025-09-03 05:05:10] [Rank 0] step:9141/10000 train_time:696236ms step_avg:76.17ms +[2025-09-03 05:05:11] [Rank 0] step:9161/10000 train_time:697859ms step_avg:76.18ms +[2025-09-03 05:05:11] [Rank 0] step:9161/10000 train_time:697859ms step_avg:76.18ms +[2025-09-03 05:05:13] [Rank 0] step:9181/10000 train_time:699518ms step_avg:76.19ms +[2025-09-03 05:05:13] [Rank 0] step:9181/10000 train_time:699518ms step_avg:76.19ms +[2025-09-03 05:05:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:05:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:05:26] [Rank 0] PRINT: step:9200/10000 val_loss:3.7834 svd_entropy: attn_qk:H=0.7593,top10E=0.26,eRank=176.9,q75/q25=86.84 attn_vo:H=0.7869,top10E=0.16,eRank=263.0,q75/q25=inf mlp_w1:H=0.8031,top10E=0.24,eRank=227.2,q75/q25=20.08 mlp_w2:H=0.8628,top10E=0.13,eRank=317.4,q75/q25=32.14 vo_prod:H=0.6672,top10E=0.23,eRank=123.6,q75/q25=inf train_time:701313ms step_avg:76.23ms +[2025-09-03 05:05:26] [Rank 0] PRINT: step:9200/10000 val_loss:3.7834 svd_entropy: attn_qk:H=0.7593,top10E=0.26,eRank=176.9,q75/q25=86.84 attn_vo:H=0.7869,top10E=0.16,eRank=263.0,q75/q25=inf mlp_w1:H=0.8031,top10E=0.24,eRank=227.2,q75/q25=20.08 mlp_w2:H=0.8628,top10E=0.13,eRank=317.4,q75/q25=32.14 vo_prod:H=0.6672,top10E=0.23,eRank=123.6,q75/q25=inf train_time:701313ms step_avg:76.23ms +[2025-09-03 05:05:26] [Rank 0] step:9201/10000 train_time:701324ms step_avg:76.22ms +[2025-09-03 05:05:26] [Rank 0] step:9201/10000 train_time:701324ms step_avg:76.22ms +[2025-09-03 05:05:28] [Rank 0] step:9221/10000 train_time:702821ms step_avg:76.22ms +[2025-09-03 05:05:28] [Rank 0] step:9221/10000 train_time:702821ms step_avg:76.22ms +[2025-09-03 05:05:30] [Rank 0] step:9241/10000 train_time:704459ms step_avg:76.23ms +[2025-09-03 05:05:30] [Rank 0] step:9241/10000 train_time:704459ms step_avg:76.23ms +[2025-09-03 05:05:31] [Rank 0] step:9261/10000 train_time:706099ms step_avg:76.24ms +[2025-09-03 05:05:31] [Rank 0] step:9261/10000 train_time:706099ms step_avg:76.24ms +[2025-09-03 05:05:33] [Rank 0] step:9281/10000 train_time:707715ms step_avg:76.25ms +[2025-09-03 05:05:33] [Rank 0] step:9281/10000 train_time:707715ms step_avg:76.25ms +[2025-09-03 05:05:35] [Rank 0] step:9301/10000 train_time:709341ms step_avg:76.27ms +[2025-09-03 05:05:35] [Rank 0] step:9301/10000 train_time:709341ms step_avg:76.27ms +[2025-09-03 05:05:36] [Rank 0] step:9321/10000 train_time:710974ms step_avg:76.28ms +[2025-09-03 05:05:36] [Rank 0] step:9321/10000 train_time:710974ms step_avg:76.28ms +[2025-09-03 05:05:38] [Rank 
0] step:9341/10000 train_time:712606ms step_avg:76.29ms +[2025-09-03 05:05:38] [Rank 0] step:9341/10000 train_time:712606ms step_avg:76.29ms +[2025-09-03 05:05:39] [Rank 0] step:9361/10000 train_time:714243ms step_avg:76.30ms +[2025-09-03 05:05:39] [Rank 0] step:9361/10000 train_time:714243ms step_avg:76.30ms +[2025-09-03 05:05:41] [Rank 0] step:9381/10000 train_time:715883ms step_avg:76.31ms +[2025-09-03 05:05:41] [Rank 0] step:9381/10000 train_time:715883ms step_avg:76.31ms +[2025-09-03 05:05:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:05:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:05:54] [Rank 0] PRINT: step:9400/10000 val_loss:3.7761 svd_entropy: attn_qk:H=0.7596,top10E=0.26,eRank=177.2,q75/q25=86.70 attn_vo:H=0.7873,top10E=0.16,eRank=263.4,q75/q25=inf mlp_w1:H=0.8035,top10E=0.23,eRank=227.8,q75/q25=20.11 mlp_w2:H=0.8631,top10E=0.13,eRank=318.0,q75/q25=32.03 vo_prod:H=0.6677,top10E=0.22,eRank=124.1,q75/q25=inf train_time:717682ms step_avg:76.35ms +[2025-09-03 05:05:54] [Rank 0] PRINT: step:9400/10000 val_loss:3.7761 svd_entropy: attn_qk:H=0.7596,top10E=0.26,eRank=177.2,q75/q25=86.70 attn_vo:H=0.7873,top10E=0.16,eRank=263.4,q75/q25=inf mlp_w1:H=0.8035,top10E=0.23,eRank=227.8,q75/q25=20.11 mlp_w2:H=0.8631,top10E=0.13,eRank=318.0,q75/q25=32.03 vo_prod:H=0.6677,top10E=0.22,eRank=124.1,q75/q25=inf train_time:717682ms step_avg:76.35ms +[2025-09-03 05:05:55] [Rank 0] step:9401/10000 train_time:717694ms step_avg:76.34ms +[2025-09-03 05:05:55] [Rank 0] step:9401/10000 train_time:717694ms step_avg:76.34ms +[2025-09-03 05:05:56] [Rank 0] step:9421/10000 train_time:719163ms step_avg:76.34ms +[2025-09-03 05:05:56] [Rank 0] step:9421/10000 train_time:719163ms step_avg:76.34ms +[2025-09-03 05:05:58] [Rank 0] step:9441/10000 train_time:720792ms step_avg:76.35ms +[2025-09-03 
05:05:58] [Rank 0] step:9441/10000 train_time:720792ms step_avg:76.35ms +[2025-09-03 05:05:59] [Rank 0] step:9461/10000 train_time:722427ms step_avg:76.36ms +[2025-09-03 05:05:59] [Rank 0] step:9461/10000 train_time:722427ms step_avg:76.36ms +[2025-09-03 05:06:01] [Rank 0] step:9481/10000 train_time:724061ms step_avg:76.37ms +[2025-09-03 05:06:01] [Rank 0] step:9481/10000 train_time:724061ms step_avg:76.37ms +[2025-09-03 05:06:03] [Rank 0] step:9501/10000 train_time:725706ms step_avg:76.38ms +[2025-09-03 05:06:03] [Rank 0] step:9501/10000 train_time:725706ms step_avg:76.38ms +[2025-09-03 05:06:04] [Rank 0] step:9521/10000 train_time:727332ms step_avg:76.39ms +[2025-09-03 05:06:04] [Rank 0] step:9521/10000 train_time:727332ms step_avg:76.39ms +[2025-09-03 05:06:06] [Rank 0] step:9541/10000 train_time:728964ms step_avg:76.40ms +[2025-09-03 05:06:06] [Rank 0] step:9541/10000 train_time:728964ms step_avg:76.40ms +[2025-09-03 05:06:08] [Rank 0] step:9561/10000 train_time:730590ms step_avg:76.41ms +[2025-09-03 05:06:08] [Rank 0] step:9561/10000 train_time:730590ms step_avg:76.41ms +[2025-09-03 05:06:09] [Rank 0] step:9581/10000 train_time:732220ms step_avg:76.42ms +[2025-09-03 05:06:09] [Rank 0] step:9581/10000 train_time:732220ms step_avg:76.42ms +[2025-09-03 05:06:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:06:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:06:23] [Rank 0] PRINT: step:9600/10000 val_loss:3.7709 svd_entropy: attn_qk:H=0.7598,top10E=0.26,eRank=177.4,q75/q25=86.75 attn_vo:H=0.7875,top10E=0.15,eRank=263.8,q75/q25=inf mlp_w1:H=0.8038,top10E=0.23,eRank=228.2,q75/q25=20.10 mlp_w2:H=0.8633,top10E=0.13,eRank=318.6,q75/q25=32.04 vo_prod:H=0.6681,top10E=0.22,eRank=124.5,q75/q25=inf train_time:734023ms step_avg:76.46ms +[2025-09-03 05:06:23] [Rank 0] PRINT: step:9600/10000 val_loss:3.7709 svd_entropy: attn_qk:H=0.7598,top10E=0.26,eRank=177.4,q75/q25=86.75 attn_vo:H=0.7875,top10E=0.15,eRank=263.8,q75/q25=inf mlp_w1:H=0.8038,top10E=0.23,eRank=228.2,q75/q25=20.10 mlp_w2:H=0.8633,top10E=0.13,eRank=318.6,q75/q25=32.04 vo_prod:H=0.6681,top10E=0.22,eRank=124.5,q75/q25=inf train_time:734023ms step_avg:76.46ms +[2025-09-03 05:06:23] [Rank 0] step:9601/10000 train_time:734035ms step_avg:76.45ms +[2025-09-03 05:06:23] [Rank 0] step:9601/10000 train_time:734035ms step_avg:76.45ms +[2025-09-03 05:06:24] [Rank 0] step:9621/10000 train_time:735520ms step_avg:76.45ms +[2025-09-03 05:06:24] [Rank 0] step:9621/10000 train_time:735520ms step_avg:76.45ms +[2025-09-03 05:06:26] [Rank 0] step:9641/10000 train_time:737150ms step_avg:76.46ms +[2025-09-03 05:06:26] [Rank 0] step:9641/10000 train_time:737150ms step_avg:76.46ms +[2025-09-03 05:06:28] [Rank 0] step:9661/10000 train_time:738807ms step_avg:76.47ms +[2025-09-03 05:06:28] [Rank 0] step:9661/10000 train_time:738807ms step_avg:76.47ms +[2025-09-03 05:06:29] [Rank 0] step:9681/10000 train_time:740459ms step_avg:76.49ms +[2025-09-03 05:06:29] [Rank 0] step:9681/10000 train_time:740459ms step_avg:76.49ms +[2025-09-03 05:06:31] [Rank 0] step:9701/10000 train_time:742133ms step_avg:76.50ms +[2025-09-03 05:06:31] [Rank 0] step:9701/10000 train_time:742133ms step_avg:76.50ms +[2025-09-03 05:06:33] [Rank 0] step:9721/10000 train_time:743785ms step_avg:76.51ms +[2025-09-03 05:06:33] [Rank 0] step:9721/10000 train_time:743785ms step_avg:76.51ms +[2025-09-03 05:06:34] [Rank 
0] step:9741/10000 train_time:745457ms step_avg:76.53ms +[2025-09-03 05:06:34] [Rank 0] step:9741/10000 train_time:745457ms step_avg:76.53ms +[2025-09-03 05:06:36] [Rank 0] step:9761/10000 train_time:747114ms step_avg:76.54ms +[2025-09-03 05:06:36] [Rank 0] step:9761/10000 train_time:747114ms step_avg:76.54ms +[2025-09-03 05:06:38] [Rank 0] step:9781/10000 train_time:748785ms step_avg:76.56ms +[2025-09-03 05:06:38] [Rank 0] step:9781/10000 train_time:748785ms step_avg:76.56ms +[2025-09-03 05:06:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:06:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:06:51] [Rank 0] PRINT: step:9800/10000 val_loss:3.7650 svd_entropy: attn_qk:H=0.7600,top10E=0.26,eRank=177.6,q75/q25=86.60 attn_vo:H=0.7877,top10E=0.15,eRank=264.1,q75/q25=inf mlp_w1:H=0.8041,top10E=0.23,eRank=228.6,q75/q25=20.09 mlp_w2:H=0.8636,top10E=0.12,eRank=319.1,q75/q25=32.07 vo_prod:H=0.6685,top10E=0.22,eRank=124.8,q75/q25=inf train_time:750623ms step_avg:76.59ms +[2025-09-03 05:06:51] [Rank 0] PRINT: step:9800/10000 val_loss:3.7650 svd_entropy: attn_qk:H=0.7600,top10E=0.26,eRank=177.6,q75/q25=86.60 attn_vo:H=0.7877,top10E=0.15,eRank=264.1,q75/q25=inf mlp_w1:H=0.8041,top10E=0.23,eRank=228.6,q75/q25=20.09 mlp_w2:H=0.8636,top10E=0.12,eRank=319.1,q75/q25=32.07 vo_prod:H=0.6685,top10E=0.22,eRank=124.8,q75/q25=inf train_time:750623ms step_avg:76.59ms +[2025-09-03 05:06:51] [Rank 0] step:9801/10000 train_time:750634ms step_avg:76.59ms +[2025-09-03 05:06:51] [Rank 0] step:9801/10000 train_time:750634ms step_avg:76.59ms +[2025-09-03 05:06:53] [Rank 0] step:9821/10000 train_time:752127ms step_avg:76.58ms +[2025-09-03 05:06:53] [Rank 0] step:9821/10000 train_time:752127ms step_avg:76.58ms +[2025-09-03 05:06:54] [Rank 0] step:9841/10000 train_time:753797ms step_avg:76.60ms +[2025-09-03 
05:06:54] [Rank 0] step:9841/10000 train_time:753797ms step_avg:76.60ms +[2025-09-03 05:06:56] [Rank 0] step:9861/10000 train_time:755445ms step_avg:76.61ms +[2025-09-03 05:06:56] [Rank 0] step:9861/10000 train_time:755445ms step_avg:76.61ms +[2025-09-03 05:06:58] [Rank 0] step:9881/10000 train_time:757088ms step_avg:76.62ms +[2025-09-03 05:06:58] [Rank 0] step:9881/10000 train_time:757088ms step_avg:76.62ms +[2025-09-03 05:06:59] [Rank 0] step:9901/10000 train_time:758748ms step_avg:76.63ms +[2025-09-03 05:06:59] [Rank 0] step:9901/10000 train_time:758748ms step_avg:76.63ms +[2025-09-03 05:07:01] [Rank 0] step:9921/10000 train_time:760394ms step_avg:76.64ms +[2025-09-03 05:07:01] [Rank 0] step:9921/10000 train_time:760394ms step_avg:76.64ms +[2025-09-03 05:07:03] [Rank 0] step:9941/10000 train_time:762056ms step_avg:76.66ms +[2025-09-03 05:07:03] [Rank 0] step:9941/10000 train_time:762056ms step_avg:76.66ms +[2025-09-03 05:07:04] [Rank 0] step:9961/10000 train_time:763709ms step_avg:76.67ms +[2025-09-03 05:07:04] [Rank 0] step:9961/10000 train_time:763709ms step_avg:76.67ms +[2025-09-03 05:07:06] [Rank 0] step:9981/10000 train_time:765363ms step_avg:76.68ms +[2025-09-03 05:07:06] [Rank 0] step:9981/10000 train_time:765363ms step_avg:76.68ms +[2025-09-03 05:07:07] [Rank 0] step:10000/10000 train_time:766945ms step_avg:76.69ms +[2025-09-03 05:07:07] [Rank 0] step:10000/10000 train_time:766945ms step_avg:76.69ms +[2025-09-03 05:07:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:07:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:07:19] [Rank 0] PRINT: step:10000/10000 val_loss:3.7595 svd_entropy: attn_qk:H=0.7602,top10E=0.26,eRank=177.7,q75/q25=86.57 attn_vo:H=0.7879,top10E=0.15,eRank=264.3,q75/q25=inf mlp_w1:H=0.8042,top10E=0.23,eRank=228.8,q75/q25=20.09 mlp_w2:H=0.8637,top10E=0.12,eRank=319.4,q75/q25=32.03 vo_prod:H=0.6688,top10E=0.22,eRank=125.0,q75/q25=inf train_time:767201ms step_avg:76.72ms +[2025-09-03 05:07:19] [Rank 0] PRINT: step:10000/10000 val_loss:3.7595 svd_entropy: attn_qk:H=0.7602,top10E=0.26,eRank=177.7,q75/q25=86.57 attn_vo:H=0.7879,top10E=0.15,eRank=264.3,q75/q25=inf mlp_w1:H=0.8042,top10E=0.23,eRank=228.8,q75/q25=20.09 mlp_w2:H=0.8637,top10E=0.12,eRank=319.4,q75/q25=32.03 vo_prod:H=0.6688,top10E=0.22,eRank=125.0,q75/q25=inf train_time:767201ms step_avg:76.72ms +[2025-09-03 05:07:19] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 05:07:19 2025 --- +[2025-09-03 05:07:19] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 05:07:19 2025 --- +[2025-09-03 05:07:19] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14336 MiB +[2025-09-03 05:07:19] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14336 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_41/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_41/config.json new file mode 100644 index 0000000000000000000000000000000000000000..406dbf4c4028f0fed0684c784d5b115a49e1dcd0 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_41/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 41, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 
0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "4f711cf8-cb23-4b4c-bec9-7479afadccfb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_41/training_log_4f711cf8-cb23-4b4c-bec9-7479afadccfb.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_41/training_log_4f711cf8-cb23-4b4c-bec9-7479afadccfb.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccb138de82dee06b6dbcfdc7c9bdfabf84256af8 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_41/training_log_4f711cf8-cb23-4b4c-bec9-7479afadccfb.txt @@ -0,0 +1,2984 @@ +[2025-09-02 05:12:32] [Rank 0] PRINT: --- Script Start: Tue Sep 2 05:12:32 2025 --- +[2025-09-02 05:12:32] [Rank 0] PRINT: --- Script Start: Tue Sep 2 05:12:32 2025 --- +[2025-09-02 05:12:32] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 05:12:32] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 05:12:32] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 05:12:32] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 05:12:32] [Rank 0] PRINT: Using fixed seed: 41 +[2025-09-02 05:12:32] [Rank 0] PRINT: Using fixed seed: 41 +[2025-09-02 05:12:32] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_41 +[2025-09-02 05:12:32] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_41 +[2025-09-02 05:12:32] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # 
Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number 
of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + 
topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 05:12:32] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 05:12:32] [Rank 0] PRINT: Constructing model... +[2025-09-02 05:12:32] [Rank 0] PRINT: Constructing model... +[2025-09-02 05:12:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 05:12:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 05:12:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 05:12:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 05:12:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 05:12:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 05:12:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 05:12:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 05:12:34] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 05:12:34] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 05:12:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 05:12:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 05:12:34] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 05:12:34] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 05:12:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 05:12:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 05:12:34] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 05:12:34] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 05:12:34] [Rank 0] PRINT: Starting warmup... +[2025-09-02 05:12:34] [Rank 0] PRINT: Starting warmup... +[2025-09-02 05:14:50] [Rank 0] PRINT: Warmup complete. +[2025-09-02 05:14:50] [Rank 0] PRINT: Warmup complete. +[2025-09-02 05:14:51] [Rank 0] PRINT: Starting training... +[2025-09-02 05:14:51] [Rank 0] PRINT: Starting training... 
+[2025-09-02 05:14:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:14:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:15:07] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 05:15:07] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 05:15:09] [Rank 0] step:21/10000 train_time:1778ms step_avg:84.68ms +[2025-09-02 05:15:09] [Rank 0] step:21/10000 train_time:1778ms step_avg:84.68ms +[2025-09-02 05:15:11] [Rank 0] step:41/10000 train_time:3182ms step_avg:77.61ms +[2025-09-02 05:15:11] [Rank 0] step:41/10000 train_time:3182ms step_avg:77.61ms +[2025-09-02 05:15:12] [Rank 0] step:61/10000 train_time:4592ms step_avg:75.28ms +[2025-09-02 05:15:12] [Rank 0] step:61/10000 train_time:4592ms step_avg:75.28ms +[2025-09-02 05:15:14] [Rank 0] step:81/10000 train_time:6006ms step_avg:74.14ms +[2025-09-02 05:15:14] [Rank 0] step:81/10000 train_time:6006ms step_avg:74.14ms +[2025-09-02 05:15:15] [Rank 0] step:101/10000 train_time:7419ms step_avg:73.45ms +[2025-09-02 05:15:15] [Rank 0] step:101/10000 train_time:7419ms step_avg:73.45ms +[2025-09-02 05:15:17] [Rank 0] step:121/10000 train_time:8833ms step_avg:73.00ms +[2025-09-02 05:15:17] [Rank 0] step:121/10000 
train_time:8833ms step_avg:73.00ms +[2025-09-02 05:15:18] [Rank 0] step:141/10000 train_time:10248ms step_avg:72.68ms +[2025-09-02 05:15:18] [Rank 0] step:141/10000 train_time:10248ms step_avg:72.68ms +[2025-09-02 05:15:19] [Rank 0] step:161/10000 train_time:11664ms step_avg:72.45ms +[2025-09-02 05:15:19] [Rank 0] step:161/10000 train_time:11664ms step_avg:72.45ms +[2025-09-02 05:15:21] [Rank 0] step:181/10000 train_time:13082ms step_avg:72.28ms +[2025-09-02 05:15:21] [Rank 0] step:181/10000 train_time:13082ms step_avg:72.28ms +[2025-09-02 05:15:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:15:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:15:34] [Rank 0] PRINT: step:200/10000 val_loss:6.5566 svd_entropy: attn_qk:H=0.4476,top10E=0.80,eRank=37.5,q75/q25=12.04 attn_vo:H=0.5358,top10E=0.65,eRank=109.9,q75/q25=100.16 mlp_w1:H=0.4382,top10E=0.74,eRank=28.0,q75/q25=2.69 mlp_w2:H=0.1913,top10E=0.95,eRank=4.7,q75/q25=321.70 vo_prod:H=0.2320,top10E=0.97,eRank=6.3,q75/q25=647.43 train_time:14641ms step_avg:73.20ms +[2025-09-02 05:15:34] [Rank 0] PRINT: step:200/10000 val_loss:6.5566 svd_entropy: attn_qk:H=0.4476,top10E=0.80,eRank=37.5,q75/q25=12.04 attn_vo:H=0.5358,top10E=0.65,eRank=109.9,q75/q25=100.16 mlp_w1:H=0.4382,top10E=0.74,eRank=28.0,q75/q25=2.69 mlp_w2:H=0.1913,top10E=0.95,eRank=4.7,q75/q25=321.70 vo_prod:H=0.2320,top10E=0.97,eRank=6.3,q75/q25=647.43 train_time:14641ms step_avg:73.20ms +[2025-09-02 05:15:34] [Rank 0] step:201/10000 train_time:14652ms step_avg:72.90ms +[2025-09-02 05:15:34] [Rank 0] step:201/10000 train_time:14652ms step_avg:72.90ms +[2025-09-02 05:15:36] [Rank 0] step:221/10000 train_time:15948ms step_avg:72.16ms +[2025-09-02 05:15:36] [Rank 0] step:221/10000 train_time:15948ms step_avg:72.16ms +[2025-09-02 05:15:37] [Rank 0] step:241/10000 
train_time:17364ms step_avg:72.05ms +[2025-09-02 05:15:37] [Rank 0] step:241/10000 train_time:17364ms step_avg:72.05ms +[2025-09-02 05:15:38] [Rank 0] step:261/10000 train_time:18779ms step_avg:71.95ms +[2025-09-02 05:15:38] [Rank 0] step:261/10000 train_time:18779ms step_avg:71.95ms +[2025-09-02 05:15:40] [Rank 0] step:281/10000 train_time:20196ms step_avg:71.87ms +[2025-09-02 05:15:40] [Rank 0] step:281/10000 train_time:20196ms step_avg:71.87ms +[2025-09-02 05:15:41] [Rank 0] step:301/10000 train_time:21613ms step_avg:71.81ms +[2025-09-02 05:15:41] [Rank 0] step:301/10000 train_time:21613ms step_avg:71.81ms +[2025-09-02 05:15:43] [Rank 0] step:321/10000 train_time:23032ms step_avg:71.75ms +[2025-09-02 05:15:43] [Rank 0] step:321/10000 train_time:23032ms step_avg:71.75ms +[2025-09-02 05:15:44] [Rank 0] step:341/10000 train_time:24451ms step_avg:71.70ms +[2025-09-02 05:15:44] [Rank 0] step:341/10000 train_time:24451ms step_avg:71.70ms +[2025-09-02 05:15:46] [Rank 0] step:361/10000 train_time:25868ms step_avg:71.66ms +[2025-09-02 05:15:46] [Rank 0] step:361/10000 train_time:25868ms step_avg:71.66ms +[2025-09-02 05:15:47] [Rank 0] step:381/10000 train_time:27287ms step_avg:71.62ms +[2025-09-02 05:15:47] [Rank 0] step:381/10000 train_time:27287ms step_avg:71.62ms +[2025-09-02 05:15:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:15:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:16:00] [Rank 0] PRINT: step:400/10000 val_loss:6.0192 svd_entropy: attn_qk:H=0.5052,top10E=0.70,eRank=45.5,q75/q25=13.47 attn_vo:H=0.5636,top10E=0.57,eRank=85.4,q75/q25=44.52 mlp_w1:H=0.4773,top10E=0.68,eRank=42.4,q75/q25=3.18 mlp_w2:H=0.5173,top10E=0.63,eRank=32.8,q75/q25=17.17 vo_prod:H=0.3771,top10E=0.87,eRank=14.4,q75/q25=318.32 train_time:28846ms step_avg:72.12ms +[2025-09-02 05:16:00] [Rank 0] PRINT: step:400/10000 val_loss:6.0192 svd_entropy: attn_qk:H=0.5052,top10E=0.70,eRank=45.5,q75/q25=13.47 attn_vo:H=0.5636,top10E=0.57,eRank=85.4,q75/q25=44.52 mlp_w1:H=0.4773,top10E=0.68,eRank=42.4,q75/q25=3.18 mlp_w2:H=0.5173,top10E=0.63,eRank=32.8,q75/q25=17.17 vo_prod:H=0.3771,top10E=0.87,eRank=14.4,q75/q25=318.32 train_time:28846ms step_avg:72.12ms +[2025-09-02 05:16:00] [Rank 0] step:401/10000 train_time:28857ms step_avg:71.96ms +[2025-09-02 05:16:00] [Rank 0] step:401/10000 train_time:28857ms step_avg:71.96ms +[2025-09-02 05:16:02] [Rank 0] step:421/10000 train_time:30148ms step_avg:71.61ms +[2025-09-02 05:16:02] [Rank 0] step:421/10000 train_time:30148ms step_avg:71.61ms +[2025-09-02 05:16:03] [Rank 0] step:441/10000 train_time:31567ms step_avg:71.58ms +[2025-09-02 05:16:03] [Rank 0] step:441/10000 train_time:31567ms step_avg:71.58ms +[2025-09-02 05:16:05] [Rank 0] step:461/10000 train_time:32986ms step_avg:71.55ms +[2025-09-02 05:16:05] [Rank 0] step:461/10000 train_time:32986ms step_avg:71.55ms +[2025-09-02 05:16:06] [Rank 0] step:481/10000 train_time:34405ms step_avg:71.53ms +[2025-09-02 05:16:06] [Rank 0] step:481/10000 train_time:34405ms step_avg:71.53ms +[2025-09-02 05:16:07] [Rank 0] step:501/10000 train_time:35823ms step_avg:71.50ms +[2025-09-02 05:16:07] [Rank 0] step:501/10000 train_time:35823ms step_avg:71.50ms +[2025-09-02 05:16:09] [Rank 0] step:521/10000 train_time:37242ms step_avg:71.48ms +[2025-09-02 05:16:09] [Rank 0] step:521/10000 train_time:37242ms step_avg:71.48ms +[2025-09-02 05:16:10] [Rank 0] step:541/10000 
train_time:38662ms step_avg:71.46ms +[2025-09-02 05:16:10] [Rank 0] step:541/10000 train_time:38662ms step_avg:71.46ms +[2025-09-02 05:16:12] [Rank 0] step:561/10000 train_time:40081ms step_avg:71.45ms +[2025-09-02 05:16:12] [Rank 0] step:561/10000 train_time:40081ms step_avg:71.45ms +[2025-09-02 05:16:13] [Rank 0] step:581/10000 train_time:41500ms step_avg:71.43ms +[2025-09-02 05:16:13] [Rank 0] step:581/10000 train_time:41500ms step_avg:71.43ms +[2025-09-02 05:16:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:16:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:16:26] [Rank 0] PRINT: step:600/10000 val_loss:5.7131 svd_entropy: attn_qk:H=0.5422,top10E=0.62,eRank=52.9,q75/q25=15.19 attn_vo:H=0.5973,top10E=0.48,eRank=91.5,q75/q25=31.27 mlp_w1:H=0.5168,top10E=0.63,eRank=53.2,q75/q25=3.65 mlp_w2:H=0.6163,top10E=0.47,eRank=61.5,q75/q25=12.69 vo_prod:H=0.4541,top10E=0.73,eRank=22.7,q75/q25=250.56 train_time:43062ms step_avg:71.77ms +[2025-09-02 05:16:26] [Rank 0] PRINT: step:600/10000 val_loss:5.7131 svd_entropy: attn_qk:H=0.5422,top10E=0.62,eRank=52.9,q75/q25=15.19 attn_vo:H=0.5973,top10E=0.48,eRank=91.5,q75/q25=31.27 mlp_w1:H=0.5168,top10E=0.63,eRank=53.2,q75/q25=3.65 mlp_w2:H=0.6163,top10E=0.47,eRank=61.5,q75/q25=12.69 vo_prod:H=0.4541,top10E=0.73,eRank=22.7,q75/q25=250.56 train_time:43062ms step_avg:71.77ms +[2025-09-02 05:16:26] [Rank 0] step:601/10000 train_time:43074ms step_avg:71.67ms +[2025-09-02 05:16:26] [Rank 0] step:601/10000 train_time:43074ms step_avg:71.67ms +[2025-09-02 05:16:28] [Rank 0] step:621/10000 train_time:44374ms step_avg:71.46ms +[2025-09-02 05:16:28] [Rank 0] step:621/10000 train_time:44374ms step_avg:71.46ms +[2025-09-02 05:16:29] [Rank 0] step:641/10000 train_time:45793ms step_avg:71.44ms +[2025-09-02 05:16:29] [Rank 0] step:641/10000 
train_time:45793ms step_avg:71.44ms +[2025-09-02 05:16:31] [Rank 0] step:661/10000 train_time:47210ms step_avg:71.42ms +[2025-09-02 05:16:31] [Rank 0] step:661/10000 train_time:47210ms step_avg:71.42ms +[2025-09-02 05:16:32] [Rank 0] step:681/10000 train_time:48631ms step_avg:71.41ms +[2025-09-02 05:16:32] [Rank 0] step:681/10000 train_time:48631ms step_avg:71.41ms +[2025-09-02 05:16:34] [Rank 0] step:701/10000 train_time:50051ms step_avg:71.40ms +[2025-09-02 05:16:34] [Rank 0] step:701/10000 train_time:50051ms step_avg:71.40ms +[2025-09-02 05:16:35] [Rank 0] step:721/10000 train_time:51470ms step_avg:71.39ms +[2025-09-02 05:16:35] [Rank 0] step:721/10000 train_time:51470ms step_avg:71.39ms +[2025-09-02 05:16:36] [Rank 0] step:741/10000 train_time:52889ms step_avg:71.38ms +[2025-09-02 05:16:36] [Rank 0] step:741/10000 train_time:52889ms step_avg:71.38ms +[2025-09-02 05:16:38] [Rank 0] step:761/10000 train_time:54321ms step_avg:71.38ms +[2025-09-02 05:16:38] [Rank 0] step:761/10000 train_time:54321ms step_avg:71.38ms +[2025-09-02 05:16:39] [Rank 0] step:781/10000 train_time:55753ms step_avg:71.39ms +[2025-09-02 05:16:39] [Rank 0] step:781/10000 train_time:55753ms step_avg:71.39ms +[2025-09-02 05:16:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:16:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:16:52] [Rank 0] PRINT: step:800/10000 val_loss:5.4828 svd_entropy: attn_qk:H=0.5686,top10E=0.56,eRank=58.9,q75/q25=17.21 attn_vo:H=0.6248,top10E=0.43,eRank=99.8,q75/q25=29.26 mlp_w1:H=0.5469,top10E=0.59,eRank=61.6,q75/q25=4.07 mlp_w2:H=0.6721,top10E=0.39,eRank=88.1,q75/q25=9.75 vo_prod:H=0.4987,top10E=0.64,eRank=30.0,q75/q25=298.03 train_time:57329ms step_avg:71.66ms +[2025-09-02 05:16:52] [Rank 0] PRINT: step:800/10000 val_loss:5.4828 svd_entropy: attn_qk:H=0.5686,top10E=0.56,eRank=58.9,q75/q25=17.21 attn_vo:H=0.6248,top10E=0.43,eRank=99.8,q75/q25=29.26 mlp_w1:H=0.5469,top10E=0.59,eRank=61.6,q75/q25=4.07 mlp_w2:H=0.6721,top10E=0.39,eRank=88.1,q75/q25=9.75 vo_prod:H=0.4987,top10E=0.64,eRank=30.0,q75/q25=298.03 train_time:57329ms step_avg:71.66ms +[2025-09-02 05:16:53] [Rank 0] step:801/10000 train_time:57340ms step_avg:71.59ms +[2025-09-02 05:16:53] [Rank 0] step:801/10000 train_time:57340ms step_avg:71.59ms +[2025-09-02 05:16:54] [Rank 0] step:821/10000 train_time:58646ms step_avg:71.43ms +[2025-09-02 05:16:54] [Rank 0] step:821/10000 train_time:58646ms step_avg:71.43ms +[2025-09-02 05:16:55] [Rank 0] step:841/10000 train_time:60075ms step_avg:71.43ms +[2025-09-02 05:16:55] [Rank 0] step:841/10000 train_time:60075ms step_avg:71.43ms +[2025-09-02 05:16:57] [Rank 0] step:861/10000 train_time:61506ms step_avg:71.44ms +[2025-09-02 05:16:57] [Rank 0] step:861/10000 train_time:61506ms step_avg:71.44ms +[2025-09-02 05:16:58] [Rank 0] step:881/10000 train_time:63040ms step_avg:71.55ms +[2025-09-02 05:16:58] [Rank 0] step:881/10000 train_time:63040ms step_avg:71.55ms +[2025-09-02 05:17:00] [Rank 0] step:901/10000 train_time:64478ms step_avg:71.56ms +[2025-09-02 05:17:00] [Rank 0] step:901/10000 train_time:64478ms step_avg:71.56ms +[2025-09-02 05:17:01] [Rank 0] step:921/10000 train_time:66011ms step_avg:71.67ms +[2025-09-02 05:17:01] [Rank 0] step:921/10000 train_time:66011ms step_avg:71.67ms +[2025-09-02 05:17:03] [Rank 0] step:941/10000 train_time:67444ms 
step_avg:71.67ms +[2025-09-02 05:17:03] [Rank 0] step:941/10000 train_time:67444ms step_avg:71.67ms +[2025-09-02 05:17:04] [Rank 0] step:961/10000 train_time:68876ms step_avg:71.67ms +[2025-09-02 05:17:04] [Rank 0] step:961/10000 train_time:68876ms step_avg:71.67ms +[2025-09-02 05:17:06] [Rank 0] step:981/10000 train_time:70309ms step_avg:71.67ms +[2025-09-02 05:17:06] [Rank 0] step:981/10000 train_time:70309ms step_avg:71.67ms +[2025-09-02 05:17:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:17:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:17:19] [Rank 0] PRINT: step:1000/10000 val_loss:5.3140 svd_entropy: attn_qk:H=0.5888,top10E=0.52,eRank=64.4,q75/q25=19.66 attn_vo:H=0.6470,top10E=0.39,eRank=108.6,q75/q25=33.09 mlp_w1:H=0.5727,top10E=0.55,eRank=69.2,q75/q25=4.48 mlp_w2:H=0.7062,top10E=0.34,eRank=110.3,q75/q25=10.64 vo_prod:H=0.5281,top10E=0.58,eRank=36.3,q75/q25=553.63 train_time:71888ms step_avg:71.89ms +[2025-09-02 05:17:19] [Rank 0] PRINT: step:1000/10000 val_loss:5.3140 svd_entropy: attn_qk:H=0.5888,top10E=0.52,eRank=64.4,q75/q25=19.66 attn_vo:H=0.6470,top10E=0.39,eRank=108.6,q75/q25=33.09 mlp_w1:H=0.5727,top10E=0.55,eRank=69.2,q75/q25=4.48 mlp_w2:H=0.7062,top10E=0.34,eRank=110.3,q75/q25=10.64 vo_prod:H=0.5281,top10E=0.58,eRank=36.3,q75/q25=553.63 train_time:71888ms step_avg:71.89ms +[2025-09-02 05:17:19] [Rank 0] step:1001/10000 train_time:71898ms step_avg:71.83ms +[2025-09-02 05:17:19] [Rank 0] step:1001/10000 train_time:71898ms step_avg:71.83ms +[2025-09-02 05:17:20] [Rank 0] step:1021/10000 train_time:73197ms step_avg:71.69ms +[2025-09-02 05:17:20] [Rank 0] step:1021/10000 train_time:73197ms step_avg:71.69ms +[2025-09-02 05:17:22] [Rank 0] step:1041/10000 train_time:74626ms step_avg:71.69ms +[2025-09-02 05:17:22] [Rank 0] step:1041/10000 train_time:74626ms 
step_avg:71.69ms +[2025-09-02 05:17:23] [Rank 0] step:1061/10000 train_time:76057ms step_avg:71.68ms +[2025-09-02 05:17:23] [Rank 0] step:1061/10000 train_time:76057ms step_avg:71.68ms +[2025-09-02 05:17:25] [Rank 0] step:1081/10000 train_time:77488ms step_avg:71.68ms +[2025-09-02 05:17:25] [Rank 0] step:1081/10000 train_time:77488ms step_avg:71.68ms +[2025-09-02 05:17:26] [Rank 0] step:1101/10000 train_time:78918ms step_avg:71.68ms +[2025-09-02 05:17:26] [Rank 0] step:1101/10000 train_time:78918ms step_avg:71.68ms +[2025-09-02 05:17:27] [Rank 0] step:1121/10000 train_time:80351ms step_avg:71.68ms +[2025-09-02 05:17:27] [Rank 0] step:1121/10000 train_time:80351ms step_avg:71.68ms +[2025-09-02 05:17:29] [Rank 0] step:1141/10000 train_time:81784ms step_avg:71.68ms +[2025-09-02 05:17:29] [Rank 0] step:1141/10000 train_time:81784ms step_avg:71.68ms +[2025-09-02 05:17:30] [Rank 0] step:1161/10000 train_time:83217ms step_avg:71.68ms +[2025-09-02 05:17:30] [Rank 0] step:1161/10000 train_time:83217ms step_avg:71.68ms +[2025-09-02 05:17:32] [Rank 0] step:1181/10000 train_time:84649ms step_avg:71.68ms +[2025-09-02 05:17:32] [Rank 0] step:1181/10000 train_time:84649ms step_avg:71.68ms +[2025-09-02 05:17:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:17:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:17:45] [Rank 0] PRINT: step:1200/10000 val_loss:5.1583 svd_entropy: attn_qk:H=0.6054,top10E=0.49,eRank=69.7,q75/q25=22.72 attn_vo:H=0.6668,top10E=0.36,eRank=117.9,q75/q25=42.59 mlp_w1:H=0.5941,top10E=0.52,eRank=76.4,q75/q25=4.93 mlp_w2:H=0.7309,top10E=0.30,eRank=129.9,q75/q25=12.41 vo_prod:H=0.5527,top10E=0.53,eRank=42.6,q75/q25=1231.50 train_time:86226ms step_avg:71.85ms +[2025-09-02 05:17:45] [Rank 0] PRINT: step:1200/10000 val_loss:5.1583 svd_entropy: attn_qk:H=0.6054,top10E=0.49,eRank=69.7,q75/q25=22.72 attn_vo:H=0.6668,top10E=0.36,eRank=117.9,q75/q25=42.59 mlp_w1:H=0.5941,top10E=0.52,eRank=76.4,q75/q25=4.93 mlp_w2:H=0.7309,top10E=0.30,eRank=129.9,q75/q25=12.41 vo_prod:H=0.5527,top10E=0.53,eRank=42.6,q75/q25=1231.50 train_time:86226ms step_avg:71.85ms +[2025-09-02 05:17:45] [Rank 0] step:1201/10000 train_time:86236ms step_avg:71.80ms +[2025-09-02 05:17:45] [Rank 0] step:1201/10000 train_time:86236ms step_avg:71.80ms +[2025-09-02 05:17:46] [Rank 0] step:1221/10000 train_time:87540ms step_avg:71.70ms +[2025-09-02 05:17:46] [Rank 0] step:1221/10000 train_time:87540ms step_avg:71.70ms +[2025-09-02 05:17:48] [Rank 0] step:1241/10000 train_time:88973ms step_avg:71.69ms +[2025-09-02 05:17:48] [Rank 0] step:1241/10000 train_time:88973ms step_avg:71.69ms +[2025-09-02 05:17:49] [Rank 0] step:1261/10000 train_time:90407ms step_avg:71.69ms +[2025-09-02 05:17:49] [Rank 0] step:1261/10000 train_time:90407ms step_avg:71.69ms +[2025-09-02 05:17:51] [Rank 0] step:1281/10000 train_time:91838ms step_avg:71.69ms +[2025-09-02 05:17:51] [Rank 0] step:1281/10000 train_time:91838ms step_avg:71.69ms +[2025-09-02 05:17:52] [Rank 0] step:1301/10000 train_time:93272ms step_avg:71.69ms +[2025-09-02 05:17:52] [Rank 0] step:1301/10000 train_time:93272ms step_avg:71.69ms +[2025-09-02 05:17:54] [Rank 0] step:1321/10000 train_time:94705ms step_avg:71.69ms +[2025-09-02 05:17:54] [Rank 0] step:1321/10000 train_time:94705ms step_avg:71.69ms +[2025-09-02 05:17:55] [Rank 0] 
step:1341/10000 train_time:96139ms step_avg:71.69ms +[2025-09-02 05:17:55] [Rank 0] step:1341/10000 train_time:96139ms step_avg:71.69ms +[2025-09-02 05:17:56] [Rank 0] step:1361/10000 train_time:97572ms step_avg:71.69ms +[2025-09-02 05:17:56] [Rank 0] step:1361/10000 train_time:97572ms step_avg:71.69ms +[2025-09-02 05:17:58] [Rank 0] step:1381/10000 train_time:99009ms step_avg:71.69ms +[2025-09-02 05:17:58] [Rank 0] step:1381/10000 train_time:99009ms step_avg:71.69ms +[2025-09-02 05:17:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:17:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:18:11] [Rank 0] PRINT: step:1400/10000 val_loss:5.0272 svd_entropy: attn_qk:H=0.6194,top10E=0.46,eRank=74.8,q75/q25=26.73 attn_vo:H=0.6837,top10E=0.33,eRank=127.2,q75/q25=54.64 mlp_w1:H=0.6129,top10E=0.50,eRank=83.5,q75/q25=5.42 mlp_w2:H=0.7497,top10E=0.27,eRank=147.3,q75/q25=14.65 vo_prod:H=0.5723,top10E=0.50,eRank=48.4,q75/q25=2375.65 train_time:100587ms step_avg:71.85ms +[2025-09-02 05:18:11] [Rank 0] PRINT: step:1400/10000 val_loss:5.0272 svd_entropy: attn_qk:H=0.6194,top10E=0.46,eRank=74.8,q75/q25=26.73 attn_vo:H=0.6837,top10E=0.33,eRank=127.2,q75/q25=54.64 mlp_w1:H=0.6129,top10E=0.50,eRank=83.5,q75/q25=5.42 mlp_w2:H=0.7497,top10E=0.27,eRank=147.3,q75/q25=14.65 vo_prod:H=0.5723,top10E=0.50,eRank=48.4,q75/q25=2375.65 train_time:100587ms step_avg:71.85ms +[2025-09-02 05:18:11] [Rank 0] step:1401/10000 train_time:100597ms step_avg:71.80ms +[2025-09-02 05:18:11] [Rank 0] step:1401/10000 train_time:100597ms step_avg:71.80ms +[2025-09-02 05:18:12] [Rank 0] step:1421/10000 train_time:101893ms step_avg:71.70ms +[2025-09-02 05:18:12] [Rank 0] step:1421/10000 train_time:101893ms step_avg:71.70ms +[2025-09-02 05:18:14] [Rank 0] step:1441/10000 train_time:103325ms step_avg:71.70ms +[2025-09-02 
05:18:14] [Rank 0] step:1441/10000 train_time:103325ms step_avg:71.70ms +[2025-09-02 05:18:15] [Rank 0] step:1461/10000 train_time:104757ms step_avg:71.70ms +[2025-09-02 05:18:15] [Rank 0] step:1461/10000 train_time:104757ms step_avg:71.70ms +[2025-09-02 05:18:17] [Rank 0] step:1481/10000 train_time:106190ms step_avg:71.70ms +[2025-09-02 05:18:17] [Rank 0] step:1481/10000 train_time:106190ms step_avg:71.70ms +[2025-09-02 05:18:18] [Rank 0] step:1501/10000 train_time:107632ms step_avg:71.71ms +[2025-09-02 05:18:18] [Rank 0] step:1501/10000 train_time:107632ms step_avg:71.71ms +[2025-09-02 05:18:20] [Rank 0] step:1521/10000 train_time:109076ms step_avg:71.71ms +[2025-09-02 05:18:20] [Rank 0] step:1521/10000 train_time:109076ms step_avg:71.71ms +[2025-09-02 05:18:21] [Rank 0] step:1541/10000 train_time:110520ms step_avg:71.72ms +[2025-09-02 05:18:21] [Rank 0] step:1541/10000 train_time:110520ms step_avg:71.72ms +[2025-09-02 05:18:23] [Rank 0] step:1561/10000 train_time:111965ms step_avg:71.73ms +[2025-09-02 05:18:23] [Rank 0] step:1561/10000 train_time:111965ms step_avg:71.73ms +[2025-09-02 05:18:24] [Rank 0] step:1581/10000 train_time:113411ms step_avg:71.73ms +[2025-09-02 05:18:24] [Rank 0] step:1581/10000 train_time:113411ms step_avg:71.73ms +[2025-09-02 05:18:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:18:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:18:37] [Rank 0] PRINT: step:1600/10000 val_loss:4.8931 svd_entropy: attn_qk:H=0.6313,top10E=0.44,eRank=79.2,q75/q25=31.22 attn_vo:H=0.6983,top10E=0.31,eRank=136.3,q75/q25=66.56 mlp_w1:H=0.6290,top10E=0.48,eRank=90.2,q75/q25=5.99 mlp_w2:H=0.7647,top10E=0.25,eRank=162.9,q75/q25=17.10 vo_prod:H=0.5886,top10E=0.46,eRank=53.9,q75/q25=3840.02 train_time:115002ms step_avg:71.88ms +[2025-09-02 05:18:37] [Rank 0] PRINT: step:1600/10000 val_loss:4.8931 svd_entropy: attn_qk:H=0.6313,top10E=0.44,eRank=79.2,q75/q25=31.22 attn_vo:H=0.6983,top10E=0.31,eRank=136.3,q75/q25=66.56 mlp_w1:H=0.6290,top10E=0.48,eRank=90.2,q75/q25=5.99 mlp_w2:H=0.7647,top10E=0.25,eRank=162.9,q75/q25=17.10 vo_prod:H=0.5886,top10E=0.46,eRank=53.9,q75/q25=3840.02 train_time:115002ms step_avg:71.88ms +[2025-09-02 05:18:37] [Rank 0] step:1601/10000 train_time:115012ms step_avg:71.84ms +[2025-09-02 05:18:37] [Rank 0] step:1601/10000 train_time:115012ms step_avg:71.84ms +[2025-09-02 05:18:38] [Rank 0] step:1621/10000 train_time:116317ms step_avg:71.76ms +[2025-09-02 05:18:38] [Rank 0] step:1621/10000 train_time:116317ms step_avg:71.76ms +[2025-09-02 05:18:40] [Rank 0] step:1641/10000 train_time:117760ms step_avg:71.76ms +[2025-09-02 05:18:40] [Rank 0] step:1641/10000 train_time:117760ms step_avg:71.76ms +[2025-09-02 05:18:41] [Rank 0] step:1661/10000 train_time:119203ms step_avg:71.77ms +[2025-09-02 05:18:41] [Rank 0] step:1661/10000 train_time:119203ms step_avg:71.77ms +[2025-09-02 05:18:43] [Rank 0] step:1681/10000 train_time:120645ms step_avg:71.77ms +[2025-09-02 05:18:43] [Rank 0] step:1681/10000 train_time:120645ms step_avg:71.77ms +[2025-09-02 05:18:44] [Rank 0] step:1701/10000 train_time:122089ms step_avg:71.77ms +[2025-09-02 05:18:44] [Rank 0] step:1701/10000 train_time:122089ms step_avg:71.77ms +[2025-09-02 05:18:46] [Rank 0] step:1721/10000 train_time:123533ms step_avg:71.78ms +[2025-09-02 05:18:46] [Rank 0] step:1721/10000 train_time:123533ms step_avg:71.78ms +[2025-09-02 05:18:47] 
[Rank 0] step:1741/10000 train_time:124976ms step_avg:71.78ms +[2025-09-02 05:18:47] [Rank 0] step:1741/10000 train_time:124976ms step_avg:71.78ms +[2025-09-02 05:18:49] [Rank 0] step:1761/10000 train_time:126420ms step_avg:71.79ms +[2025-09-02 05:18:49] [Rank 0] step:1761/10000 train_time:126420ms step_avg:71.79ms +[2025-09-02 05:18:50] [Rank 0] step:1781/10000 train_time:127863ms step_avg:71.79ms +[2025-09-02 05:18:50] [Rank 0] step:1781/10000 train_time:127863ms step_avg:71.79ms +[2025-09-02 05:18:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:18:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:19:03] [Rank 0] PRINT: step:1800/10000 val_loss:4.7882 svd_entropy: attn_qk:H=0.6415,top10E=0.42,eRank=83.4,q75/q25=36.59 attn_vo:H=0.7107,top10E=0.30,eRank=144.9,q75/q25=77.06 mlp_w1:H=0.6430,top10E=0.46,eRank=96.7,q75/q25=6.60 mlp_w2:H=0.7770,top10E=0.23,eRank=176.9,q75/q25=19.42 vo_prod:H=0.6024,top10E=0.44,eRank=59.0,q75/q25=5623.64 train_time:129452ms step_avg:71.92ms +[2025-09-02 05:19:03] [Rank 0] PRINT: step:1800/10000 val_loss:4.7882 svd_entropy: attn_qk:H=0.6415,top10E=0.42,eRank=83.4,q75/q25=36.59 attn_vo:H=0.7107,top10E=0.30,eRank=144.9,q75/q25=77.06 mlp_w1:H=0.6430,top10E=0.46,eRank=96.7,q75/q25=6.60 mlp_w2:H=0.7770,top10E=0.23,eRank=176.9,q75/q25=19.42 vo_prod:H=0.6024,top10E=0.44,eRank=59.0,q75/q25=5623.64 train_time:129452ms step_avg:71.92ms +[2025-09-02 05:19:03] [Rank 0] step:1801/10000 train_time:129463ms step_avg:71.88ms +[2025-09-02 05:19:03] [Rank 0] step:1801/10000 train_time:129463ms step_avg:71.88ms +[2025-09-02 05:19:05] [Rank 0] step:1821/10000 train_time:130794ms step_avg:71.83ms +[2025-09-02 05:19:05] [Rank 0] step:1821/10000 train_time:130794ms step_avg:71.83ms +[2025-09-02 05:19:06] [Rank 0] step:1841/10000 train_time:132235ms step_avg:71.83ms 
+[2025-09-02 05:19:06] [Rank 0] step:1841/10000 train_time:132235ms step_avg:71.83ms +[2025-09-02 05:19:07] [Rank 0] step:1861/10000 train_time:133679ms step_avg:71.83ms +[2025-09-02 05:19:07] [Rank 0] step:1861/10000 train_time:133679ms step_avg:71.83ms +[2025-09-02 05:19:09] [Rank 0] step:1881/10000 train_time:135122ms step_avg:71.84ms +[2025-09-02 05:19:09] [Rank 0] step:1881/10000 train_time:135122ms step_avg:71.84ms +[2025-09-02 05:19:10] [Rank 0] step:1901/10000 train_time:136565ms step_avg:71.84ms +[2025-09-02 05:19:10] [Rank 0] step:1901/10000 train_time:136565ms step_avg:71.84ms +[2025-09-02 05:19:12] [Rank 0] step:1921/10000 train_time:138009ms step_avg:71.84ms +[2025-09-02 05:19:12] [Rank 0] step:1921/10000 train_time:138009ms step_avg:71.84ms +[2025-09-02 05:19:13] [Rank 0] step:1941/10000 train_time:139452ms step_avg:71.85ms +[2025-09-02 05:19:13] [Rank 0] step:1941/10000 train_time:139452ms step_avg:71.85ms +[2025-09-02 05:19:15] [Rank 0] step:1961/10000 train_time:140896ms step_avg:71.85ms +[2025-09-02 05:19:15] [Rank 0] step:1961/10000 train_time:140896ms step_avg:71.85ms +[2025-09-02 05:19:16] [Rank 0] step:1981/10000 train_time:142340ms step_avg:71.85ms +[2025-09-02 05:19:16] [Rank 0] step:1981/10000 train_time:142340ms step_avg:71.85ms +[2025-09-02 05:19:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:19:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:19:29] [Rank 0] PRINT: step:2000/10000 val_loss:4.7211 svd_entropy: attn_qk:H=0.6504,top10E=0.41,eRank=87.3,q75/q25=42.09 attn_vo:H=0.7216,top10E=0.28,eRank=153.0,q75/q25=86.02 mlp_w1:H=0.6552,top10E=0.44,eRank=102.7,q75/q25=7.32 mlp_w2:H=0.7865,top10E=0.22,eRank=188.7,q75/q25=21.63 vo_prod:H=0.6147,top10E=0.42,eRank=64.0,q75/q25=7408.18 train_time:143928ms step_avg:71.96ms +[2025-09-02 05:19:29] [Rank 0] PRINT: step:2000/10000 val_loss:4.7211 svd_entropy: attn_qk:H=0.6504,top10E=0.41,eRank=87.3,q75/q25=42.09 attn_vo:H=0.7216,top10E=0.28,eRank=153.0,q75/q25=86.02 mlp_w1:H=0.6552,top10E=0.44,eRank=102.7,q75/q25=7.32 mlp_w2:H=0.7865,top10E=0.22,eRank=188.7,q75/q25=21.63 vo_prod:H=0.6147,top10E=0.42,eRank=64.0,q75/q25=7408.18 train_time:143928ms step_avg:71.96ms +[2025-09-02 05:19:29] [Rank 0] step:2001/10000 train_time:143939ms step_avg:71.93ms +[2025-09-02 05:19:29] [Rank 0] step:2001/10000 train_time:143939ms step_avg:71.93ms +[2025-09-02 05:19:31] [Rank 0] step:2021/10000 train_time:145256ms step_avg:71.87ms +[2025-09-02 05:19:31] [Rank 0] step:2021/10000 train_time:145256ms step_avg:71.87ms +[2025-09-02 05:19:32] [Rank 0] step:2041/10000 train_time:146816ms step_avg:71.93ms +[2025-09-02 05:19:32] [Rank 0] step:2041/10000 train_time:146816ms step_avg:71.93ms +[2025-09-02 05:19:34] [Rank 0] step:2061/10000 train_time:148262ms step_avg:71.94ms +[2025-09-02 05:19:34] [Rank 0] step:2061/10000 train_time:148262ms step_avg:71.94ms +[2025-09-02 05:19:35] [Rank 0] step:2081/10000 train_time:149707ms step_avg:71.94ms +[2025-09-02 05:19:35] [Rank 0] step:2081/10000 train_time:149707ms step_avg:71.94ms +[2025-09-02 05:19:37] [Rank 0] step:2101/10000 train_time:151150ms step_avg:71.94ms +[2025-09-02 05:19:37] [Rank 0] step:2101/10000 train_time:151150ms step_avg:71.94ms +[2025-09-02 05:19:38] [Rank 0] step:2121/10000 train_time:152594ms step_avg:71.94ms +[2025-09-02 05:19:38] [Rank 0] step:2121/10000 train_time:152594ms step_avg:71.94ms +[2025-09-02 05:19:39] 
[Rank 0] step:2141/10000 train_time:154039ms step_avg:71.95ms +[2025-09-02 05:19:39] [Rank 0] step:2141/10000 train_time:154039ms step_avg:71.95ms +[2025-09-02 05:19:41] [Rank 0] step:2161/10000 train_time:155483ms step_avg:71.95ms +[2025-09-02 05:19:41] [Rank 0] step:2161/10000 train_time:155483ms step_avg:71.95ms +[2025-09-02 05:19:42] [Rank 0] step:2181/10000 train_time:156927ms step_avg:71.95ms +[2025-09-02 05:19:42] [Rank 0] step:2181/10000 train_time:156927ms step_avg:71.95ms +[2025-09-02 05:19:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:19:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:19:55] [Rank 0] PRINT: step:2200/10000 val_loss:4.6421 svd_entropy: attn_qk:H=0.6580,top10E=0.39,eRank=91.0,q75/q25=47.53 attn_vo:H=0.7309,top10E=0.27,eRank=160.5,q75/q25=92.00 mlp_w1:H=0.6657,top10E=0.43,eRank=108.2,q75/q25=7.99 mlp_w2:H=0.7945,top10E=0.20,eRank=199.2,q75/q25=23.77 vo_prod:H=0.6252,top10E=0.40,eRank=68.5,q75/q25=8907.87 train_time:158516ms step_avg:72.05ms +[2025-09-02 05:19:55] [Rank 0] PRINT: step:2200/10000 val_loss:4.6421 svd_entropy: attn_qk:H=0.6580,top10E=0.39,eRank=91.0,q75/q25=47.53 attn_vo:H=0.7309,top10E=0.27,eRank=160.5,q75/q25=92.00 mlp_w1:H=0.6657,top10E=0.43,eRank=108.2,q75/q25=7.99 mlp_w2:H=0.7945,top10E=0.20,eRank=199.2,q75/q25=23.77 vo_prod:H=0.6252,top10E=0.40,eRank=68.5,q75/q25=8907.87 train_time:158516ms step_avg:72.05ms +[2025-09-02 05:19:55] [Rank 0] step:2201/10000 train_time:158527ms step_avg:72.03ms +[2025-09-02 05:19:55] [Rank 0] step:2201/10000 train_time:158527ms step_avg:72.03ms +[2025-09-02 05:19:57] [Rank 0] step:2221/10000 train_time:159852ms step_avg:71.97ms +[2025-09-02 05:19:57] [Rank 0] step:2221/10000 train_time:159852ms step_avg:71.97ms +[2025-09-02 05:19:58] [Rank 0] step:2241/10000 train_time:161330ms step_avg:71.99ms 
+[2025-09-02 05:19:58] [Rank 0] step:2241/10000 train_time:161330ms step_avg:71.99ms +[2025-09-02 05:20:00] [Rank 0] step:2261/10000 train_time:162818ms step_avg:72.01ms +[2025-09-02 05:20:00] [Rank 0] step:2261/10000 train_time:162818ms step_avg:72.01ms +[2025-09-02 05:20:01] [Rank 0] step:2281/10000 train_time:164306ms step_avg:72.03ms +[2025-09-02 05:20:01] [Rank 0] step:2281/10000 train_time:164306ms step_avg:72.03ms +[2025-09-02 05:20:03] [Rank 0] step:2301/10000 train_time:165795ms step_avg:72.05ms +[2025-09-02 05:20:03] [Rank 0] step:2301/10000 train_time:165795ms step_avg:72.05ms +[2025-09-02 05:20:04] [Rank 0] step:2321/10000 train_time:167284ms step_avg:72.07ms +[2025-09-02 05:20:04] [Rank 0] step:2321/10000 train_time:167284ms step_avg:72.07ms +[2025-09-02 05:20:06] [Rank 0] step:2341/10000 train_time:168774ms step_avg:72.09ms +[2025-09-02 05:20:06] [Rank 0] step:2341/10000 train_time:168774ms step_avg:72.09ms +[2025-09-02 05:20:07] [Rank 0] step:2361/10000 train_time:170264ms step_avg:72.12ms +[2025-09-02 05:20:07] [Rank 0] step:2361/10000 train_time:170264ms step_avg:72.12ms +[2025-09-02 05:20:09] [Rank 0] step:2381/10000 train_time:171756ms step_avg:72.14ms +[2025-09-02 05:20:09] [Rank 0] step:2381/10000 train_time:171756ms step_avg:72.14ms +[2025-09-02 05:20:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:20:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:20:22] [Rank 0] PRINT: step:2400/10000 val_loss:4.5669 svd_entropy: attn_qk:H=0.6641,top10E=0.38,eRank=93.8,q75/q25=52.97 attn_vo:H=0.7391,top10E=0.26,eRank=167.5,q75/q25=97.60 mlp_w1:H=0.6758,top10E=0.41,eRank=113.8,q75/q25=8.75 mlp_w2:H=0.8015,top10E=0.20,eRank=208.9,q75/q25=25.64 vo_prod:H=0.6342,top10E=0.39,eRank=72.7,q75/q25=10351.77 train_time:173395ms step_avg:72.25ms +[2025-09-02 05:20:22] [Rank 0] PRINT: step:2400/10000 val_loss:4.5669 svd_entropy: attn_qk:H=0.6641,top10E=0.38,eRank=93.8,q75/q25=52.97 attn_vo:H=0.7391,top10E=0.26,eRank=167.5,q75/q25=97.60 mlp_w1:H=0.6758,top10E=0.41,eRank=113.8,q75/q25=8.75 mlp_w2:H=0.8015,top10E=0.20,eRank=208.9,q75/q25=25.64 vo_prod:H=0.6342,top10E=0.39,eRank=72.7,q75/q25=10351.77 train_time:173395ms step_avg:72.25ms +[2025-09-02 05:20:22] [Rank 0] step:2401/10000 train_time:173406ms step_avg:72.22ms +[2025-09-02 05:20:22] [Rank 0] step:2401/10000 train_time:173406ms step_avg:72.22ms +[2025-09-02 05:20:23] [Rank 0] step:2421/10000 train_time:174775ms step_avg:72.19ms +[2025-09-02 05:20:23] [Rank 0] step:2421/10000 train_time:174775ms step_avg:72.19ms +[2025-09-02 05:20:25] [Rank 0] step:2441/10000 train_time:176262ms step_avg:72.21ms +[2025-09-02 05:20:25] [Rank 0] step:2441/10000 train_time:176262ms step_avg:72.21ms +[2025-09-02 05:20:26] [Rank 0] step:2461/10000 train_time:177750ms step_avg:72.23ms +[2025-09-02 05:20:26] [Rank 0] step:2461/10000 train_time:177750ms step_avg:72.23ms +[2025-09-02 05:20:28] [Rank 0] step:2481/10000 train_time:179238ms step_avg:72.24ms +[2025-09-02 05:20:28] [Rank 0] step:2481/10000 train_time:179238ms step_avg:72.24ms +[2025-09-02 05:20:29] [Rank 0] step:2501/10000 train_time:180776ms step_avg:72.28ms +[2025-09-02 05:20:29] [Rank 0] step:2501/10000 train_time:180776ms step_avg:72.28ms +[2025-09-02 05:20:31] [Rank 0] step:2521/10000 train_time:182264ms step_avg:72.30ms +[2025-09-02 05:20:31] [Rank 0] step:2521/10000 train_time:182264ms step_avg:72.30ms +[2025-09-02 
05:20:32] [Rank 0] step:2541/10000 train_time:183753ms step_avg:72.32ms +[2025-09-02 05:20:32] [Rank 0] step:2541/10000 train_time:183753ms step_avg:72.32ms +[2025-09-02 05:20:34] [Rank 0] step:2561/10000 train_time:185242ms step_avg:72.33ms +[2025-09-02 05:20:34] [Rank 0] step:2561/10000 train_time:185242ms step_avg:72.33ms +[2025-09-02 05:20:35] [Rank 0] step:2581/10000 train_time:186731ms step_avg:72.35ms +[2025-09-02 05:20:35] [Rank 0] step:2581/10000 train_time:186731ms step_avg:72.35ms +[2025-09-02 05:20:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:20:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:20:48] [Rank 0] PRINT: step:2600/10000 val_loss:4.5097 svd_entropy: attn_qk:H=0.6702,top10E=0.37,eRank=96.9,q75/q25=58.54 attn_vo:H=0.7465,top10E=0.25,eRank=174.1,q75/q25=100.81 mlp_w1:H=0.6839,top10E=0.40,eRank=118.7,q75/q25=9.47 mlp_w2:H=0.8073,top10E=0.19,eRank=217.5,q75/q25=27.50 vo_prod:H=0.6424,top10E=0.37,eRank=76.7,q75/q25=11721.17 train_time:188370ms step_avg:72.45ms +[2025-09-02 05:20:48] [Rank 0] PRINT: step:2600/10000 val_loss:4.5097 svd_entropy: attn_qk:H=0.6702,top10E=0.37,eRank=96.9,q75/q25=58.54 attn_vo:H=0.7465,top10E=0.25,eRank=174.1,q75/q25=100.81 mlp_w1:H=0.6839,top10E=0.40,eRank=118.7,q75/q25=9.47 mlp_w2:H=0.8073,top10E=0.19,eRank=217.5,q75/q25=27.50 vo_prod:H=0.6424,top10E=0.37,eRank=76.7,q75/q25=11721.17 train_time:188370ms step_avg:72.45ms +[2025-09-02 05:20:48] [Rank 0] step:2601/10000 train_time:188382ms step_avg:72.43ms +[2025-09-02 05:20:48] [Rank 0] step:2601/10000 train_time:188382ms step_avg:72.43ms +[2025-09-02 05:20:50] [Rank 0] step:2621/10000 train_time:189742ms step_avg:72.39ms +[2025-09-02 05:20:50] [Rank 0] step:2621/10000 train_time:189742ms step_avg:72.39ms +[2025-09-02 05:20:51] [Rank 0] step:2641/10000 train_time:191229ms 
step_avg:72.41ms +[2025-09-02 05:20:51] [Rank 0] step:2641/10000 train_time:191229ms step_avg:72.41ms +[2025-09-02 05:20:53] [Rank 0] step:2661/10000 train_time:192715ms step_avg:72.42ms +[2025-09-02 05:20:53] [Rank 0] step:2661/10000 train_time:192715ms step_avg:72.42ms +[2025-09-02 05:20:54] [Rank 0] step:2681/10000 train_time:194201ms step_avg:72.44ms +[2025-09-02 05:20:54] [Rank 0] step:2681/10000 train_time:194201ms step_avg:72.44ms +[2025-09-02 05:20:56] [Rank 0] step:2701/10000 train_time:195688ms step_avg:72.45ms +[2025-09-02 05:20:56] [Rank 0] step:2701/10000 train_time:195688ms step_avg:72.45ms +[2025-09-02 05:20:57] [Rank 0] step:2721/10000 train_time:197176ms step_avg:72.46ms +[2025-09-02 05:20:57] [Rank 0] step:2721/10000 train_time:197176ms step_avg:72.46ms +[2025-09-02 05:20:59] [Rank 0] step:2741/10000 train_time:198666ms step_avg:72.48ms +[2025-09-02 05:20:59] [Rank 0] step:2741/10000 train_time:198666ms step_avg:72.48ms +[2025-09-02 05:21:00] [Rank 0] step:2761/10000 train_time:200155ms step_avg:72.49ms +[2025-09-02 05:21:00] [Rank 0] step:2761/10000 train_time:200155ms step_avg:72.49ms +[2025-09-02 05:21:02] [Rank 0] step:2781/10000 train_time:201644ms step_avg:72.51ms +[2025-09-02 05:21:02] [Rank 0] step:2781/10000 train_time:201644ms step_avg:72.51ms +[2025-09-02 05:21:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:21:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:21:15] [Rank 0] PRINT: step:2800/10000 val_loss:4.4704 svd_entropy: attn_qk:H=0.6760,top10E=0.36,eRank=100.0,q75/q25=63.85 attn_vo:H=0.7533,top10E=0.24,eRank=180.5,q75/q25=103.09 mlp_w1:H=0.6916,top10E=0.39,eRank=123.5,q75/q25=10.21 mlp_w2:H=0.8126,top10E=0.18,eRank=225.4,q75/q25=29.03 vo_prod:H=0.6503,top10E=0.36,eRank=80.7,q75/q25=12435.60 train_time:203284ms step_avg:72.60ms +[2025-09-02 05:21:15] [Rank 0] PRINT: step:2800/10000 val_loss:4.4704 svd_entropy: attn_qk:H=0.6760,top10E=0.36,eRank=100.0,q75/q25=63.85 attn_vo:H=0.7533,top10E=0.24,eRank=180.5,q75/q25=103.09 mlp_w1:H=0.6916,top10E=0.39,eRank=123.5,q75/q25=10.21 mlp_w2:H=0.8126,top10E=0.18,eRank=225.4,q75/q25=29.03 vo_prod:H=0.6503,top10E=0.36,eRank=80.7,q75/q25=12435.60 train_time:203284ms step_avg:72.60ms +[2025-09-02 05:21:15] [Rank 0] step:2801/10000 train_time:203294ms step_avg:72.58ms +[2025-09-02 05:21:15] [Rank 0] step:2801/10000 train_time:203294ms step_avg:72.58ms +[2025-09-02 05:21:17] [Rank 0] step:2821/10000 train_time:204645ms step_avg:72.54ms +[2025-09-02 05:21:17] [Rank 0] step:2821/10000 train_time:204645ms step_avg:72.54ms +[2025-09-02 05:21:18] [Rank 0] step:2841/10000 train_time:206131ms step_avg:72.56ms +[2025-09-02 05:21:18] [Rank 0] step:2841/10000 train_time:206131ms step_avg:72.56ms +[2025-09-02 05:21:20] [Rank 0] step:2861/10000 train_time:207619ms step_avg:72.57ms +[2025-09-02 05:21:20] [Rank 0] step:2861/10000 train_time:207619ms step_avg:72.57ms +[2025-09-02 05:21:21] [Rank 0] step:2881/10000 train_time:209107ms step_avg:72.58ms +[2025-09-02 05:21:21] [Rank 0] step:2881/10000 train_time:209107ms step_avg:72.58ms +[2025-09-02 05:21:22] [Rank 0] step:2901/10000 train_time:210594ms step_avg:72.59ms +[2025-09-02 05:21:22] [Rank 0] step:2901/10000 train_time:210594ms step_avg:72.59ms +[2025-09-02 05:21:24] [Rank 0] step:2921/10000 train_time:212083ms step_avg:72.61ms +[2025-09-02 05:21:24] [Rank 0] step:2921/10000 train_time:212083ms step_avg:72.61ms +[2025-09-02 
05:21:25] [Rank 0] step:2941/10000 train_time:213571ms step_avg:72.62ms +[2025-09-02 05:21:25] [Rank 0] step:2941/10000 train_time:213571ms step_avg:72.62ms +[2025-09-02 05:21:27] [Rank 0] step:2961/10000 train_time:215060ms step_avg:72.63ms +[2025-09-02 05:21:27] [Rank 0] step:2961/10000 train_time:215060ms step_avg:72.63ms +[2025-09-02 05:21:28] [Rank 0] step:2981/10000 train_time:216555ms step_avg:72.64ms +[2025-09-02 05:21:28] [Rank 0] step:2981/10000 train_time:216555ms step_avg:72.64ms +[2025-09-02 05:21:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:21:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:21:41] [Rank 0] PRINT: step:3000/10000 val_loss:4.4274 svd_entropy: attn_qk:H=0.6811,top10E=0.36,eRank=102.8,q75/q25=68.86 attn_vo:H=0.7593,top10E=0.23,eRank=186.4,q75/q25=103.83 mlp_w1:H=0.6988,top10E=0.38,eRank=128.2,q75/q25=10.97 mlp_w2:H=0.8169,top10E=0.17,eRank=232.3,q75/q25=30.46 vo_prod:H=0.6571,top10E=0.35,eRank=84.4,q75/q25=13281.43 train_time:218201ms step_avg:72.73ms +[2025-09-02 05:21:41] [Rank 0] PRINT: step:3000/10000 val_loss:4.4274 svd_entropy: attn_qk:H=0.6811,top10E=0.36,eRank=102.8,q75/q25=68.86 attn_vo:H=0.7593,top10E=0.23,eRank=186.4,q75/q25=103.83 mlp_w1:H=0.6988,top10E=0.38,eRank=128.2,q75/q25=10.97 mlp_w2:H=0.8169,top10E=0.17,eRank=232.3,q75/q25=30.46 vo_prod:H=0.6571,top10E=0.35,eRank=84.4,q75/q25=13281.43 train_time:218201ms step_avg:72.73ms +[2025-09-02 05:21:42] [Rank 0] step:3001/10000 train_time:218211ms step_avg:72.71ms +[2025-09-02 05:21:42] [Rank 0] step:3001/10000 train_time:218211ms step_avg:72.71ms +[2025-09-02 05:21:43] [Rank 0] step:3021/10000 train_time:219580ms step_avg:72.68ms +[2025-09-02 05:21:43] [Rank 0] step:3021/10000 train_time:219580ms step_avg:72.68ms +[2025-09-02 05:21:45] [Rank 0] step:3041/10000 train_time:221075ms 
step_avg:72.70ms +[2025-09-02 05:21:45] [Rank 0] step:3041/10000 train_time:221075ms step_avg:72.70ms +[2025-09-02 05:21:46] [Rank 0] step:3061/10000 train_time:222572ms step_avg:72.71ms +[2025-09-02 05:21:46] [Rank 0] step:3061/10000 train_time:222572ms step_avg:72.71ms +[2025-09-02 05:21:48] [Rank 0] step:3081/10000 train_time:224068ms step_avg:72.73ms +[2025-09-02 05:21:48] [Rank 0] step:3081/10000 train_time:224068ms step_avg:72.73ms +[2025-09-02 05:21:49] [Rank 0] step:3101/10000 train_time:225565ms step_avg:72.74ms +[2025-09-02 05:21:49] [Rank 0] step:3101/10000 train_time:225565ms step_avg:72.74ms +[2025-09-02 05:21:51] [Rank 0] step:3121/10000 train_time:227062ms step_avg:72.75ms +[2025-09-02 05:21:51] [Rank 0] step:3121/10000 train_time:227062ms step_avg:72.75ms +[2025-09-02 05:21:52] [Rank 0] step:3141/10000 train_time:228559ms step_avg:72.77ms +[2025-09-02 05:21:52] [Rank 0] step:3141/10000 train_time:228559ms step_avg:72.77ms +[2025-09-02 05:21:54] [Rank 0] step:3161/10000 train_time:230057ms step_avg:72.78ms +[2025-09-02 05:21:54] [Rank 0] step:3161/10000 train_time:230057ms step_avg:72.78ms +[2025-09-02 05:21:55] [Rank 0] step:3181/10000 train_time:231556ms step_avg:72.79ms +[2025-09-02 05:21:55] [Rank 0] step:3181/10000 train_time:231556ms step_avg:72.79ms +[2025-09-02 05:21:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:21:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:22:08] [Rank 0] PRINT: step:3200/10000 val_loss:4.3922 svd_entropy: attn_qk:H=0.6859,top10E=0.35,eRank=105.5,q75/q25=72.86 attn_vo:H=0.7647,top10E=0.23,eRank=191.9,q75/q25=105.30 mlp_w1:H=0.7050,top10E=0.37,eRank=132.5,q75/q25=11.70 mlp_w2:H=0.8209,top10E=0.17,eRank=238.8,q75/q25=31.79 vo_prod:H=0.6632,top10E=0.34,eRank=87.9,q75/q25=13644.72 train_time:233204ms step_avg:72.88ms +[2025-09-02 05:22:08] [Rank 0] PRINT: step:3200/10000 val_loss:4.3922 svd_entropy: attn_qk:H=0.6859,top10E=0.35,eRank=105.5,q75/q25=72.86 attn_vo:H=0.7647,top10E=0.23,eRank=191.9,q75/q25=105.30 mlp_w1:H=0.7050,top10E=0.37,eRank=132.5,q75/q25=11.70 mlp_w2:H=0.8209,top10E=0.17,eRank=238.8,q75/q25=31.79 vo_prod:H=0.6632,top10E=0.34,eRank=87.9,q75/q25=13644.72 train_time:233204ms step_avg:72.88ms +[2025-09-02 05:22:08] [Rank 0] step:3201/10000 train_time:233215ms step_avg:72.86ms +[2025-09-02 05:22:08] [Rank 0] step:3201/10000 train_time:233215ms step_avg:72.86ms +[2025-09-02 05:22:10] [Rank 0] step:3221/10000 train_time:234562ms step_avg:72.82ms +[2025-09-02 05:22:10] [Rank 0] step:3221/10000 train_time:234562ms step_avg:72.82ms +[2025-09-02 05:22:11] [Rank 0] step:3241/10000 train_time:236055ms step_avg:72.83ms +[2025-09-02 05:22:11] [Rank 0] step:3241/10000 train_time:236055ms step_avg:72.83ms +[2025-09-02 05:22:13] [Rank 0] step:3261/10000 train_time:237549ms step_avg:72.85ms +[2025-09-02 05:22:13] [Rank 0] step:3261/10000 train_time:237549ms step_avg:72.85ms +[2025-09-02 05:22:14] [Rank 0] step:3281/10000 train_time:239045ms step_avg:72.86ms +[2025-09-02 05:22:14] [Rank 0] step:3281/10000 train_time:239045ms step_avg:72.86ms +[2025-09-02 05:22:16] [Rank 0] step:3301/10000 train_time:240540ms step_avg:72.87ms +[2025-09-02 05:22:16] [Rank 0] step:3301/10000 train_time:240540ms step_avg:72.87ms +[2025-09-02 05:22:17] [Rank 0] step:3321/10000 train_time:242036ms step_avg:72.88ms +[2025-09-02 05:22:17] [Rank 0] step:3321/10000 train_time:242036ms step_avg:72.88ms +[2025-09-02 
05:22:19] [Rank 0] step:3341/10000 train_time:243532ms step_avg:72.89ms +[2025-09-02 05:22:19] [Rank 0] step:3341/10000 train_time:243532ms step_avg:72.89ms +[2025-09-02 05:22:20] [Rank 0] step:3361/10000 train_time:245027ms step_avg:72.90ms +[2025-09-02 05:22:20] [Rank 0] step:3361/10000 train_time:245027ms step_avg:72.90ms +[2025-09-02 05:22:22] [Rank 0] step:3381/10000 train_time:246523ms step_avg:72.91ms +[2025-09-02 05:22:22] [Rank 0] step:3381/10000 train_time:246523ms step_avg:72.91ms +[2025-09-02 05:22:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:22:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:22:35] [Rank 0] PRINT: step:3400/10000 val_loss:4.3517 svd_entropy: attn_qk:H=0.6907,top10E=0.34,eRank=108.4,q75/q25=77.60 attn_vo:H=0.7699,top10E=0.22,eRank=197.4,q75/q25=105.01 mlp_w1:H=0.7107,top10E=0.37,eRank=136.6,q75/q25=12.43 mlp_w2:H=0.8247,top10E=0.16,eRank=245.2,q75/q25=33.08 vo_prod:H=0.6692,top10E=0.33,eRank=91.5,q75/q25=13968.68 train_time:248170ms step_avg:72.99ms +[2025-09-02 05:22:35] [Rank 0] PRINT: step:3400/10000 val_loss:4.3517 svd_entropy: attn_qk:H=0.6907,top10E=0.34,eRank=108.4,q75/q25=77.60 attn_vo:H=0.7699,top10E=0.22,eRank=197.4,q75/q25=105.01 mlp_w1:H=0.7107,top10E=0.37,eRank=136.6,q75/q25=12.43 mlp_w2:H=0.8247,top10E=0.16,eRank=245.2,q75/q25=33.08 vo_prod:H=0.6692,top10E=0.33,eRank=91.5,q75/q25=13968.68 train_time:248170ms step_avg:72.99ms +[2025-09-02 05:22:35] [Rank 0] step:3401/10000 train_time:248180ms step_avg:72.97ms +[2025-09-02 05:22:35] [Rank 0] step:3401/10000 train_time:248180ms step_avg:72.97ms +[2025-09-02 05:22:36] [Rank 0] step:3421/10000 train_time:249535ms step_avg:72.94ms +[2025-09-02 05:22:36] [Rank 0] step:3421/10000 train_time:249535ms step_avg:72.94ms +[2025-09-02 05:22:38] [Rank 0] step:3441/10000 train_time:251027ms 
step_avg:72.95ms +[2025-09-02 05:22:38] [Rank 0] step:3441/10000 train_time:251027ms step_avg:72.95ms +[2025-09-02 05:22:39] [Rank 0] step:3461/10000 train_time:252521ms step_avg:72.96ms +[2025-09-02 05:22:39] [Rank 0] step:3461/10000 train_time:252521ms step_avg:72.96ms +[2025-09-02 05:22:41] [Rank 0] step:3481/10000 train_time:254015ms step_avg:72.97ms +[2025-09-02 05:22:41] [Rank 0] step:3481/10000 train_time:254015ms step_avg:72.97ms +[2025-09-02 05:22:42] [Rank 0] step:3501/10000 train_time:255512ms step_avg:72.98ms +[2025-09-02 05:22:42] [Rank 0] step:3501/10000 train_time:255512ms step_avg:72.98ms +[2025-09-02 05:22:44] [Rank 0] step:3521/10000 train_time:257007ms step_avg:72.99ms +[2025-09-02 05:22:44] [Rank 0] step:3521/10000 train_time:257007ms step_avg:72.99ms +[2025-09-02 05:22:45] [Rank 0] step:3541/10000 train_time:258501ms step_avg:73.00ms +[2025-09-02 05:22:45] [Rank 0] step:3541/10000 train_time:258501ms step_avg:73.00ms +[2025-09-02 05:22:47] [Rank 0] step:3561/10000 train_time:259999ms step_avg:73.01ms +[2025-09-02 05:22:47] [Rank 0] step:3561/10000 train_time:259999ms step_avg:73.01ms +[2025-09-02 05:22:48] [Rank 0] step:3581/10000 train_time:261494ms step_avg:73.02ms +[2025-09-02 05:22:48] [Rank 0] step:3581/10000 train_time:261494ms step_avg:73.02ms +[2025-09-02 05:22:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:22:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:23:01] [Rank 0] PRINT: step:3600/10000 val_loss:4.3390 svd_entropy: attn_qk:H=0.6951,top10E=0.33,eRank=111.1,q75/q25=80.76 attn_vo:H=0.7746,top10E=0.21,eRank=202.4,q75/q25=104.55 mlp_w1:H=0.7163,top10E=0.36,eRank=140.7,q75/q25=13.14 mlp_w2:H=0.8280,top10E=0.16,eRank=250.8,q75/q25=34.23 vo_prod:H=0.6746,top10E=0.32,eRank=94.8,q75/q25=13725.29 train_time:263140ms step_avg:73.09ms +[2025-09-02 05:23:01] [Rank 0] PRINT: step:3600/10000 val_loss:4.3390 svd_entropy: attn_qk:H=0.6951,top10E=0.33,eRank=111.1,q75/q25=80.76 attn_vo:H=0.7746,top10E=0.21,eRank=202.4,q75/q25=104.55 mlp_w1:H=0.7163,top10E=0.36,eRank=140.7,q75/q25=13.14 mlp_w2:H=0.8280,top10E=0.16,eRank=250.8,q75/q25=34.23 vo_prod:H=0.6746,top10E=0.32,eRank=94.8,q75/q25=13725.29 train_time:263140ms step_avg:73.09ms +[2025-09-02 05:23:01] [Rank 0] step:3601/10000 train_time:263151ms step_avg:73.08ms +[2025-09-02 05:23:01] [Rank 0] step:3601/10000 train_time:263151ms step_avg:73.08ms +[2025-09-02 05:23:03] [Rank 0] step:3621/10000 train_time:264505ms step_avg:73.05ms +[2025-09-02 05:23:03] [Rank 0] step:3621/10000 train_time:264505ms step_avg:73.05ms +[2025-09-02 05:23:04] [Rank 0] step:3641/10000 train_time:266000ms step_avg:73.06ms +[2025-09-02 05:23:04] [Rank 0] step:3641/10000 train_time:266000ms step_avg:73.06ms +[2025-09-02 05:23:06] [Rank 0] step:3661/10000 train_time:267496ms step_avg:73.07ms +[2025-09-02 05:23:06] [Rank 0] step:3661/10000 train_time:267496ms step_avg:73.07ms +[2025-09-02 05:23:07] [Rank 0] step:3681/10000 train_time:268992ms step_avg:73.08ms +[2025-09-02 05:23:07] [Rank 0] step:3681/10000 train_time:268992ms step_avg:73.08ms +[2025-09-02 05:23:09] [Rank 0] step:3701/10000 train_time:270489ms step_avg:73.09ms +[2025-09-02 05:23:09] [Rank 0] step:3701/10000 train_time:270489ms step_avg:73.09ms +[2025-09-02 05:23:10] [Rank 0] step:3721/10000 train_time:272012ms step_avg:73.10ms +[2025-09-02 05:23:10] [Rank 0] step:3721/10000 train_time:272012ms step_avg:73.10ms +[2025-09-02 
05:23:12] [Rank 0] step:3741/10000 train_time:273544ms step_avg:73.12ms +[2025-09-02 05:23:12] [Rank 0] step:3741/10000 train_time:273544ms step_avg:73.12ms +[2025-09-02 05:23:14] [Rank 0] step:3761/10000 train_time:275075ms step_avg:73.14ms +[2025-09-02 05:23:14] [Rank 0] step:3761/10000 train_time:275075ms step_avg:73.14ms +[2025-09-02 05:23:15] [Rank 0] step:3781/10000 train_time:276607ms step_avg:73.16ms +[2025-09-02 05:23:15] [Rank 0] step:3781/10000 train_time:276607ms step_avg:73.16ms +[2025-09-02 05:23:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:23:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:23:28] [Rank 0] PRINT: step:3800/10000 val_loss:4.2808 svd_entropy: attn_qk:H=0.6988,top10E=0.33,eRank=113.4,q75/q25=84.99 attn_vo:H=0.7788,top10E=0.21,eRank=207.2,q75/q25=103.75 mlp_w1:H=0.7213,top10E=0.35,eRank=144.5,q75/q25=13.86 mlp_w2:H=0.8309,top10E=0.16,eRank=256.0,q75/q25=35.23 vo_prod:H=0.6794,top10E=0.32,eRank=97.9,q75/q25=13776.26 train_time:278295ms step_avg:73.24ms +[2025-09-02 05:23:28] [Rank 0] PRINT: step:3800/10000 val_loss:4.2808 svd_entropy: attn_qk:H=0.6988,top10E=0.33,eRank=113.4,q75/q25=84.99 attn_vo:H=0.7788,top10E=0.21,eRank=207.2,q75/q25=103.75 mlp_w1:H=0.7213,top10E=0.35,eRank=144.5,q75/q25=13.86 mlp_w2:H=0.8309,top10E=0.16,eRank=256.0,q75/q25=35.23 vo_prod:H=0.6794,top10E=0.32,eRank=97.9,q75/q25=13776.26 train_time:278295ms step_avg:73.24ms +[2025-09-02 05:23:28] [Rank 0] step:3801/10000 train_time:278305ms step_avg:73.22ms +[2025-09-02 05:23:28] [Rank 0] step:3801/10000 train_time:278305ms step_avg:73.22ms +[2025-09-02 05:23:30] [Rank 0] step:3821/10000 train_time:279695ms step_avg:73.20ms +[2025-09-02 05:23:30] [Rank 0] step:3821/10000 train_time:279695ms step_avg:73.20ms +[2025-09-02 05:23:31] [Rank 0] step:3841/10000 train_time:281229ms 
step_avg:73.22ms +[2025-09-02 05:23:31] [Rank 0] step:3841/10000 train_time:281229ms step_avg:73.22ms +[2025-09-02 05:23:33] [Rank 0] step:3861/10000 train_time:282762ms step_avg:73.24ms +[2025-09-02 05:23:33] [Rank 0] step:3861/10000 train_time:282762ms step_avg:73.24ms +[2025-09-02 05:23:34] [Rank 0] step:3881/10000 train_time:284294ms step_avg:73.25ms +[2025-09-02 05:23:34] [Rank 0] step:3881/10000 train_time:284294ms step_avg:73.25ms +[2025-09-02 05:23:36] [Rank 0] step:3901/10000 train_time:285824ms step_avg:73.27ms +[2025-09-02 05:23:36] [Rank 0] step:3901/10000 train_time:285824ms step_avg:73.27ms +[2025-09-02 05:23:37] [Rank 0] step:3921/10000 train_time:287357ms step_avg:73.29ms +[2025-09-02 05:23:37] [Rank 0] step:3921/10000 train_time:287357ms step_avg:73.29ms +[2025-09-02 05:23:39] [Rank 0] step:3941/10000 train_time:288955ms step_avg:73.32ms +[2025-09-02 05:23:39] [Rank 0] step:3941/10000 train_time:288955ms step_avg:73.32ms +[2025-09-02 05:23:41] [Rank 0] step:3961/10000 train_time:290487ms step_avg:73.34ms +[2025-09-02 05:23:41] [Rank 0] step:3961/10000 train_time:290487ms step_avg:73.34ms +[2025-09-02 05:23:42] [Rank 0] step:3981/10000 train_time:292020ms step_avg:73.35ms +[2025-09-02 05:23:42] [Rank 0] step:3981/10000 train_time:292020ms step_avg:73.35ms +[2025-09-02 05:23:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:23:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:23:55] [Rank 0] PRINT: step:4000/10000 val_loss:4.2555 svd_entropy: attn_qk:H=0.7027,top10E=0.32,eRank=115.9,q75/q25=88.07 attn_vo:H=0.7828,top10E=0.20,eRank=211.8,q75/q25=102.00 mlp_w1:H=0.7261,top10E=0.34,eRank=148.4,q75/q25=14.49 mlp_w2:H=0.8338,top10E=0.15,eRank=261.0,q75/q25=36.36 vo_prod:H=0.6841,top10E=0.31,eRank=101.0,q75/q25=13254.40 train_time:293704ms step_avg:73.43ms +[2025-09-02 05:23:55] [Rank 0] PRINT: step:4000/10000 val_loss:4.2555 svd_entropy: attn_qk:H=0.7027,top10E=0.32,eRank=115.9,q75/q25=88.07 attn_vo:H=0.7828,top10E=0.20,eRank=211.8,q75/q25=102.00 mlp_w1:H=0.7261,top10E=0.34,eRank=148.4,q75/q25=14.49 mlp_w2:H=0.8338,top10E=0.15,eRank=261.0,q75/q25=36.36 vo_prod:H=0.6841,top10E=0.31,eRank=101.0,q75/q25=13254.40 train_time:293704ms step_avg:73.43ms +[2025-09-02 05:23:55] [Rank 0] step:4001/10000 train_time:293715ms step_avg:73.41ms +[2025-09-02 05:23:55] [Rank 0] step:4001/10000 train_time:293715ms step_avg:73.41ms +[2025-09-02 05:23:57] [Rank 0] step:4021/10000 train_time:295108ms step_avg:73.39ms +[2025-09-02 05:23:57] [Rank 0] step:4021/10000 train_time:295108ms step_avg:73.39ms +[2025-09-02 05:23:58] [Rank 0] step:4041/10000 train_time:296640ms step_avg:73.41ms +[2025-09-02 05:23:58] [Rank 0] step:4041/10000 train_time:296640ms step_avg:73.41ms +[2025-09-02 05:24:00] [Rank 0] step:4061/10000 train_time:298172ms step_avg:73.42ms +[2025-09-02 05:24:00] [Rank 0] step:4061/10000 train_time:298172ms step_avg:73.42ms +[2025-09-02 05:24:02] [Rank 0] step:4081/10000 train_time:299814ms step_avg:73.47ms +[2025-09-02 05:24:02] [Rank 0] step:4081/10000 train_time:299814ms step_avg:73.47ms +[2025-09-02 05:24:03] [Rank 0] step:4101/10000 train_time:301350ms step_avg:73.48ms +[2025-09-02 05:24:03] [Rank 0] step:4101/10000 train_time:301350ms step_avg:73.48ms +[2025-09-02 05:24:05] [Rank 0] step:4121/10000 train_time:302884ms step_avg:73.50ms +[2025-09-02 05:24:05] [Rank 0] step:4121/10000 train_time:302884ms step_avg:73.50ms +[2025-09-02 
05:24:06] [Rank 0] step:4141/10000 train_time:304422ms step_avg:73.51ms +[2025-09-02 05:24:06] [Rank 0] step:4141/10000 train_time:304422ms step_avg:73.51ms +[2025-09-02 05:24:08] [Rank 0] step:4161/10000 train_time:305956ms step_avg:73.53ms +[2025-09-02 05:24:08] [Rank 0] step:4161/10000 train_time:305956ms step_avg:73.53ms +[2025-09-02 05:24:09] [Rank 0] step:4181/10000 train_time:307493ms step_avg:73.55ms +[2025-09-02 05:24:09] [Rank 0] step:4181/10000 train_time:307493ms step_avg:73.55ms +[2025-09-02 05:24:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:24:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:24:22] [Rank 0] PRINT: step:4200/10000 val_loss:4.2384 svd_entropy: attn_qk:H=0.7064,top10E=0.32,eRank=118.4,q75/q25=91.14 attn_vo:H=0.7866,top10E=0.20,eRank=216.3,q75/q25=100.20 mlp_w1:H=0.7305,top10E=0.34,eRank=152.0,q75/q25=15.21 mlp_w2:H=0.8362,top10E=0.15,eRank=265.6,q75/q25=37.64 vo_prod:H=0.6882,top10E=0.30,eRank=103.8,q75/q25=12859.80 train_time:309183ms step_avg:73.61ms +[2025-09-02 05:24:22] [Rank 0] PRINT: step:4200/10000 val_loss:4.2384 svd_entropy: attn_qk:H=0.7064,top10E=0.32,eRank=118.4,q75/q25=91.14 attn_vo:H=0.7866,top10E=0.20,eRank=216.3,q75/q25=100.20 mlp_w1:H=0.7305,top10E=0.34,eRank=152.0,q75/q25=15.21 mlp_w2:H=0.8362,top10E=0.15,eRank=265.6,q75/q25=37.64 vo_prod:H=0.6882,top10E=0.30,eRank=103.8,q75/q25=12859.80 train_time:309183ms step_avg:73.61ms +[2025-09-02 05:24:22] [Rank 0] step:4201/10000 train_time:309193ms step_avg:73.60ms +[2025-09-02 05:24:22] [Rank 0] step:4201/10000 train_time:309193ms step_avg:73.60ms +[2025-09-02 05:24:24] [Rank 0] step:4221/10000 train_time:310579ms step_avg:73.58ms +[2025-09-02 05:24:24] [Rank 0] step:4221/10000 train_time:310579ms step_avg:73.58ms +[2025-09-02 05:24:25] [Rank 0] step:4241/10000 
train_time:312112ms step_avg:73.59ms +[2025-09-02 05:24:25] [Rank 0] step:4241/10000 train_time:312112ms step_avg:73.59ms +[2025-09-02 05:24:27] [Rank 0] step:4261/10000 train_time:313645ms step_avg:73.61ms +[2025-09-02 05:24:27] [Rank 0] step:4261/10000 train_time:313645ms step_avg:73.61ms +[2025-09-02 05:24:29] [Rank 0] step:4281/10000 train_time:315175ms step_avg:73.62ms +[2025-09-02 05:24:29] [Rank 0] step:4281/10000 train_time:315175ms step_avg:73.62ms +[2025-09-02 05:24:30] [Rank 0] step:4301/10000 train_time:316707ms step_avg:73.64ms +[2025-09-02 05:24:30] [Rank 0] step:4301/10000 train_time:316707ms step_avg:73.64ms +[2025-09-02 05:24:32] [Rank 0] step:4321/10000 train_time:318242ms step_avg:73.65ms +[2025-09-02 05:24:32] [Rank 0] step:4321/10000 train_time:318242ms step_avg:73.65ms +[2025-09-02 05:24:33] [Rank 0] step:4341/10000 train_time:319772ms step_avg:73.66ms +[2025-09-02 05:24:33] [Rank 0] step:4341/10000 train_time:319772ms step_avg:73.66ms +[2025-09-02 05:24:35] [Rank 0] step:4361/10000 train_time:321305ms step_avg:73.68ms +[2025-09-02 05:24:35] [Rank 0] step:4361/10000 train_time:321305ms step_avg:73.68ms +[2025-09-02 05:24:36] [Rank 0] step:4381/10000 train_time:322840ms step_avg:73.69ms +[2025-09-02 05:24:36] [Rank 0] step:4381/10000 train_time:322840ms step_avg:73.69ms +[2025-09-02 05:24:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:24:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:24:49] [Rank 0] PRINT: step:4400/10000 val_loss:4.2145 svd_entropy: attn_qk:H=0.7100,top10E=0.31,eRank=120.8,q75/q25=93.94 attn_vo:H=0.7900,top10E=0.20,eRank=220.4,q75/q25=98.27 mlp_w1:H=0.7345,top10E=0.33,eRank=155.4,q75/q25=15.88 mlp_w2:H=0.8386,top10E=0.15,eRank=270.0,q75/q25=38.43 vo_prod:H=0.6920,top10E=0.30,eRank=106.5,q75/q25=12561.05 train_time:324535ms step_avg:73.76ms +[2025-09-02 05:24:49] [Rank 0] PRINT: step:4400/10000 val_loss:4.2145 svd_entropy: attn_qk:H=0.7100,top10E=0.31,eRank=120.8,q75/q25=93.94 attn_vo:H=0.7900,top10E=0.20,eRank=220.4,q75/q25=98.27 mlp_w1:H=0.7345,top10E=0.33,eRank=155.4,q75/q25=15.88 mlp_w2:H=0.8386,top10E=0.15,eRank=270.0,q75/q25=38.43 vo_prod:H=0.6920,top10E=0.30,eRank=106.5,q75/q25=12561.05 train_time:324535ms step_avg:73.76ms +[2025-09-02 05:24:49] [Rank 0] step:4401/10000 train_time:324545ms step_avg:73.74ms +[2025-09-02 05:24:49] [Rank 0] step:4401/10000 train_time:324545ms step_avg:73.74ms +[2025-09-02 05:24:51] [Rank 0] step:4421/10000 train_time:325941ms step_avg:73.73ms +[2025-09-02 05:24:51] [Rank 0] step:4421/10000 train_time:325941ms step_avg:73.73ms +[2025-09-02 05:24:52] [Rank 0] step:4441/10000 train_time:327469ms step_avg:73.74ms +[2025-09-02 05:24:52] [Rank 0] step:4441/10000 train_time:327469ms step_avg:73.74ms +[2025-09-02 05:24:54] [Rank 0] step:4461/10000 train_time:329005ms step_avg:73.75ms +[2025-09-02 05:24:54] [Rank 0] step:4461/10000 train_time:329005ms step_avg:73.75ms +[2025-09-02 05:24:56] [Rank 0] step:4481/10000 train_time:330544ms step_avg:73.77ms +[2025-09-02 05:24:56] [Rank 0] step:4481/10000 train_time:330544ms step_avg:73.77ms +[2025-09-02 05:24:57] [Rank 0] step:4501/10000 train_time:332081ms step_avg:73.78ms +[2025-09-02 05:24:57] [Rank 0] step:4501/10000 train_time:332081ms step_avg:73.78ms +[2025-09-02 05:24:59] [Rank 0] step:4521/10000 train_time:333616ms step_avg:73.79ms +[2025-09-02 05:24:59] [Rank 0] step:4521/10000 train_time:333616ms step_avg:73.79ms +[2025-09-02 
05:25:00] [Rank 0] step:4541/10000 train_time:335154ms step_avg:73.81ms +[2025-09-02 05:25:00] [Rank 0] step:4541/10000 train_time:335154ms step_avg:73.81ms +[2025-09-02 05:25:02] [Rank 0] step:4561/10000 train_time:336693ms step_avg:73.82ms +[2025-09-02 05:25:02] [Rank 0] step:4561/10000 train_time:336693ms step_avg:73.82ms +[2025-09-02 05:25:03] [Rank 0] step:4581/10000 train_time:338232ms step_avg:73.83ms +[2025-09-02 05:25:03] [Rank 0] step:4581/10000 train_time:338232ms step_avg:73.83ms +[2025-09-02 05:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:25:16] [Rank 0] PRINT: step:4600/10000 val_loss:4.1798 svd_entropy: attn_qk:H=0.7135,top10E=0.31,eRank=123.2,q75/q25=96.13 attn_vo:H=0.7933,top10E=0.19,eRank=224.5,q75/q25=96.35 mlp_w1:H=0.7383,top10E=0.33,eRank=158.8,q75/q25=16.53 mlp_w2:H=0.8409,top10E=0.14,eRank=274.4,q75/q25=39.19 vo_prod:H=0.6958,top10E=0.30,eRank=109.2,q75/q25=12108.16 train_time:339926ms step_avg:73.90ms +[2025-09-02 05:25:16] [Rank 0] PRINT: step:4600/10000 val_loss:4.1798 svd_entropy: attn_qk:H=0.7135,top10E=0.31,eRank=123.2,q75/q25=96.13 attn_vo:H=0.7933,top10E=0.19,eRank=224.5,q75/q25=96.35 mlp_w1:H=0.7383,top10E=0.33,eRank=158.8,q75/q25=16.53 mlp_w2:H=0.8409,top10E=0.14,eRank=274.4,q75/q25=39.19 vo_prod:H=0.6958,top10E=0.30,eRank=109.2,q75/q25=12108.16 train_time:339926ms step_avg:73.90ms +[2025-09-02 05:25:16] [Rank 0] step:4601/10000 train_time:339936ms step_avg:73.88ms +[2025-09-02 05:25:16] [Rank 0] step:4601/10000 train_time:339936ms step_avg:73.88ms +[2025-09-02 05:25:18] [Rank 0] step:4621/10000 train_time:341342ms step_avg:73.87ms +[2025-09-02 05:25:18] [Rank 0] step:4621/10000 train_time:341342ms step_avg:73.87ms +[2025-09-02 05:25:20] [Rank 0] step:4641/10000 train_time:342879ms 
step_avg:73.88ms +[2025-09-02 05:25:20] [Rank 0] step:4641/10000 train_time:342879ms step_avg:73.88ms +[2025-09-02 05:25:21] [Rank 0] step:4661/10000 train_time:344417ms step_avg:73.89ms +[2025-09-02 05:25:21] [Rank 0] step:4661/10000 train_time:344417ms step_avg:73.89ms +[2025-09-02 05:25:23] [Rank 0] step:4681/10000 train_time:345955ms step_avg:73.91ms +[2025-09-02 05:25:23] [Rank 0] step:4681/10000 train_time:345955ms step_avg:73.91ms +[2025-09-02 05:25:24] [Rank 0] step:4701/10000 train_time:347493ms step_avg:73.92ms +[2025-09-02 05:25:24] [Rank 0] step:4701/10000 train_time:347493ms step_avg:73.92ms +[2025-09-02 05:25:26] [Rank 0] step:4721/10000 train_time:349031ms step_avg:73.93ms +[2025-09-02 05:25:26] [Rank 0] step:4721/10000 train_time:349031ms step_avg:73.93ms +[2025-09-02 05:25:27] [Rank 0] step:4741/10000 train_time:350571ms step_avg:73.94ms +[2025-09-02 05:25:27] [Rank 0] step:4741/10000 train_time:350571ms step_avg:73.94ms +[2025-09-02 05:25:29] [Rank 0] step:4761/10000 train_time:352111ms step_avg:73.96ms +[2025-09-02 05:25:29] [Rank 0] step:4761/10000 train_time:352111ms step_avg:73.96ms +[2025-09-02 05:25:30] [Rank 0] step:4781/10000 train_time:353648ms step_avg:73.97ms +[2025-09-02 05:25:30] [Rank 0] step:4781/10000 train_time:353648ms step_avg:73.97ms +[2025-09-02 05:25:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:25:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:25:43] [Rank 0] PRINT: step:4800/10000 val_loss:4.1672 svd_entropy: attn_qk:H=0.7166,top10E=0.30,eRank=125.4,q75/q25=98.55 attn_vo:H=0.7964,top10E=0.19,eRank=228.4,q75/q25=94.04 mlp_w1:H=0.7418,top10E=0.32,eRank=161.9,q75/q25=17.05 mlp_w2:H=0.8429,top10E=0.14,eRank=278.3,q75/q25=40.13 vo_prod:H=0.6995,top10E=0.29,eRank=112.0,q75/q25=11389.32 train_time:355343ms step_avg:74.03ms +[2025-09-02 05:25:43] [Rank 0] PRINT: step:4800/10000 val_loss:4.1672 svd_entropy: attn_qk:H=0.7166,top10E=0.30,eRank=125.4,q75/q25=98.55 attn_vo:H=0.7964,top10E=0.19,eRank=228.4,q75/q25=94.04 mlp_w1:H=0.7418,top10E=0.32,eRank=161.9,q75/q25=17.05 mlp_w2:H=0.8429,top10E=0.14,eRank=278.3,q75/q25=40.13 vo_prod:H=0.6995,top10E=0.29,eRank=112.0,q75/q25=11389.32 train_time:355343ms step_avg:74.03ms +[2025-09-02 05:25:43] [Rank 0] step:4801/10000 train_time:355353ms step_avg:74.02ms +[2025-09-02 05:25:43] [Rank 0] step:4801/10000 train_time:355353ms step_avg:74.02ms +[2025-09-02 05:25:45] [Rank 0] step:4821/10000 train_time:356763ms step_avg:74.00ms +[2025-09-02 05:25:45] [Rank 0] step:4821/10000 train_time:356763ms step_avg:74.00ms +[2025-09-02 05:25:47] [Rank 0] step:4841/10000 train_time:358300ms step_avg:74.01ms +[2025-09-02 05:25:47] [Rank 0] step:4841/10000 train_time:358300ms step_avg:74.01ms +[2025-09-02 05:25:48] [Rank 0] step:4861/10000 train_time:359841ms step_avg:74.03ms +[2025-09-02 05:25:48] [Rank 0] step:4861/10000 train_time:359841ms step_avg:74.03ms +[2025-09-02 05:25:50] [Rank 0] step:4881/10000 train_time:361378ms step_avg:74.04ms +[2025-09-02 05:25:50] [Rank 0] step:4881/10000 train_time:361378ms step_avg:74.04ms +[2025-09-02 05:25:51] [Rank 0] step:4901/10000 train_time:362916ms step_avg:74.05ms +[2025-09-02 05:25:51] [Rank 0] step:4901/10000 train_time:362916ms step_avg:74.05ms +[2025-09-02 05:25:53] [Rank 0] step:4921/10000 train_time:364459ms step_avg:74.06ms +[2025-09-02 05:25:53] [Rank 0] step:4921/10000 train_time:364459ms step_avg:74.06ms +[2025-09-02 
05:25:54] [Rank 0] step:4941/10000 train_time:366002ms step_avg:74.07ms +[2025-09-02 05:25:54] [Rank 0] step:4941/10000 train_time:366002ms step_avg:74.07ms +[2025-09-02 05:25:56] [Rank 0] step:4961/10000 train_time:367540ms step_avg:74.09ms +[2025-09-02 05:25:56] [Rank 0] step:4961/10000 train_time:367540ms step_avg:74.09ms +[2025-09-02 05:25:57] [Rank 0] step:4981/10000 train_time:369082ms step_avg:74.10ms +[2025-09-02 05:25:57] [Rank 0] step:4981/10000 train_time:369082ms step_avg:74.10ms +[2025-09-02 05:25:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:25:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:26:11] [Rank 0] PRINT: step:5000/10000 val_loss:4.1422 svd_entropy: attn_qk:H=0.7195,top10E=0.30,eRank=127.6,q75/q25=99.90 attn_vo:H=0.7993,top10E=0.18,eRank=232.1,q75/q25=91.65 mlp_w1:H=0.7452,top10E=0.32,eRank=165.0,q75/q25=17.72 mlp_w2:H=0.8448,top10E=0.14,eRank=282.1,q75/q25=40.75 vo_prod:H=0.7027,top10E=0.29,eRank=114.4,q75/q25=11007.15 train_time:370777ms step_avg:74.16ms +[2025-09-02 05:26:11] [Rank 0] PRINT: step:5000/10000 val_loss:4.1422 svd_entropy: attn_qk:H=0.7195,top10E=0.30,eRank=127.6,q75/q25=99.90 attn_vo:H=0.7993,top10E=0.18,eRank=232.1,q75/q25=91.65 mlp_w1:H=0.7452,top10E=0.32,eRank=165.0,q75/q25=17.72 mlp_w2:H=0.8448,top10E=0.14,eRank=282.1,q75/q25=40.75 vo_prod:H=0.7027,top10E=0.29,eRank=114.4,q75/q25=11007.15 train_time:370777ms step_avg:74.16ms +[2025-09-02 05:26:11] [Rank 0] step:5001/10000 train_time:370787ms step_avg:74.14ms +[2025-09-02 05:26:11] [Rank 0] step:5001/10000 train_time:370787ms step_avg:74.14ms +[2025-09-02 05:26:12] [Rank 0] step:5021/10000 train_time:372185ms step_avg:74.13ms +[2025-09-02 05:26:12] [Rank 0] step:5021/10000 train_time:372185ms step_avg:74.13ms +[2025-09-02 05:26:14] [Rank 0] step:5041/10000 train_time:373723ms 
step_avg:74.14ms +[2025-09-02 05:26:14] [Rank 0] step:5041/10000 train_time:373723ms step_avg:74.14ms +[2025-09-02 05:26:15] [Rank 0] step:5061/10000 train_time:375261ms step_avg:74.15ms +[2025-09-02 05:26:15] [Rank 0] step:5061/10000 train_time:375261ms step_avg:74.15ms +[2025-09-02 05:26:17] [Rank 0] step:5081/10000 train_time:376799ms step_avg:74.16ms +[2025-09-02 05:26:17] [Rank 0] step:5081/10000 train_time:376799ms step_avg:74.16ms +[2025-09-02 05:26:18] [Rank 0] step:5101/10000 train_time:378338ms step_avg:74.17ms +[2025-09-02 05:26:18] [Rank 0] step:5101/10000 train_time:378338ms step_avg:74.17ms +[2025-09-02 05:26:20] [Rank 0] step:5121/10000 train_time:379878ms step_avg:74.18ms +[2025-09-02 05:26:20] [Rank 0] step:5121/10000 train_time:379878ms step_avg:74.18ms +[2025-09-02 05:26:21] [Rank 0] step:5141/10000 train_time:381422ms step_avg:74.19ms +[2025-09-02 05:26:21] [Rank 0] step:5141/10000 train_time:381422ms step_avg:74.19ms +[2025-09-02 05:26:23] [Rank 0] step:5161/10000 train_time:382963ms step_avg:74.20ms +[2025-09-02 05:26:23] [Rank 0] step:5161/10000 train_time:382963ms step_avg:74.20ms +[2025-09-02 05:26:24] [Rank 0] step:5181/10000 train_time:384505ms step_avg:74.21ms +[2025-09-02 05:26:24] [Rank 0] step:5181/10000 train_time:384505ms step_avg:74.21ms +[2025-09-02 05:26:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:26:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:26:38] [Rank 0] PRINT: step:5200/10000 val_loss:4.1206 svd_entropy: attn_qk:H=0.7222,top10E=0.29,eRank=129.6,q75/q25=101.78 attn_vo:H=0.8021,top10E=0.18,eRank=235.7,q75/q25=89.87 mlp_w1:H=0.7484,top10E=0.31,eRank=168.1,q75/q25=18.35 mlp_w2:H=0.8466,top10E=0.14,eRank=285.7,q75/q25=41.61 vo_prod:H=0.7059,top10E=0.28,eRank=116.9,q75/q25=10316.28 train_time:386227ms step_avg:74.27ms +[2025-09-02 05:26:38] [Rank 0] PRINT: step:5200/10000 val_loss:4.1206 svd_entropy: attn_qk:H=0.7222,top10E=0.29,eRank=129.6,q75/q25=101.78 attn_vo:H=0.8021,top10E=0.18,eRank=235.7,q75/q25=89.87 mlp_w1:H=0.7484,top10E=0.31,eRank=168.1,q75/q25=18.35 mlp_w2:H=0.8466,top10E=0.14,eRank=285.7,q75/q25=41.61 vo_prod:H=0.7059,top10E=0.28,eRank=116.9,q75/q25=10316.28 train_time:386227ms step_avg:74.27ms +[2025-09-02 05:26:38] [Rank 0] step:5201/10000 train_time:386237ms step_avg:74.26ms +[2025-09-02 05:26:38] [Rank 0] step:5201/10000 train_time:386237ms step_avg:74.26ms +[2025-09-02 05:26:39] [Rank 0] step:5221/10000 train_time:387656ms step_avg:74.25ms +[2025-09-02 05:26:39] [Rank 0] step:5221/10000 train_time:387656ms step_avg:74.25ms +[2025-09-02 05:26:41] [Rank 0] step:5241/10000 train_time:389225ms step_avg:74.27ms +[2025-09-02 05:26:41] [Rank 0] step:5241/10000 train_time:389225ms step_avg:74.27ms +[2025-09-02 05:26:42] [Rank 0] step:5261/10000 train_time:390797ms step_avg:74.28ms +[2025-09-02 05:26:42] [Rank 0] step:5261/10000 train_time:390797ms step_avg:74.28ms +[2025-09-02 05:26:44] [Rank 0] step:5281/10000 train_time:392368ms step_avg:74.30ms +[2025-09-02 05:26:44] [Rank 0] step:5281/10000 train_time:392368ms step_avg:74.30ms +[2025-09-02 05:26:46] [Rank 0] step:5301/10000 train_time:393947ms step_avg:74.32ms +[2025-09-02 05:26:46] [Rank 0] step:5301/10000 train_time:393947ms step_avg:74.32ms +[2025-09-02 05:26:47] [Rank 0] step:5321/10000 train_time:395515ms step_avg:74.33ms +[2025-09-02 05:26:47] [Rank 0] step:5321/10000 train_time:395515ms step_avg:74.33ms +[2025-09-02 
05:26:49] [Rank 0] step:5341/10000 train_time:397145ms step_avg:74.36ms +[2025-09-02 05:26:49] [Rank 0] step:5341/10000 train_time:397145ms step_avg:74.36ms +[2025-09-02 05:26:50] [Rank 0] step:5361/10000 train_time:398719ms step_avg:74.37ms +[2025-09-02 05:26:50] [Rank 0] step:5361/10000 train_time:398719ms step_avg:74.37ms +[2025-09-02 05:26:52] [Rank 0] step:5381/10000 train_time:400292ms step_avg:74.39ms +[2025-09-02 05:26:52] [Rank 0] step:5381/10000 train_time:400292ms step_avg:74.39ms +[2025-09-02 05:26:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:26:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:27:05] [Rank 0] PRINT: step:5400/10000 val_loss:4.1015 svd_entropy: attn_qk:H=0.7248,top10E=0.29,eRank=131.5,q75/q25=102.95 attn_vo:H=0.8046,top10E=0.18,eRank=239.1,q75/q25=87.21 mlp_w1:H=0.7516,top10E=0.31,eRank=171.1,q75/q25=19.02 mlp_w2:H=0.8482,top10E=0.14,eRank=288.9,q75/q25=42.52 vo_prod:H=0.7087,top10E=0.28,eRank=119.1,q75/q25=9756.76 train_time:402021ms step_avg:74.45ms +[2025-09-02 05:27:05] [Rank 0] PRINT: step:5400/10000 val_loss:4.1015 svd_entropy: attn_qk:H=0.7248,top10E=0.29,eRank=131.5,q75/q25=102.95 attn_vo:H=0.8046,top10E=0.18,eRank=239.1,q75/q25=87.21 mlp_w1:H=0.7516,top10E=0.31,eRank=171.1,q75/q25=19.02 mlp_w2:H=0.8482,top10E=0.14,eRank=288.9,q75/q25=42.52 vo_prod:H=0.7087,top10E=0.28,eRank=119.1,q75/q25=9756.76 train_time:402021ms step_avg:74.45ms +[2025-09-02 05:27:05] [Rank 0] step:5401/10000 train_time:402031ms step_avg:74.44ms +[2025-09-02 05:27:05] [Rank 0] step:5401/10000 train_time:402031ms step_avg:74.44ms +[2025-09-02 05:27:07] [Rank 0] step:5421/10000 train_time:403455ms step_avg:74.42ms +[2025-09-02 05:27:07] [Rank 0] step:5421/10000 train_time:403455ms step_avg:74.42ms +[2025-09-02 05:27:08] [Rank 0] step:5441/10000 train_time:405019ms 
step_avg:74.44ms +[2025-09-02 05:27:08] [Rank 0] step:5441/10000 train_time:405019ms step_avg:74.44ms +[2025-09-02 05:27:10] [Rank 0] step:5461/10000 train_time:406589ms step_avg:74.45ms +[2025-09-02 05:27:10] [Rank 0] step:5461/10000 train_time:406589ms step_avg:74.45ms +[2025-09-02 05:27:11] [Rank 0] step:5481/10000 train_time:408161ms step_avg:74.47ms +[2025-09-02 05:27:11] [Rank 0] step:5481/10000 train_time:408161ms step_avg:74.47ms +[2025-09-02 05:27:13] [Rank 0] step:5501/10000 train_time:409737ms step_avg:74.48ms +[2025-09-02 05:27:13] [Rank 0] step:5501/10000 train_time:409737ms step_avg:74.48ms +[2025-09-02 05:27:15] [Rank 0] step:5521/10000 train_time:411314ms step_avg:74.50ms +[2025-09-02 05:27:15] [Rank 0] step:5521/10000 train_time:411314ms step_avg:74.50ms +[2025-09-02 05:27:16] [Rank 0] step:5541/10000 train_time:412884ms step_avg:74.51ms +[2025-09-02 05:27:16] [Rank 0] step:5541/10000 train_time:412884ms step_avg:74.51ms +[2025-09-02 05:27:18] [Rank 0] step:5561/10000 train_time:414455ms step_avg:74.53ms +[2025-09-02 05:27:18] [Rank 0] step:5561/10000 train_time:414455ms step_avg:74.53ms +[2025-09-02 05:27:19] [Rank 0] step:5581/10000 train_time:416030ms step_avg:74.54ms +[2025-09-02 05:27:19] [Rank 0] step:5581/10000 train_time:416030ms step_avg:74.54ms +[2025-09-02 05:27:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:27:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:27:32] [Rank 0] PRINT: step:5600/10000 val_loss:4.0898 svd_entropy: attn_qk:H=0.7272,top10E=0.29,eRank=133.4,q75/q25=104.09 attn_vo:H=0.8070,top10E=0.18,eRank=242.3,q75/q25=85.05 mlp_w1:H=0.7544,top10E=0.30,eRank=173.9,q75/q25=19.57 mlp_w2:H=0.8498,top10E=0.13,eRank=292.1,q75/q25=43.16 vo_prod:H=0.7115,top10E=0.28,eRank=121.4,q75/q25=9095.34 train_time:417757ms step_avg:74.60ms +[2025-09-02 05:27:32] [Rank 0] PRINT: step:5600/10000 val_loss:4.0898 svd_entropy: attn_qk:H=0.7272,top10E=0.29,eRank=133.4,q75/q25=104.09 attn_vo:H=0.8070,top10E=0.18,eRank=242.3,q75/q25=85.05 mlp_w1:H=0.7544,top10E=0.30,eRank=173.9,q75/q25=19.57 mlp_w2:H=0.8498,top10E=0.13,eRank=292.1,q75/q25=43.16 vo_prod:H=0.7115,top10E=0.28,eRank=121.4,q75/q25=9095.34 train_time:417757ms step_avg:74.60ms +[2025-09-02 05:27:33] [Rank 0] step:5601/10000 train_time:417768ms step_avg:74.59ms +[2025-09-02 05:27:33] [Rank 0] step:5601/10000 train_time:417768ms step_avg:74.59ms +[2025-09-02 05:27:34] [Rank 0] step:5621/10000 train_time:419192ms step_avg:74.58ms +[2025-09-02 05:27:34] [Rank 0] step:5621/10000 train_time:419192ms step_avg:74.58ms +[2025-09-02 05:27:36] [Rank 0] step:5641/10000 train_time:420763ms step_avg:74.59ms +[2025-09-02 05:27:36] [Rank 0] step:5641/10000 train_time:420763ms step_avg:74.59ms +[2025-09-02 05:27:37] [Rank 0] step:5661/10000 train_time:422331ms step_avg:74.60ms +[2025-09-02 05:27:37] [Rank 0] step:5661/10000 train_time:422331ms step_avg:74.60ms +[2025-09-02 05:27:39] [Rank 0] step:5681/10000 train_time:423906ms step_avg:74.62ms +[2025-09-02 05:27:39] [Rank 0] step:5681/10000 train_time:423906ms step_avg:74.62ms +[2025-09-02 05:27:40] [Rank 0] step:5701/10000 train_time:425473ms step_avg:74.63ms +[2025-09-02 05:27:40] [Rank 0] step:5701/10000 train_time:425473ms step_avg:74.63ms +[2025-09-02 05:27:42] [Rank 0] step:5721/10000 train_time:427048ms step_avg:74.65ms +[2025-09-02 05:27:42] [Rank 0] step:5721/10000 train_time:427048ms step_avg:74.65ms +[2025-09-02 
05:27:44] [Rank 0] step:5741/10000 train_time:428618ms step_avg:74.66ms +[2025-09-02 05:27:44] [Rank 0] step:5741/10000 train_time:428618ms step_avg:74.66ms +[2025-09-02 05:27:45] [Rank 0] step:5761/10000 train_time:430190ms step_avg:74.67ms +[2025-09-02 05:27:45] [Rank 0] step:5761/10000 train_time:430190ms step_avg:74.67ms +[2025-09-02 05:27:47] [Rank 0] step:5781/10000 train_time:431763ms step_avg:74.69ms +[2025-09-02 05:27:47] [Rank 0] step:5781/10000 train_time:431763ms step_avg:74.69ms +[2025-09-02 05:27:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:27:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:28:00] [Rank 0] PRINT: step:5800/10000 val_loss:4.0791 svd_entropy: attn_qk:H=0.7296,top10E=0.28,eRank=135.3,q75/q25=105.48 attn_vo:H=0.8093,top10E=0.17,eRank=245.4,q75/q25=82.95 mlp_w1:H=0.7572,top10E=0.30,eRank=176.8,q75/q25=20.10 mlp_w2:H=0.8513,top10E=0.13,eRank=295.2,q75/q25=43.71 vo_prod:H=0.7142,top10E=0.27,eRank=123.6,q75/q25=8660.99 train_time:433493ms step_avg:74.74ms +[2025-09-02 05:28:00] [Rank 0] PRINT: step:5800/10000 val_loss:4.0791 svd_entropy: attn_qk:H=0.7296,top10E=0.28,eRank=135.3,q75/q25=105.48 attn_vo:H=0.8093,top10E=0.17,eRank=245.4,q75/q25=82.95 mlp_w1:H=0.7572,top10E=0.30,eRank=176.8,q75/q25=20.10 mlp_w2:H=0.8513,top10E=0.13,eRank=295.2,q75/q25=43.71 vo_prod:H=0.7142,top10E=0.27,eRank=123.6,q75/q25=8660.99 train_time:433493ms step_avg:74.74ms +[2025-09-02 05:28:00] [Rank 0] step:5801/10000 train_time:433504ms step_avg:74.73ms +[2025-09-02 05:28:00] [Rank 0] step:5801/10000 train_time:433504ms step_avg:74.73ms +[2025-09-02 05:28:01] [Rank 0] step:5821/10000 train_time:434932ms step_avg:74.72ms +[2025-09-02 05:28:01] [Rank 0] step:5821/10000 train_time:434932ms step_avg:74.72ms +[2025-09-02 05:28:03] [Rank 0] step:5841/10000 train_time:436501ms 
step_avg:74.73ms +[2025-09-02 05:28:03] [Rank 0] step:5841/10000 train_time:436501ms step_avg:74.73ms +[2025-09-02 05:28:04] [Rank 0] step:5861/10000 train_time:438073ms step_avg:74.74ms +[2025-09-02 05:28:04] [Rank 0] step:5861/10000 train_time:438073ms step_avg:74.74ms +[2025-09-02 05:28:06] [Rank 0] step:5881/10000 train_time:439647ms step_avg:74.76ms +[2025-09-02 05:28:06] [Rank 0] step:5881/10000 train_time:439647ms step_avg:74.76ms +[2025-09-02 05:28:08] [Rank 0] step:5901/10000 train_time:441219ms step_avg:74.77ms +[2025-09-02 05:28:08] [Rank 0] step:5901/10000 train_time:441219ms step_avg:74.77ms +[2025-09-02 05:28:09] [Rank 0] step:5921/10000 train_time:442792ms step_avg:74.78ms +[2025-09-02 05:28:09] [Rank 0] step:5921/10000 train_time:442792ms step_avg:74.78ms +[2025-09-02 05:28:11] [Rank 0] step:5941/10000 train_time:444368ms step_avg:74.80ms +[2025-09-02 05:28:11] [Rank 0] step:5941/10000 train_time:444368ms step_avg:74.80ms +[2025-09-02 05:28:12] [Rank 0] step:5961/10000 train_time:445948ms step_avg:74.81ms +[2025-09-02 05:28:12] [Rank 0] step:5961/10000 train_time:445948ms step_avg:74.81ms +[2025-09-02 05:28:14] [Rank 0] step:5981/10000 train_time:447523ms step_avg:74.82ms +[2025-09-02 05:28:14] [Rank 0] step:5981/10000 train_time:447523ms step_avg:74.82ms +[2025-09-02 05:28:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:28:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:28:27] [Rank 0] PRINT: step:6000/10000 val_loss:4.0532 svd_entropy: attn_qk:H=0.7319,top10E=0.28,eRank=137.2,q75/q25=106.14 attn_vo:H=0.8115,top10E=0.17,eRank=248.5,q75/q25=80.81 mlp_w1:H=0.7601,top10E=0.30,eRank=179.6,q75/q25=20.54 mlp_w2:H=0.8529,top10E=0.13,eRank=298.5,q75/q25=43.76 vo_prod:H=0.7167,top10E=0.27,eRank=125.7,q75/q25=8041.11 train_time:449257ms step_avg:74.88ms +[2025-09-02 05:28:27] [Rank 0] PRINT: step:6000/10000 val_loss:4.0532 svd_entropy: attn_qk:H=0.7319,top10E=0.28,eRank=137.2,q75/q25=106.14 attn_vo:H=0.8115,top10E=0.17,eRank=248.5,q75/q25=80.81 mlp_w1:H=0.7601,top10E=0.30,eRank=179.6,q75/q25=20.54 mlp_w2:H=0.8529,top10E=0.13,eRank=298.5,q75/q25=43.76 vo_prod:H=0.7167,top10E=0.27,eRank=125.7,q75/q25=8041.11 train_time:449257ms step_avg:74.88ms +[2025-09-02 05:28:27] [Rank 0] step:6001/10000 train_time:449267ms step_avg:74.87ms +[2025-09-02 05:28:27] [Rank 0] step:6001/10000 train_time:449267ms step_avg:74.87ms +[2025-09-02 05:28:29] [Rank 0] step:6021/10000 train_time:450687ms step_avg:74.85ms +[2025-09-02 05:28:29] [Rank 0] step:6021/10000 train_time:450687ms step_avg:74.85ms +[2025-09-02 05:28:30] [Rank 0] step:6041/10000 train_time:452261ms step_avg:74.87ms +[2025-09-02 05:28:30] [Rank 0] step:6041/10000 train_time:452261ms step_avg:74.87ms +[2025-09-02 05:28:32] [Rank 0] step:6061/10000 train_time:453841ms step_avg:74.88ms +[2025-09-02 05:28:32] [Rank 0] step:6061/10000 train_time:453841ms step_avg:74.88ms +[2025-09-02 05:28:34] [Rank 0] step:6081/10000 train_time:455419ms step_avg:74.89ms +[2025-09-02 05:28:34] [Rank 0] step:6081/10000 train_time:455419ms step_avg:74.89ms +[2025-09-02 05:28:35] [Rank 0] step:6101/10000 train_time:456998ms step_avg:74.91ms +[2025-09-02 05:28:35] [Rank 0] step:6101/10000 train_time:456998ms step_avg:74.91ms +[2025-09-02 05:28:37] [Rank 0] step:6121/10000 train_time:458837ms step_avg:74.96ms +[2025-09-02 05:28:37] [Rank 0] step:6121/10000 train_time:458837ms step_avg:74.96ms +[2025-09-02 
05:28:39] [Rank 0] step:6141/10000 train_time:460422ms step_avg:74.98ms +[2025-09-02 05:28:39] [Rank 0] step:6141/10000 train_time:460422ms step_avg:74.98ms +[2025-09-02 05:28:40] [Rank 0] step:6161/10000 train_time:461998ms step_avg:74.99ms +[2025-09-02 05:28:40] [Rank 0] step:6161/10000 train_time:461998ms step_avg:74.99ms +[2025-09-02 05:28:42] [Rank 0] step:6181/10000 train_time:463574ms step_avg:75.00ms +[2025-09-02 05:28:42] [Rank 0] step:6181/10000 train_time:463574ms step_avg:75.00ms +[2025-09-02 05:28:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:28:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:28:55] [Rank 0] PRINT: step:6200/10000 val_loss:4.0381 svd_entropy: attn_qk:H=0.7341,top10E=0.28,eRank=139.0,q75/q25=107.20 attn_vo:H=0.8136,top10E=0.17,eRank=251.4,q75/q25=78.55 mlp_w1:H=0.7626,top10E=0.29,eRank=182.3,q75/q25=20.93 mlp_w2:H=0.8544,top10E=0.13,eRank=301.6,q75/q25=43.93 vo_prod:H=0.7192,top10E=0.27,eRank=127.8,q75/q25=7544.16 train_time:465311ms step_avg:75.05ms +[2025-09-02 05:28:55] [Rank 0] PRINT: step:6200/10000 val_loss:4.0381 svd_entropy: attn_qk:H=0.7341,top10E=0.28,eRank=139.0,q75/q25=107.20 attn_vo:H=0.8136,top10E=0.17,eRank=251.4,q75/q25=78.55 mlp_w1:H=0.7626,top10E=0.29,eRank=182.3,q75/q25=20.93 mlp_w2:H=0.8544,top10E=0.13,eRank=301.6,q75/q25=43.93 vo_prod:H=0.7192,top10E=0.27,eRank=127.8,q75/q25=7544.16 train_time:465311ms step_avg:75.05ms +[2025-09-02 05:28:55] [Rank 0] step:6201/10000 train_time:465322ms step_avg:75.04ms +[2025-09-02 05:28:55] [Rank 0] step:6201/10000 train_time:465322ms step_avg:75.04ms +[2025-09-02 05:28:57] [Rank 0] step:6221/10000 train_time:466757ms step_avg:75.03ms +[2025-09-02 05:28:57] [Rank 0] step:6221/10000 train_time:466757ms step_avg:75.03ms +[2025-09-02 05:28:58] [Rank 0] step:6241/10000 train_time:468328ms 
step_avg:75.04ms +[2025-09-02 05:28:58] [Rank 0] step:6241/10000 train_time:468328ms step_avg:75.04ms +[2025-09-02 05:29:00] [Rank 0] step:6261/10000 train_time:469902ms step_avg:75.05ms +[2025-09-02 05:29:00] [Rank 0] step:6261/10000 train_time:469902ms step_avg:75.05ms +[2025-09-02 05:29:01] [Rank 0] step:6281/10000 train_time:471481ms step_avg:75.06ms +[2025-09-02 05:29:01] [Rank 0] step:6281/10000 train_time:471481ms step_avg:75.06ms +[2025-09-02 05:29:03] [Rank 0] step:6301/10000 train_time:473061ms step_avg:75.08ms +[2025-09-02 05:29:03] [Rank 0] step:6301/10000 train_time:473061ms step_avg:75.08ms +[2025-09-02 05:29:04] [Rank 0] step:6321/10000 train_time:474635ms step_avg:75.09ms +[2025-09-02 05:29:04] [Rank 0] step:6321/10000 train_time:474635ms step_avg:75.09ms +[2025-09-02 05:29:06] [Rank 0] step:6341/10000 train_time:476213ms step_avg:75.10ms +[2025-09-02 05:29:06] [Rank 0] step:6341/10000 train_time:476213ms step_avg:75.10ms +[2025-09-02 05:29:08] [Rank 0] step:6361/10000 train_time:477792ms step_avg:75.11ms +[2025-09-02 05:29:08] [Rank 0] step:6361/10000 train_time:477792ms step_avg:75.11ms +[2025-09-02 05:29:09] [Rank 0] step:6381/10000 train_time:479375ms step_avg:75.13ms +[2025-09-02 05:29:09] [Rank 0] step:6381/10000 train_time:479375ms step_avg:75.13ms +[2025-09-02 05:29:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:29:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:29:22] [Rank 0] PRINT: step:6400/10000 val_loss:4.0209 svd_entropy: attn_qk:H=0.7361,top10E=0.28,eRank=140.6,q75/q25=107.45 attn_vo:H=0.8154,top10E=0.17,eRank=254.0,q75/q25=76.68 mlp_w1:H=0.7648,top10E=0.29,eRank=184.7,q75/q25=21.28 mlp_w2:H=0.8556,top10E=0.13,eRank=304.2,q75/q25=44.44 vo_prod:H=0.7215,top10E=0.26,eRank=129.8,q75/q25=7037.35 train_time:481109ms step_avg:75.17ms +[2025-09-02 05:29:22] [Rank 0] PRINT: step:6400/10000 val_loss:4.0209 svd_entropy: attn_qk:H=0.7361,top10E=0.28,eRank=140.6,q75/q25=107.45 attn_vo:H=0.8154,top10E=0.17,eRank=254.0,q75/q25=76.68 mlp_w1:H=0.7648,top10E=0.29,eRank=184.7,q75/q25=21.28 mlp_w2:H=0.8556,top10E=0.13,eRank=304.2,q75/q25=44.44 vo_prod:H=0.7215,top10E=0.26,eRank=129.8,q75/q25=7037.35 train_time:481109ms step_avg:75.17ms +[2025-09-02 05:29:22] [Rank 0] step:6401/10000 train_time:481120ms step_avg:75.16ms +[2025-09-02 05:29:22] [Rank 0] step:6401/10000 train_time:481120ms step_avg:75.16ms +[2025-09-02 05:29:24] [Rank 0] step:6421/10000 train_time:482557ms step_avg:75.15ms +[2025-09-02 05:29:24] [Rank 0] step:6421/10000 train_time:482557ms step_avg:75.15ms +[2025-09-02 05:29:26] [Rank 0] step:6441/10000 train_time:484131ms step_avg:75.16ms +[2025-09-02 05:29:26] [Rank 0] step:6441/10000 train_time:484131ms step_avg:75.16ms +[2025-09-02 05:29:27] [Rank 0] step:6461/10000 train_time:485709ms step_avg:75.18ms +[2025-09-02 05:29:27] [Rank 0] step:6461/10000 train_time:485709ms step_avg:75.18ms +[2025-09-02 05:29:29] [Rank 0] step:6481/10000 train_time:487294ms step_avg:75.19ms +[2025-09-02 05:29:29] [Rank 0] step:6481/10000 train_time:487294ms step_avg:75.19ms +[2025-09-02 05:29:30] [Rank 0] step:6501/10000 train_time:488868ms step_avg:75.20ms +[2025-09-02 05:29:30] [Rank 0] step:6501/10000 train_time:488868ms step_avg:75.20ms +[2025-09-02 05:29:32] [Rank 0] step:6521/10000 train_time:490440ms step_avg:75.21ms +[2025-09-02 05:29:32] [Rank 0] step:6521/10000 train_time:490440ms step_avg:75.21ms +[2025-09-02 
05:29:33] [Rank 0] step:6541/10000 train_time:492016ms step_avg:75.22ms +[2025-09-02 05:29:33] [Rank 0] step:6541/10000 train_time:492016ms step_avg:75.22ms +[2025-09-02 05:29:35] [Rank 0] step:6561/10000 train_time:493596ms step_avg:75.23ms +[2025-09-02 05:29:35] [Rank 0] step:6561/10000 train_time:493596ms step_avg:75.23ms +[2025-09-02 05:29:37] [Rank 0] step:6581/10000 train_time:495170ms step_avg:75.24ms +[2025-09-02 05:29:37] [Rank 0] step:6581/10000 train_time:495170ms step_avg:75.24ms +[2025-09-02 05:29:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:29:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:29:50] [Rank 0] PRINT: step:6600/10000 val_loss:4.0070 svd_entropy: attn_qk:H=0.7378,top10E=0.27,eRank=142.1,q75/q25=108.23 attn_vo:H=0.8172,top10E=0.17,eRank=256.4,q75/q25=74.98 mlp_w1:H=0.7669,top10E=0.29,eRank=187.0,q75/q25=21.66 mlp_w2:H=0.8567,top10E=0.13,eRank=306.7,q75/q25=44.65 vo_prod:H=0.7236,top10E=0.26,eRank=131.7,q75/q25=6696.20 train_time:496903ms step_avg:75.29ms +[2025-09-02 05:29:50] [Rank 0] PRINT: step:6600/10000 val_loss:4.0070 svd_entropy: attn_qk:H=0.7378,top10E=0.27,eRank=142.1,q75/q25=108.23 attn_vo:H=0.8172,top10E=0.17,eRank=256.4,q75/q25=74.98 mlp_w1:H=0.7669,top10E=0.29,eRank=187.0,q75/q25=21.66 mlp_w2:H=0.8567,top10E=0.13,eRank=306.7,q75/q25=44.65 vo_prod:H=0.7236,top10E=0.26,eRank=131.7,q75/q25=6696.20 train_time:496903ms step_avg:75.29ms +[2025-09-02 05:29:50] [Rank 0] step:6601/10000 train_time:496914ms step_avg:75.28ms +[2025-09-02 05:29:50] [Rank 0] step:6601/10000 train_time:496914ms step_avg:75.28ms +[2025-09-02 05:29:51] [Rank 0] step:6621/10000 train_time:498344ms step_avg:75.27ms +[2025-09-02 05:29:51] [Rank 0] step:6621/10000 train_time:498344ms step_avg:75.27ms +[2025-09-02 05:29:53] [Rank 0] step:6641/10000 train_time:499924ms 
step_avg:75.28ms +[2025-09-02 05:29:53] [Rank 0] step:6641/10000 train_time:499924ms step_avg:75.28ms +[2025-09-02 05:29:55] [Rank 0] step:6661/10000 train_time:501498ms step_avg:75.29ms +[2025-09-02 05:29:55] [Rank 0] step:6661/10000 train_time:501498ms step_avg:75.29ms +[2025-09-02 05:29:56] [Rank 0] step:6681/10000 train_time:503092ms step_avg:75.30ms +[2025-09-02 05:29:56] [Rank 0] step:6681/10000 train_time:503092ms step_avg:75.30ms +[2025-09-02 05:29:58] [Rank 0] step:6701/10000 train_time:504704ms step_avg:75.32ms +[2025-09-02 05:29:58] [Rank 0] step:6701/10000 train_time:504704ms step_avg:75.32ms +[2025-09-02 05:29:59] [Rank 0] step:6721/10000 train_time:506308ms step_avg:75.33ms +[2025-09-02 05:29:59] [Rank 0] step:6721/10000 train_time:506308ms step_avg:75.33ms +[2025-09-02 05:30:01] [Rank 0] step:6741/10000 train_time:507910ms step_avg:75.35ms +[2025-09-02 05:30:01] [Rank 0] step:6741/10000 train_time:507910ms step_avg:75.35ms +[2025-09-02 05:30:03] [Rank 0] step:6761/10000 train_time:509613ms step_avg:75.38ms +[2025-09-02 05:30:03] [Rank 0] step:6761/10000 train_time:509613ms step_avg:75.38ms +[2025-09-02 05:30:04] [Rank 0] step:6781/10000 train_time:511223ms step_avg:75.39ms +[2025-09-02 05:30:04] [Rank 0] step:6781/10000 train_time:511223ms step_avg:75.39ms +[2025-09-02 05:30:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:30:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:30:17] [Rank 0] PRINT: step:6800/10000 val_loss:3.9913 svd_entropy: attn_qk:H=0.7395,top10E=0.27,eRank=143.5,q75/q25=108.76 attn_vo:H=0.8187,top10E=0.16,eRank=258.6,q75/q25=73.48 mlp_w1:H=0.7689,top10E=0.28,eRank=189.2,q75/q25=21.95 mlp_w2:H=0.8578,top10E=0.13,eRank=309.0,q75/q25=45.23 vo_prod:H=0.7255,top10E=0.26,eRank=133.3,q75/q25=6269.04 train_time:513094ms step_avg:75.46ms +[2025-09-02 05:30:17] [Rank 0] PRINT: step:6800/10000 val_loss:3.9913 svd_entropy: attn_qk:H=0.7395,top10E=0.27,eRank=143.5,q75/q25=108.76 attn_vo:H=0.8187,top10E=0.16,eRank=258.6,q75/q25=73.48 mlp_w1:H=0.7689,top10E=0.28,eRank=189.2,q75/q25=21.95 mlp_w2:H=0.8578,top10E=0.13,eRank=309.0,q75/q25=45.23 vo_prod:H=0.7255,top10E=0.26,eRank=133.3,q75/q25=6269.04 train_time:513094ms step_avg:75.46ms +[2025-09-02 05:30:18] [Rank 0] step:6801/10000 train_time:513106ms step_avg:75.45ms +[2025-09-02 05:30:18] [Rank 0] step:6801/10000 train_time:513106ms step_avg:75.45ms +[2025-09-02 05:30:19] [Rank 0] step:6821/10000 train_time:514658ms step_avg:75.45ms +[2025-09-02 05:30:19] [Rank 0] step:6821/10000 train_time:514658ms step_avg:75.45ms +[2025-09-02 05:30:21] [Rank 0] step:6841/10000 train_time:516257ms step_avg:75.47ms +[2025-09-02 05:30:21] [Rank 0] step:6841/10000 train_time:516257ms step_avg:75.47ms +[2025-09-02 05:30:23] [Rank 0] step:6861/10000 train_time:517964ms step_avg:75.49ms +[2025-09-02 05:30:23] [Rank 0] step:6861/10000 train_time:517964ms step_avg:75.49ms +[2025-09-02 05:30:24] [Rank 0] step:6881/10000 train_time:519569ms step_avg:75.51ms +[2025-09-02 05:30:24] [Rank 0] step:6881/10000 train_time:519569ms step_avg:75.51ms +[2025-09-02 05:30:26] [Rank 0] step:6901/10000 train_time:521173ms step_avg:75.52ms +[2025-09-02 05:30:26] [Rank 0] step:6901/10000 train_time:521173ms step_avg:75.52ms +[2025-09-02 05:30:27] [Rank 0] step:6921/10000 train_time:522775ms step_avg:75.53ms +[2025-09-02 05:30:27] [Rank 0] step:6921/10000 train_time:522775ms step_avg:75.53ms +[2025-09-02 
05:30:29] [Rank 0] step:6941/10000 train_time:524390ms step_avg:75.55ms +[2025-09-02 05:30:29] [Rank 0] step:6941/10000 train_time:524390ms step_avg:75.55ms +[2025-09-02 05:30:31] [Rank 0] step:6961/10000 train_time:526010ms step_avg:75.57ms +[2025-09-02 05:30:31] [Rank 0] step:6961/10000 train_time:526010ms step_avg:75.57ms +[2025-09-02 05:30:32] [Rank 0] step:6981/10000 train_time:527619ms step_avg:75.58ms +[2025-09-02 05:30:32] [Rank 0] step:6981/10000 train_time:527619ms step_avg:75.58ms +[2025-09-02 05:30:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:30:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:30:45] [Rank 0] PRINT: step:7000/10000 val_loss:3.9734 svd_entropy: attn_qk:H=0.7410,top10E=0.27,eRank=144.8,q75/q25=108.83 attn_vo:H=0.8201,top10E=0.16,eRank=260.7,q75/q25=72.14 mlp_w1:H=0.7706,top10E=0.28,eRank=191.2,q75/q25=22.25 mlp_w2:H=0.8588,top10E=0.12,eRank=311.1,q75/q25=45.51 vo_prod:H=0.7273,top10E=0.26,eRank=134.9,q75/q25=6016.15 train_time:529393ms step_avg:75.63ms +[2025-09-02 05:30:45] [Rank 0] PRINT: step:7000/10000 val_loss:3.9734 svd_entropy: attn_qk:H=0.7410,top10E=0.27,eRank=144.8,q75/q25=108.83 attn_vo:H=0.8201,top10E=0.16,eRank=260.7,q75/q25=72.14 mlp_w1:H=0.7706,top10E=0.28,eRank=191.2,q75/q25=22.25 mlp_w2:H=0.8588,top10E=0.12,eRank=311.1,q75/q25=45.51 vo_prod:H=0.7273,top10E=0.26,eRank=134.9,q75/q25=6016.15 train_time:529393ms step_avg:75.63ms +[2025-09-02 05:30:46] [Rank 0] step:7001/10000 train_time:529403ms step_avg:75.62ms +[2025-09-02 05:30:46] [Rank 0] step:7001/10000 train_time:529403ms step_avg:75.62ms +[2025-09-02 05:30:47] [Rank 0] step:7021/10000 train_time:530863ms step_avg:75.61ms +[2025-09-02 05:30:47] [Rank 0] step:7021/10000 train_time:530863ms step_avg:75.61ms +[2025-09-02 05:30:49] [Rank 0] step:7041/10000 train_time:532470ms 
step_avg:75.62ms +[2025-09-02 05:30:49] [Rank 0] step:7041/10000 train_time:532470ms step_avg:75.62ms +[2025-09-02 05:30:50] [Rank 0] step:7061/10000 train_time:534074ms step_avg:75.64ms +[2025-09-02 05:30:50] [Rank 0] step:7061/10000 train_time:534074ms step_avg:75.64ms +[2025-09-02 05:30:52] [Rank 0] step:7081/10000 train_time:535680ms step_avg:75.65ms +[2025-09-02 05:30:52] [Rank 0] step:7081/10000 train_time:535680ms step_avg:75.65ms +[2025-09-02 05:30:54] [Rank 0] step:7101/10000 train_time:537285ms step_avg:75.66ms +[2025-09-02 05:30:54] [Rank 0] step:7101/10000 train_time:537285ms step_avg:75.66ms +[2025-09-02 05:30:55] [Rank 0] step:7121/10000 train_time:538900ms step_avg:75.68ms +[2025-09-02 05:30:55] [Rank 0] step:7121/10000 train_time:538900ms step_avg:75.68ms +[2025-09-02 05:30:57] [Rank 0] step:7141/10000 train_time:540505ms step_avg:75.69ms +[2025-09-02 05:30:57] [Rank 0] step:7141/10000 train_time:540505ms step_avg:75.69ms +[2025-09-02 05:30:58] [Rank 0] step:7161/10000 train_time:542115ms step_avg:75.70ms +[2025-09-02 05:30:58] [Rank 0] step:7161/10000 train_time:542115ms step_avg:75.70ms +[2025-09-02 05:31:00] [Rank 0] step:7181/10000 train_time:543722ms step_avg:75.72ms +[2025-09-02 05:31:00] [Rank 0] step:7181/10000 train_time:543722ms step_avg:75.72ms +[2025-09-02 05:31:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:31:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:31:13] [Rank 0] PRINT: step:7200/10000 val_loss:3.9647 svd_entropy: attn_qk:H=0.7424,top10E=0.27,eRank=146.0,q75/q25=109.15 attn_vo:H=0.8214,top10E=0.16,eRank=262.5,q75/q25=70.60 mlp_w1:H=0.7723,top10E=0.28,eRank=193.1,q75/q25=22.63 mlp_w2:H=0.8596,top10E=0.12,eRank=313.1,q75/q25=45.79 vo_prod:H=0.7290,top10E=0.26,eRank=136.4,q75/q25=5686.72 train_time:545496ms step_avg:75.76ms +[2025-09-02 05:31:13] [Rank 0] PRINT: step:7200/10000 val_loss:3.9647 svd_entropy: attn_qk:H=0.7424,top10E=0.27,eRank=146.0,q75/q25=109.15 attn_vo:H=0.8214,top10E=0.16,eRank=262.5,q75/q25=70.60 mlp_w1:H=0.7723,top10E=0.28,eRank=193.1,q75/q25=22.63 mlp_w2:H=0.8596,top10E=0.12,eRank=313.1,q75/q25=45.79 vo_prod:H=0.7290,top10E=0.26,eRank=136.4,q75/q25=5686.72 train_time:545496ms step_avg:75.76ms +[2025-09-02 05:31:13] [Rank 0] step:7201/10000 train_time:545506ms step_avg:75.75ms +[2025-09-02 05:31:13] [Rank 0] step:7201/10000 train_time:545506ms step_avg:75.75ms +[2025-09-02 05:31:15] [Rank 0] step:7221/10000 train_time:546987ms step_avg:75.75ms +[2025-09-02 05:31:15] [Rank 0] step:7221/10000 train_time:546987ms step_avg:75.75ms +[2025-09-02 05:31:17] [Rank 0] step:7241/10000 train_time:548588ms step_avg:75.76ms +[2025-09-02 05:31:17] [Rank 0] step:7241/10000 train_time:548588ms step_avg:75.76ms +[2025-09-02 05:31:18] [Rank 0] step:7261/10000 train_time:550188ms step_avg:75.77ms +[2025-09-02 05:31:18] [Rank 0] step:7261/10000 train_time:550188ms step_avg:75.77ms +[2025-09-02 05:31:20] [Rank 0] step:7281/10000 train_time:551801ms step_avg:75.79ms +[2025-09-02 05:31:20] [Rank 0] step:7281/10000 train_time:551801ms step_avg:75.79ms +[2025-09-02 05:31:21] [Rank 0] step:7301/10000 train_time:553409ms step_avg:75.80ms +[2025-09-02 05:31:21] [Rank 0] step:7301/10000 train_time:553409ms step_avg:75.80ms +[2025-09-02 05:31:23] [Rank 0] step:7321/10000 train_time:555022ms step_avg:75.81ms +[2025-09-02 05:31:23] [Rank 0] step:7321/10000 train_time:555022ms step_avg:75.81ms +[2025-09-02 
05:31:25] [Rank 0] step:7341/10000 train_time:556629ms step_avg:75.82ms +[2025-09-02 05:31:25] [Rank 0] step:7341/10000 train_time:556629ms step_avg:75.82ms +[2025-09-02 05:31:26] [Rank 0] step:7361/10000 train_time:558247ms step_avg:75.84ms +[2025-09-02 05:31:26] [Rank 0] step:7361/10000 train_time:558247ms step_avg:75.84ms +[2025-09-02 05:31:28] [Rank 0] step:7381/10000 train_time:559861ms step_avg:75.85ms +[2025-09-02 05:31:28] [Rank 0] step:7381/10000 train_time:559861ms step_avg:75.85ms +[2025-09-02 05:31:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:31:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:31:41] [Rank 0] PRINT: step:7400/10000 val_loss:3.9443 svd_entropy: attn_qk:H=0.7436,top10E=0.27,eRank=147.1,q75/q25=108.75 attn_vo:H=0.8225,top10E=0.16,eRank=264.2,q75/q25=69.33 mlp_w1:H=0.7737,top10E=0.28,eRank=194.8,q75/q25=22.88 mlp_w2:H=0.8604,top10E=0.12,eRank=314.8,q75/q25=45.98 vo_prod:H=0.7305,top10E=0.25,eRank=137.8,q75/q25=5425.94 train_time:561617ms step_avg:75.89ms +[2025-09-02 05:31:41] [Rank 0] PRINT: step:7400/10000 val_loss:3.9443 svd_entropy: attn_qk:H=0.7436,top10E=0.27,eRank=147.1,q75/q25=108.75 attn_vo:H=0.8225,top10E=0.16,eRank=264.2,q75/q25=69.33 mlp_w1:H=0.7737,top10E=0.28,eRank=194.8,q75/q25=22.88 mlp_w2:H=0.8604,top10E=0.12,eRank=314.8,q75/q25=45.98 vo_prod:H=0.7305,top10E=0.25,eRank=137.8,q75/q25=5425.94 train_time:561617ms step_avg:75.89ms +[2025-09-02 05:31:41] [Rank 0] step:7401/10000 train_time:561628ms step_avg:75.89ms +[2025-09-02 05:31:41] [Rank 0] step:7401/10000 train_time:561628ms step_avg:75.89ms +[2025-09-02 05:31:43] [Rank 0] step:7421/10000 train_time:563080ms step_avg:75.88ms +[2025-09-02 05:31:43] [Rank 0] step:7421/10000 train_time:563080ms step_avg:75.88ms +[2025-09-02 05:31:44] [Rank 0] step:7441/10000 train_time:564683ms 
step_avg:75.89ms +[2025-09-02 05:31:44] [Rank 0] step:7441/10000 train_time:564683ms step_avg:75.89ms +[2025-09-02 05:31:46] [Rank 0] step:7461/10000 train_time:566290ms step_avg:75.90ms +[2025-09-02 05:31:46] [Rank 0] step:7461/10000 train_time:566290ms step_avg:75.90ms +[2025-09-02 05:31:48] [Rank 0] step:7481/10000 train_time:568002ms step_avg:75.93ms +[2025-09-02 05:31:48] [Rank 0] step:7481/10000 train_time:568002ms step_avg:75.93ms +[2025-09-02 05:31:49] [Rank 0] step:7501/10000 train_time:569610ms step_avg:75.94ms +[2025-09-02 05:31:49] [Rank 0] step:7501/10000 train_time:569610ms step_avg:75.94ms +[2025-09-02 05:31:51] [Rank 0] step:7521/10000 train_time:571221ms step_avg:75.95ms +[2025-09-02 05:31:51] [Rank 0] step:7521/10000 train_time:571221ms step_avg:75.95ms +[2025-09-02 05:31:53] [Rank 0] step:7541/10000 train_time:572839ms step_avg:75.96ms +[2025-09-02 05:31:53] [Rank 0] step:7541/10000 train_time:572839ms step_avg:75.96ms +[2025-09-02 05:31:54] [Rank 0] step:7561/10000 train_time:574437ms step_avg:75.97ms +[2025-09-02 05:31:54] [Rank 0] step:7561/10000 train_time:574437ms step_avg:75.97ms +[2025-09-02 05:31:56] [Rank 0] step:7581/10000 train_time:576153ms step_avg:76.00ms +[2025-09-02 05:31:56] [Rank 0] step:7581/10000 train_time:576153ms step_avg:76.00ms +[2025-09-02 05:31:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:31:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:32:09] [Rank 0] PRINT: step:7600/10000 val_loss:3.9413 svd_entropy: attn_qk:H=0.7448,top10E=0.26,eRank=148.1,q75/q25=108.69 attn_vo:H=0.8236,top10E=0.16,eRank=265.7,q75/q25=67.75 mlp_w1:H=0.7750,top10E=0.27,eRank=196.4,q75/q25=22.97 mlp_w2:H=0.8611,top10E=0.12,eRank=316.4,q75/q25=45.79 vo_prod:H=0.7319,top10E=0.25,eRank=139.1,q75/q25=5221.11 train_time:577930ms step_avg:76.04ms +[2025-09-02 05:32:09] [Rank 0] PRINT: step:7600/10000 val_loss:3.9413 svd_entropy: attn_qk:H=0.7448,top10E=0.26,eRank=148.1,q75/q25=108.69 attn_vo:H=0.8236,top10E=0.16,eRank=265.7,q75/q25=67.75 mlp_w1:H=0.7750,top10E=0.27,eRank=196.4,q75/q25=22.97 mlp_w2:H=0.8611,top10E=0.12,eRank=316.4,q75/q25=45.79 vo_prod:H=0.7319,top10E=0.25,eRank=139.1,q75/q25=5221.11 train_time:577930ms step_avg:76.04ms +[2025-09-02 05:32:09] [Rank 0] step:7601/10000 train_time:577941ms step_avg:76.03ms +[2025-09-02 05:32:09] [Rank 0] step:7601/10000 train_time:577941ms step_avg:76.03ms +[2025-09-02 05:32:11] [Rank 0] step:7621/10000 train_time:579401ms step_avg:76.03ms +[2025-09-02 05:32:11] [Rank 0] step:7621/10000 train_time:579401ms step_avg:76.03ms +[2025-09-02 05:32:12] [Rank 0] step:7641/10000 train_time:581009ms step_avg:76.04ms +[2025-09-02 05:32:12] [Rank 0] step:7641/10000 train_time:581009ms step_avg:76.04ms +[2025-09-02 05:32:14] [Rank 0] step:7661/10000 train_time:582621ms step_avg:76.05ms +[2025-09-02 05:32:14] [Rank 0] step:7661/10000 train_time:582621ms step_avg:76.05ms +[2025-09-02 05:32:16] [Rank 0] step:7681/10000 train_time:584222ms step_avg:76.06ms +[2025-09-02 05:32:16] [Rank 0] step:7681/10000 train_time:584222ms step_avg:76.06ms +[2025-09-02 05:32:17] [Rank 0] step:7701/10000 train_time:585827ms step_avg:76.07ms +[2025-09-02 05:32:17] [Rank 0] step:7701/10000 train_time:585827ms step_avg:76.07ms +[2025-09-02 05:32:19] [Rank 0] step:7721/10000 train_time:587452ms step_avg:76.09ms +[2025-09-02 05:32:19] [Rank 0] step:7721/10000 train_time:587452ms step_avg:76.09ms +[2025-09-02 
05:32:21] [Rank 0] step:7741/10000 train_time:589058ms step_avg:76.10ms +[2025-09-02 05:32:21] [Rank 0] step:7741/10000 train_time:589058ms step_avg:76.10ms +[2025-09-02 05:32:22] [Rank 0] step:7761/10000 train_time:590673ms step_avg:76.11ms +[2025-09-02 05:32:22] [Rank 0] step:7761/10000 train_time:590673ms step_avg:76.11ms +[2025-09-02 05:32:24] [Rank 0] step:7781/10000 train_time:592290ms step_avg:76.12ms +[2025-09-02 05:32:24] [Rank 0] step:7781/10000 train_time:592290ms step_avg:76.12ms +[2025-09-02 05:32:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:32:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:32:37] [Rank 0] PRINT: step:7800/10000 val_loss:3.9245 svd_entropy: attn_qk:H=0.7458,top10E=0.26,eRank=149.0,q75/q25=108.66 attn_vo:H=0.8245,top10E=0.16,eRank=267.1,q75/q25=66.63 mlp_w1:H=0.7764,top10E=0.27,eRank=197.9,q75/q25=23.16 mlp_w2:H=0.8618,top10E=0.12,eRank=317.9,q75/q25=46.08 vo_prod:H=0.7332,top10E=0.25,eRank=140.3,q75/q25=4987.70 train_time:594172ms step_avg:76.18ms +[2025-09-02 05:32:37] [Rank 0] PRINT: step:7800/10000 val_loss:3.9245 svd_entropy: attn_qk:H=0.7458,top10E=0.26,eRank=149.0,q75/q25=108.66 attn_vo:H=0.8245,top10E=0.16,eRank=267.1,q75/q25=66.63 mlp_w1:H=0.7764,top10E=0.27,eRank=197.9,q75/q25=23.16 mlp_w2:H=0.8618,top10E=0.12,eRank=317.9,q75/q25=46.08 vo_prod:H=0.7332,top10E=0.25,eRank=140.3,q75/q25=4987.70 train_time:594172ms step_avg:76.18ms +[2025-09-02 05:32:37] [Rank 0] step:7801/10000 train_time:594183ms step_avg:76.17ms +[2025-09-02 05:32:37] [Rank 0] step:7801/10000 train_time:594183ms step_avg:76.17ms +[2025-09-02 05:32:39] [Rank 0] step:7821/10000 train_time:595631ms step_avg:76.16ms +[2025-09-02 05:32:39] [Rank 0] step:7821/10000 train_time:595631ms step_avg:76.16ms +[2025-09-02 05:32:41] [Rank 0] step:7841/10000 train_time:597236ms 
step_avg:76.17ms +[2025-09-02 05:32:41] [Rank 0] step:7841/10000 train_time:597236ms step_avg:76.17ms +[2025-09-02 05:32:42] [Rank 0] step:7861/10000 train_time:598847ms step_avg:76.18ms +[2025-09-02 05:32:42] [Rank 0] step:7861/10000 train_time:598847ms step_avg:76.18ms +[2025-09-02 05:32:44] [Rank 0] step:7881/10000 train_time:600463ms step_avg:76.19ms +[2025-09-02 05:32:44] [Rank 0] step:7881/10000 train_time:600463ms step_avg:76.19ms +[2025-09-02 05:32:45] [Rank 0] step:7901/10000 train_time:602073ms step_avg:76.20ms +[2025-09-02 05:32:45] [Rank 0] step:7901/10000 train_time:602073ms step_avg:76.20ms +[2025-09-02 05:32:47] [Rank 0] step:7921/10000 train_time:603686ms step_avg:76.21ms +[2025-09-02 05:32:47] [Rank 0] step:7921/10000 train_time:603686ms step_avg:76.21ms +[2025-09-02 05:32:49] [Rank 0] step:7941/10000 train_time:605412ms step_avg:76.24ms +[2025-09-02 05:32:49] [Rank 0] step:7941/10000 train_time:605412ms step_avg:76.24ms +[2025-09-02 05:32:50] [Rank 0] step:7961/10000 train_time:607033ms step_avg:76.25ms +[2025-09-02 05:32:50] [Rank 0] step:7961/10000 train_time:607033ms step_avg:76.25ms +[2025-09-02 05:32:52] [Rank 0] step:7981/10000 train_time:608642ms step_avg:76.26ms +[2025-09-02 05:32:52] [Rank 0] step:7981/10000 train_time:608642ms step_avg:76.26ms +[2025-09-02 05:32:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:32:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:33:05] [Rank 0] PRINT: step:8000/10000 val_loss:3.9092 svd_entropy: attn_qk:H=0.7468,top10E=0.26,eRank=149.9,q75/q25=108.57 attn_vo:H=0.8254,top10E=0.16,eRank=268.4,q75/q25=65.70 mlp_w1:H=0.7774,top10E=0.27,eRank=199.2,q75/q25=23.31 mlp_w2:H=0.8625,top10E=0.12,eRank=319.5,q75/q25=46.22 vo_prod:H=0.7344,top10E=0.25,eRank=141.4,q75/q25=4826.47 train_time:610416ms step_avg:76.30ms +[2025-09-02 05:33:05] [Rank 0] PRINT: step:8000/10000 val_loss:3.9092 svd_entropy: attn_qk:H=0.7468,top10E=0.26,eRank=149.9,q75/q25=108.57 attn_vo:H=0.8254,top10E=0.16,eRank=268.4,q75/q25=65.70 mlp_w1:H=0.7774,top10E=0.27,eRank=199.2,q75/q25=23.31 mlp_w2:H=0.8625,top10E=0.12,eRank=319.5,q75/q25=46.22 vo_prod:H=0.7344,top10E=0.25,eRank=141.4,q75/q25=4826.47 train_time:610416ms step_avg:76.30ms +[2025-09-02 05:33:05] [Rank 0] step:8001/10000 train_time:610428ms step_avg:76.29ms +[2025-09-02 05:33:05] [Rank 0] step:8001/10000 train_time:610428ms step_avg:76.29ms +[2025-09-02 05:33:07] [Rank 0] step:8021/10000 train_time:611892ms step_avg:76.29ms +[2025-09-02 05:33:07] [Rank 0] step:8021/10000 train_time:611892ms step_avg:76.29ms +[2025-09-02 05:33:09] [Rank 0] step:8041/10000 train_time:613517ms step_avg:76.30ms +[2025-09-02 05:33:09] [Rank 0] step:8041/10000 train_time:613517ms step_avg:76.30ms +[2025-09-02 05:33:10] [Rank 0] step:8061/10000 train_time:615124ms step_avg:76.31ms +[2025-09-02 05:33:10] [Rank 0] step:8061/10000 train_time:615124ms step_avg:76.31ms +[2025-09-02 05:33:12] [Rank 0] step:8081/10000 train_time:616730ms step_avg:76.32ms +[2025-09-02 05:33:12] [Rank 0] step:8081/10000 train_time:616730ms step_avg:76.32ms +[2025-09-02 05:33:13] [Rank 0] step:8101/10000 train_time:618351ms step_avg:76.33ms +[2025-09-02 05:33:13] [Rank 0] step:8101/10000 train_time:618351ms step_avg:76.33ms +[2025-09-02 05:33:15] [Rank 0] step:8121/10000 train_time:619964ms step_avg:76.34ms +[2025-09-02 05:33:15] [Rank 0] step:8121/10000 train_time:619964ms step_avg:76.34ms +[2025-09-02 
05:33:17] [Rank 0] step:8141/10000 train_time:621670ms step_avg:76.36ms +[2025-09-02 05:33:17] [Rank 0] step:8141/10000 train_time:621670ms step_avg:76.36ms +[2025-09-02 05:33:18] [Rank 0] step:8161/10000 train_time:623293ms step_avg:76.37ms +[2025-09-02 05:33:18] [Rank 0] step:8161/10000 train_time:623293ms step_avg:76.37ms +[2025-09-02 05:33:20] [Rank 0] step:8181/10000 train_time:624936ms step_avg:76.39ms +[2025-09-02 05:33:20] [Rank 0] step:8181/10000 train_time:624936ms step_avg:76.39ms +[2025-09-02 05:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:33:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:33:33] [Rank 0] PRINT: step:8200/10000 val_loss:3.8991 svd_entropy: attn_qk:H=0.7477,top10E=0.26,eRank=150.7,q75/q25=108.66 attn_vo:H=0.8262,top10E=0.16,eRank=269.6,q75/q25=64.68 mlp_w1:H=0.7785,top10E=0.27,eRank=200.5,q75/q25=23.45 mlp_w2:H=0.8631,top10E=0.12,eRank=320.7,q75/q25=46.23 vo_prod:H=0.7354,top10E=0.25,eRank=142.4,q75/q25=4629.98 train_time:626766ms step_avg:76.43ms +[2025-09-02 05:33:33] [Rank 0] PRINT: step:8200/10000 val_loss:3.8991 svd_entropy: attn_qk:H=0.7477,top10E=0.26,eRank=150.7,q75/q25=108.66 attn_vo:H=0.8262,top10E=0.16,eRank=269.6,q75/q25=64.68 mlp_w1:H=0.7785,top10E=0.27,eRank=200.5,q75/q25=23.45 mlp_w2:H=0.8631,top10E=0.12,eRank=320.7,q75/q25=46.23 vo_prod:H=0.7354,top10E=0.25,eRank=142.4,q75/q25=4629.98 train_time:626766ms step_avg:76.43ms +[2025-09-02 05:33:33] [Rank 0] step:8201/10000 train_time:626778ms step_avg:76.43ms +[2025-09-02 05:33:33] [Rank 0] step:8201/10000 train_time:626778ms step_avg:76.43ms +[2025-09-02 05:33:35] [Rank 0] step:8221/10000 train_time:628272ms step_avg:76.42ms +[2025-09-02 05:33:35] [Rank 0] step:8221/10000 train_time:628272ms step_avg:76.42ms +[2025-09-02 05:33:37] [Rank 0] step:8241/10000 train_time:629920ms 
step_avg:76.44ms +[2025-09-02 05:33:37] [Rank 0] step:8241/10000 train_time:629920ms step_avg:76.44ms +[2025-09-02 05:33:38] [Rank 0] step:8261/10000 train_time:631557ms step_avg:76.45ms +[2025-09-02 05:33:38] [Rank 0] step:8261/10000 train_time:631557ms step_avg:76.45ms +[2025-09-02 05:33:40] [Rank 0] step:8281/10000 train_time:633201ms step_avg:76.46ms +[2025-09-02 05:33:40] [Rank 0] step:8281/10000 train_time:633201ms step_avg:76.46ms +[2025-09-02 05:33:42] [Rank 0] step:8301/10000 train_time:634843ms step_avg:76.48ms +[2025-09-02 05:33:42] [Rank 0] step:8301/10000 train_time:634843ms step_avg:76.48ms +[2025-09-02 05:33:43] [Rank 0] step:8321/10000 train_time:636472ms step_avg:76.49ms +[2025-09-02 05:33:43] [Rank 0] step:8321/10000 train_time:636472ms step_avg:76.49ms +[2025-09-02 05:33:45] [Rank 0] step:8341/10000 train_time:638113ms step_avg:76.50ms +[2025-09-02 05:33:45] [Rank 0] step:8341/10000 train_time:638113ms step_avg:76.50ms +[2025-09-02 05:33:47] [Rank 0] step:8361/10000 train_time:639753ms step_avg:76.52ms +[2025-09-02 05:33:47] [Rank 0] step:8361/10000 train_time:639753ms step_avg:76.52ms +[2025-09-02 05:33:48] [Rank 0] step:8381/10000 train_time:641393ms step_avg:76.53ms +[2025-09-02 05:33:48] [Rank 0] step:8381/10000 train_time:641393ms step_avg:76.53ms +[2025-09-02 05:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:34:01] [Rank 0] PRINT: step:8400/10000 val_loss:3.8887 svd_entropy: attn_qk:H=0.7484,top10E=0.26,eRank=151.4,q75/q25=108.59 attn_vo:H=0.8269,top10E=0.16,eRank=270.6,q75/q25=63.97 mlp_w1:H=0.7795,top10E=0.27,eRank=201.7,q75/q25=23.58 mlp_w2:H=0.8636,top10E=0.12,eRank=321.9,q75/q25=46.21 vo_prod:H=0.7365,top10E=0.25,eRank=143.4,q75/q25=4498.98 train_time:643197ms step_avg:76.57ms +[2025-09-02 05:34:01] [Rank 0] PRINT: step:8400/10000 val_loss:3.8887 svd_entropy: attn_qk:H=0.7484,top10E=0.26,eRank=151.4,q75/q25=108.59 attn_vo:H=0.8269,top10E=0.16,eRank=270.6,q75/q25=63.97 mlp_w1:H=0.7795,top10E=0.27,eRank=201.7,q75/q25=23.58 mlp_w2:H=0.8636,top10E=0.12,eRank=321.9,q75/q25=46.21 vo_prod:H=0.7365,top10E=0.25,eRank=143.4,q75/q25=4498.98 train_time:643197ms step_avg:76.57ms +[2025-09-02 05:34:02] [Rank 0] step:8401/10000 train_time:643208ms step_avg:76.56ms +[2025-09-02 05:34:02] [Rank 0] step:8401/10000 train_time:643208ms step_avg:76.56ms +[2025-09-02 05:34:03] [Rank 0] step:8421/10000 train_time:644708ms step_avg:76.56ms +[2025-09-02 05:34:03] [Rank 0] step:8421/10000 train_time:644708ms step_avg:76.56ms +[2025-09-02 05:34:05] [Rank 0] step:8441/10000 train_time:646341ms step_avg:76.57ms +[2025-09-02 05:34:05] [Rank 0] step:8441/10000 train_time:646341ms step_avg:76.57ms +[2025-09-02 05:34:07] [Rank 0] step:8461/10000 train_time:648079ms step_avg:76.60ms +[2025-09-02 05:34:07] [Rank 0] step:8461/10000 train_time:648079ms step_avg:76.60ms +[2025-09-02 05:34:08] [Rank 0] step:8481/10000 train_time:649721ms step_avg:76.61ms +[2025-09-02 05:34:08] [Rank 0] step:8481/10000 train_time:649721ms step_avg:76.61ms +[2025-09-02 05:34:10] [Rank 0] step:8501/10000 train_time:651381ms step_avg:76.62ms +[2025-09-02 05:34:10] [Rank 0] step:8501/10000 train_time:651381ms step_avg:76.62ms +[2025-09-02 05:34:12] [Rank 0] step:8521/10000 train_time:653023ms step_avg:76.64ms +[2025-09-02 05:34:12] [Rank 0] step:8521/10000 train_time:653023ms step_avg:76.64ms +[2025-09-02 
05:34:13] [Rank 0] step:8541/10000 train_time:654673ms step_avg:76.65ms +[2025-09-02 05:34:13] [Rank 0] step:8541/10000 train_time:654673ms step_avg:76.65ms +[2025-09-02 05:34:15] [Rank 0] step:8561/10000 train_time:656312ms step_avg:76.66ms +[2025-09-02 05:34:15] [Rank 0] step:8561/10000 train_time:656312ms step_avg:76.66ms +[2025-09-02 05:34:16] [Rank 0] step:8581/10000 train_time:657953ms step_avg:76.68ms +[2025-09-02 05:34:16] [Rank 0] step:8581/10000 train_time:657953ms step_avg:76.68ms +[2025-09-02 05:34:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:34:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:34:30] [Rank 0] PRINT: step:8600/10000 val_loss:3.8791 svd_entropy: attn_qk:H=0.7491,top10E=0.26,eRank=152.0,q75/q25=108.05 attn_vo:H=0.8275,top10E=0.16,eRank=271.5,q75/q25=63.00 mlp_w1:H=0.7803,top10E=0.27,eRank=202.7,q75/q25=23.64 mlp_w2:H=0.8641,top10E=0.12,eRank=323.0,q75/q25=46.28 vo_prod:H=0.7374,top10E=0.25,eRank=144.2,q75/q25=4404.18 train_time:659751ms step_avg:76.72ms +[2025-09-02 05:34:30] [Rank 0] PRINT: step:8600/10000 val_loss:3.8791 svd_entropy: attn_qk:H=0.7491,top10E=0.26,eRank=152.0,q75/q25=108.05 attn_vo:H=0.8275,top10E=0.16,eRank=271.5,q75/q25=63.00 mlp_w1:H=0.7803,top10E=0.27,eRank=202.7,q75/q25=23.64 mlp_w2:H=0.8641,top10E=0.12,eRank=323.0,q75/q25=46.28 vo_prod:H=0.7374,top10E=0.25,eRank=144.2,q75/q25=4404.18 train_time:659751ms step_avg:76.72ms +[2025-09-02 05:34:30] [Rank 0] step:8601/10000 train_time:659762ms step_avg:76.71ms +[2025-09-02 05:34:30] [Rank 0] step:8601/10000 train_time:659762ms step_avg:76.71ms +[2025-09-02 05:34:31] [Rank 0] step:8621/10000 train_time:661267ms step_avg:76.70ms +[2025-09-02 05:34:31] [Rank 0] step:8621/10000 train_time:661267ms step_avg:76.70ms +[2025-09-02 05:34:33] [Rank 0] step:8641/10000 train_time:662909ms 
step_avg:76.72ms +[2025-09-02 05:34:33] [Rank 0] step:8641/10000 train_time:662909ms step_avg:76.72ms +[2025-09-02 05:34:35] [Rank 0] step:8661/10000 train_time:664546ms step_avg:76.73ms +[2025-09-02 05:34:35] [Rank 0] step:8661/10000 train_time:664546ms step_avg:76.73ms +[2025-09-02 05:34:36] [Rank 0] step:8681/10000 train_time:666183ms step_avg:76.74ms +[2025-09-02 05:34:36] [Rank 0] step:8681/10000 train_time:666183ms step_avg:76.74ms +[2025-09-02 05:34:38] [Rank 0] step:8701/10000 train_time:667819ms step_avg:76.75ms +[2025-09-02 05:34:38] [Rank 0] step:8701/10000 train_time:667819ms step_avg:76.75ms +[2025-09-02 05:34:39] [Rank 0] step:8721/10000 train_time:669461ms step_avg:76.76ms +[2025-09-02 05:34:39] [Rank 0] step:8721/10000 train_time:669461ms step_avg:76.76ms +[2025-09-02 05:34:41] [Rank 0] step:8741/10000 train_time:671087ms step_avg:76.77ms +[2025-09-02 05:34:41] [Rank 0] step:8741/10000 train_time:671087ms step_avg:76.77ms +[2025-09-02 05:34:43] [Rank 0] step:8761/10000 train_time:672722ms step_avg:76.79ms +[2025-09-02 05:34:43] [Rank 0] step:8761/10000 train_time:672722ms step_avg:76.79ms +[2025-09-02 05:34:44] [Rank 0] step:8781/10000 train_time:674368ms step_avg:76.80ms +[2025-09-02 05:34:44] [Rank 0] step:8781/10000 train_time:674368ms step_avg:76.80ms +[2025-09-02 05:34:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:34:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:34:58] [Rank 0] PRINT: step:8800/10000 val_loss:3.8702 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=152.6,q75/q25=108.25 attn_vo:H=0.8280,top10E=0.15,eRank=272.4,q75/q25=62.44 mlp_w1:H=0.7811,top10E=0.27,eRank=203.8,q75/q25=23.71 mlp_w2:H=0.8645,top10E=0.12,eRank=323.9,q75/q25=46.41 vo_prod:H=0.7382,top10E=0.25,eRank=145.0,q75/q25=4213.90 train_time:676176ms step_avg:76.84ms +[2025-09-02 05:34:58] [Rank 0] PRINT: step:8800/10000 val_loss:3.8702 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=152.6,q75/q25=108.25 attn_vo:H=0.8280,top10E=0.15,eRank=272.4,q75/q25=62.44 mlp_w1:H=0.7811,top10E=0.27,eRank=203.8,q75/q25=23.71 mlp_w2:H=0.8645,top10E=0.12,eRank=323.9,q75/q25=46.41 vo_prod:H=0.7382,top10E=0.25,eRank=145.0,q75/q25=4213.90 train_time:676176ms step_avg:76.84ms +[2025-09-02 05:34:58] [Rank 0] step:8801/10000 train_time:676187ms step_avg:76.83ms +[2025-09-02 05:34:58] [Rank 0] step:8801/10000 train_time:676187ms step_avg:76.83ms +[2025-09-02 05:34:59] [Rank 0] step:8821/10000 train_time:677667ms step_avg:76.82ms +[2025-09-02 05:34:59] [Rank 0] step:8821/10000 train_time:677667ms step_avg:76.82ms +[2025-09-02 05:35:01] [Rank 0] step:8841/10000 train_time:679327ms step_avg:76.84ms +[2025-09-02 05:35:01] [Rank 0] step:8841/10000 train_time:679327ms step_avg:76.84ms +[2025-09-02 05:35:03] [Rank 0] step:8861/10000 train_time:680965ms step_avg:76.85ms +[2025-09-02 05:35:03] [Rank 0] step:8861/10000 train_time:680965ms step_avg:76.85ms +[2025-09-02 05:35:04] [Rank 0] step:8881/10000 train_time:682607ms step_avg:76.86ms +[2025-09-02 05:35:04] [Rank 0] step:8881/10000 train_time:682607ms step_avg:76.86ms +[2025-09-02 05:35:06] [Rank 0] step:8901/10000 train_time:684253ms step_avg:76.87ms +[2025-09-02 05:35:06] [Rank 0] step:8901/10000 train_time:684253ms step_avg:76.87ms +[2025-09-02 05:35:08] [Rank 0] step:8921/10000 train_time:685898ms step_avg:76.89ms +[2025-09-02 05:35:08] [Rank 0] step:8921/10000 train_time:685898ms step_avg:76.89ms +[2025-09-02 
05:35:09] [Rank 0] step:8941/10000 train_time:687549ms step_avg:76.90ms +[2025-09-02 05:35:09] [Rank 0] step:8941/10000 train_time:687549ms step_avg:76.90ms +[2025-09-02 05:35:11] [Rank 0] step:8961/10000 train_time:689184ms step_avg:76.91ms +[2025-09-02 05:35:11] [Rank 0] step:8961/10000 train_time:689184ms step_avg:76.91ms +[2025-09-02 05:35:13] [Rank 0] step:8981/10000 train_time:690820ms step_avg:76.92ms +[2025-09-02 05:35:13] [Rank 0] step:8981/10000 train_time:690820ms step_avg:76.92ms +[2025-09-02 05:35:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:35:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:35:26] [Rank 0] PRINT: step:9000/10000 val_loss:3.8612 svd_entropy: attn_qk:H=0.7503,top10E=0.26,eRank=153.1,q75/q25=108.22 attn_vo:H=0.8285,top10E=0.15,eRank=273.1,q75/q25=61.81 mlp_w1:H=0.7817,top10E=0.27,eRank=204.5,q75/q25=23.78 mlp_w2:H=0.8648,top10E=0.12,eRank=324.7,q75/q25=46.35 vo_prod:H=0.7389,top10E=0.25,eRank=145.7,q75/q25=4088.52 train_time:692620ms step_avg:76.96ms +[2025-09-02 05:35:26] [Rank 0] PRINT: step:9000/10000 val_loss:3.8612 svd_entropy: attn_qk:H=0.7503,top10E=0.26,eRank=153.1,q75/q25=108.22 attn_vo:H=0.8285,top10E=0.15,eRank=273.1,q75/q25=61.81 mlp_w1:H=0.7817,top10E=0.27,eRank=204.5,q75/q25=23.78 mlp_w2:H=0.8648,top10E=0.12,eRank=324.7,q75/q25=46.35 vo_prod:H=0.7389,top10E=0.25,eRank=145.7,q75/q25=4088.52 train_time:692620ms step_avg:76.96ms +[2025-09-02 05:35:26] [Rank 0] step:9001/10000 train_time:692631ms step_avg:76.95ms +[2025-09-02 05:35:26] [Rank 0] step:9001/10000 train_time:692631ms step_avg:76.95ms +[2025-09-02 05:35:28] [Rank 0] step:9021/10000 train_time:694139ms step_avg:76.95ms +[2025-09-02 05:35:28] [Rank 0] step:9021/10000 train_time:694139ms step_avg:76.95ms +[2025-09-02 05:35:29] [Rank 0] step:9041/10000 train_time:695779ms 
step_avg:76.96ms +[2025-09-02 05:35:29] [Rank 0] step:9041/10000 train_time:695779ms step_avg:76.96ms +[2025-09-02 05:35:31] [Rank 0] step:9061/10000 train_time:697428ms step_avg:76.97ms +[2025-09-02 05:35:31] [Rank 0] step:9061/10000 train_time:697428ms step_avg:76.97ms +[2025-09-02 05:35:33] [Rank 0] step:9081/10000 train_time:699077ms step_avg:76.98ms +[2025-09-02 05:35:33] [Rank 0] step:9081/10000 train_time:699077ms step_avg:76.98ms +[2025-09-02 05:35:34] [Rank 0] step:9101/10000 train_time:700735ms step_avg:77.00ms +[2025-09-02 05:35:34] [Rank 0] step:9101/10000 train_time:700735ms step_avg:77.00ms +[2025-09-02 05:35:36] [Rank 0] step:9121/10000 train_time:702380ms step_avg:77.01ms +[2025-09-02 05:35:36] [Rank 0] step:9121/10000 train_time:702380ms step_avg:77.01ms +[2025-09-02 05:35:37] [Rank 0] step:9141/10000 train_time:704013ms step_avg:77.02ms +[2025-09-02 05:35:37] [Rank 0] step:9141/10000 train_time:704013ms step_avg:77.02ms +[2025-09-02 05:35:39] [Rank 0] step:9161/10000 train_time:705647ms step_avg:77.03ms +[2025-09-02 05:35:39] [Rank 0] step:9161/10000 train_time:705647ms step_avg:77.03ms +[2025-09-02 05:35:41] [Rank 0] step:9181/10000 train_time:707318ms step_avg:77.04ms +[2025-09-02 05:35:41] [Rank 0] step:9181/10000 train_time:707318ms step_avg:77.04ms +[2025-09-02 05:35:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:35:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:35:54] [Rank 0] PRINT: step:9200/10000 val_loss:3.8528 svd_entropy: attn_qk:H=0.7507,top10E=0.26,eRank=153.5,q75/q25=108.22 attn_vo:H=0.8289,top10E=0.15,eRank=273.7,q75/q25=61.35 mlp_w1:H=0.7823,top10E=0.27,eRank=205.3,q75/q25=23.89 mlp_w2:H=0.8651,top10E=0.12,eRank=325.4,q75/q25=46.31 vo_prod:H=0.7395,top10E=0.24,eRank=146.3,q75/q25=4018.55 train_time:709126ms step_avg:77.08ms +[2025-09-02 05:35:54] [Rank 0] PRINT: step:9200/10000 val_loss:3.8528 svd_entropy: attn_qk:H=0.7507,top10E=0.26,eRank=153.5,q75/q25=108.22 attn_vo:H=0.8289,top10E=0.15,eRank=273.7,q75/q25=61.35 mlp_w1:H=0.7823,top10E=0.27,eRank=205.3,q75/q25=23.89 mlp_w2:H=0.8651,top10E=0.12,eRank=325.4,q75/q25=46.31 vo_prod:H=0.7395,top10E=0.24,eRank=146.3,q75/q25=4018.55 train_time:709126ms step_avg:77.08ms +[2025-09-02 05:35:54] [Rank 0] step:9201/10000 train_time:709137ms step_avg:77.07ms +[2025-09-02 05:35:54] [Rank 0] step:9201/10000 train_time:709137ms step_avg:77.07ms +[2025-09-02 05:35:56] [Rank 0] step:9221/10000 train_time:710637ms step_avg:77.07ms +[2025-09-02 05:35:56] [Rank 0] step:9221/10000 train_time:710637ms step_avg:77.07ms +[2025-09-02 05:35:57] [Rank 0] step:9241/10000 train_time:712289ms step_avg:77.08ms +[2025-09-02 05:35:57] [Rank 0] step:9241/10000 train_time:712289ms step_avg:77.08ms +[2025-09-02 05:35:59] [Rank 0] step:9261/10000 train_time:713944ms step_avg:77.09ms +[2025-09-02 05:35:59] [Rank 0] step:9261/10000 train_time:713944ms step_avg:77.09ms +[2025-09-02 05:36:01] [Rank 0] step:9281/10000 train_time:715576ms step_avg:77.10ms +[2025-09-02 05:36:01] [Rank 0] step:9281/10000 train_time:715576ms step_avg:77.10ms +[2025-09-02 05:36:02] [Rank 0] step:9301/10000 train_time:717219ms step_avg:77.11ms +[2025-09-02 05:36:02] [Rank 0] step:9301/10000 train_time:717219ms step_avg:77.11ms +[2025-09-02 05:36:04] [Rank 0] step:9321/10000 train_time:718864ms step_avg:77.12ms +[2025-09-02 05:36:04] [Rank 0] step:9321/10000 train_time:718864ms step_avg:77.12ms +[2025-09-02 
05:36:06] [Rank 0] step:9341/10000 train_time:720511ms step_avg:77.13ms +[2025-09-02 05:36:06] [Rank 0] step:9341/10000 train_time:720511ms step_avg:77.13ms +[2025-09-02 05:36:07] [Rank 0] step:9361/10000 train_time:722161ms step_avg:77.15ms +[2025-09-02 05:36:07] [Rank 0] step:9361/10000 train_time:722161ms step_avg:77.15ms +[2025-09-02 05:36:09] [Rank 0] step:9381/10000 train_time:723818ms step_avg:77.16ms +[2025-09-02 05:36:09] [Rank 0] step:9381/10000 train_time:723818ms step_avg:77.16ms +[2025-09-02 05:36:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:36:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:36:22] [Rank 0] PRINT: step:9400/10000 val_loss:3.8456 svd_entropy: attn_qk:H=0.7511,top10E=0.26,eRank=153.9,q75/q25=108.13 attn_vo:H=0.8293,top10E=0.15,eRank=274.2,q75/q25=60.83 mlp_w1:H=0.7828,top10E=0.26,eRank=205.9,q75/q25=23.92 mlp_w2:H=0.8654,top10E=0.12,eRank=326.0,q75/q25=46.27 vo_prod:H=0.7401,top10E=0.24,eRank=146.8,q75/q25=3935.43 train_time:725634ms step_avg:77.20ms +[2025-09-02 05:36:22] [Rank 0] PRINT: step:9400/10000 val_loss:3.8456 svd_entropy: attn_qk:H=0.7511,top10E=0.26,eRank=153.9,q75/q25=108.13 attn_vo:H=0.8293,top10E=0.15,eRank=274.2,q75/q25=60.83 mlp_w1:H=0.7828,top10E=0.26,eRank=205.9,q75/q25=23.92 mlp_w2:H=0.8654,top10E=0.12,eRank=326.0,q75/q25=46.27 vo_prod:H=0.7401,top10E=0.24,eRank=146.8,q75/q25=3935.43 train_time:725634ms step_avg:77.20ms +[2025-09-02 05:36:22] [Rank 0] step:9401/10000 train_time:725645ms step_avg:77.19ms +[2025-09-02 05:36:22] [Rank 0] step:9401/10000 train_time:725645ms step_avg:77.19ms +[2025-09-02 05:36:24] [Rank 0] step:9421/10000 train_time:727135ms step_avg:77.18ms +[2025-09-02 05:36:24] [Rank 0] step:9421/10000 train_time:727135ms step_avg:77.18ms +[2025-09-02 05:36:26] [Rank 0] step:9441/10000 train_time:728777ms 
step_avg:77.19ms +[2025-09-02 05:36:26] [Rank 0] step:9441/10000 train_time:728777ms step_avg:77.19ms +[2025-09-02 05:36:27] [Rank 0] step:9461/10000 train_time:730424ms step_avg:77.20ms +[2025-09-02 05:36:27] [Rank 0] step:9461/10000 train_time:730424ms step_avg:77.20ms +[2025-09-02 05:36:29] [Rank 0] step:9481/10000 train_time:732073ms step_avg:77.21ms +[2025-09-02 05:36:29] [Rank 0] step:9481/10000 train_time:732073ms step_avg:77.21ms +[2025-09-02 05:36:31] [Rank 0] step:9501/10000 train_time:733726ms step_avg:77.23ms +[2025-09-02 05:36:31] [Rank 0] step:9501/10000 train_time:733726ms step_avg:77.23ms +[2025-09-02 05:36:32] [Rank 0] step:9521/10000 train_time:735363ms step_avg:77.24ms +[2025-09-02 05:36:32] [Rank 0] step:9521/10000 train_time:735363ms step_avg:77.24ms +[2025-09-02 05:36:34] [Rank 0] step:9541/10000 train_time:737007ms step_avg:77.25ms +[2025-09-02 05:36:34] [Rank 0] step:9541/10000 train_time:737007ms step_avg:77.25ms +[2025-09-02 05:36:36] [Rank 0] step:9561/10000 train_time:738646ms step_avg:77.26ms +[2025-09-02 05:36:36] [Rank 0] step:9561/10000 train_time:738646ms step_avg:77.26ms +[2025-09-02 05:36:37] [Rank 0] step:9581/10000 train_time:740291ms step_avg:77.27ms +[2025-09-02 05:36:37] [Rank 0] step:9581/10000 train_time:740291ms step_avg:77.27ms +[2025-09-02 05:36:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:36:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:36:50] [Rank 0] PRINT: step:9600/10000 val_loss:3.8393 svd_entropy: attn_qk:H=0.7515,top10E=0.26,eRank=154.2,q75/q25=108.24 attn_vo:H=0.8296,top10E=0.15,eRank=274.7,q75/q25=60.60 mlp_w1:H=0.7832,top10E=0.26,eRank=206.4,q75/q25=23.94 mlp_w2:H=0.8656,top10E=0.12,eRank=326.5,q75/q25=46.32 vo_prod:H=0.7405,top10E=0.24,eRank=147.2,q75/q25=3848.72 train_time:742110ms step_avg:77.30ms +[2025-09-02 05:36:50] [Rank 0] PRINT: step:9600/10000 val_loss:3.8393 svd_entropy: attn_qk:H=0.7515,top10E=0.26,eRank=154.2,q75/q25=108.24 attn_vo:H=0.8296,top10E=0.15,eRank=274.7,q75/q25=60.60 mlp_w1:H=0.7832,top10E=0.26,eRank=206.4,q75/q25=23.94 mlp_w2:H=0.8656,top10E=0.12,eRank=326.5,q75/q25=46.32 vo_prod:H=0.7405,top10E=0.24,eRank=147.2,q75/q25=3848.72 train_time:742110ms step_avg:77.30ms +[2025-09-02 05:36:51] [Rank 0] step:9601/10000 train_time:742120ms step_avg:77.30ms +[2025-09-02 05:36:51] [Rank 0] step:9601/10000 train_time:742120ms step_avg:77.30ms +[2025-09-02 05:36:52] [Rank 0] step:9621/10000 train_time:743624ms step_avg:77.29ms +[2025-09-02 05:36:52] [Rank 0] step:9621/10000 train_time:743624ms step_avg:77.29ms +[2025-09-02 05:36:54] [Rank 0] step:9641/10000 train_time:745269ms step_avg:77.30ms +[2025-09-02 05:36:54] [Rank 0] step:9641/10000 train_time:745269ms step_avg:77.30ms +[2025-09-02 05:36:56] [Rank 0] step:9661/10000 train_time:746939ms step_avg:77.31ms +[2025-09-02 05:36:56] [Rank 0] step:9661/10000 train_time:746939ms step_avg:77.31ms +[2025-09-02 05:36:57] [Rank 0] step:9681/10000 train_time:748603ms step_avg:77.33ms +[2025-09-02 05:36:57] [Rank 0] step:9681/10000 train_time:748603ms step_avg:77.33ms +[2025-09-02 05:36:59] [Rank 0] step:9701/10000 train_time:750283ms step_avg:77.34ms +[2025-09-02 05:36:59] [Rank 0] step:9701/10000 train_time:750283ms step_avg:77.34ms +[2025-09-02 05:37:01] [Rank 0] step:9721/10000 train_time:751946ms step_avg:77.35ms +[2025-09-02 05:37:01] [Rank 0] step:9721/10000 train_time:751946ms step_avg:77.35ms +[2025-09-02 
05:37:02] [Rank 0] step:9741/10000 train_time:753630ms step_avg:77.37ms +[2025-09-02 05:37:02] [Rank 0] step:9741/10000 train_time:753630ms step_avg:77.37ms +[2025-09-02 05:37:04] [Rank 0] step:9761/10000 train_time:755294ms step_avg:77.38ms +[2025-09-02 05:37:04] [Rank 0] step:9761/10000 train_time:755294ms step_avg:77.38ms +[2025-09-02 05:37:06] [Rank 0] step:9781/10000 train_time:756975ms step_avg:77.39ms +[2025-09-02 05:37:06] [Rank 0] step:9781/10000 train_time:756975ms step_avg:77.39ms +[2025-09-02 05:37:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:37:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:37:19] [Rank 0] PRINT: step:9800/10000 val_loss:3.8331 svd_entropy: attn_qk:H=0.7517,top10E=0.26,eRank=154.4,q75/q25=108.48 attn_vo:H=0.8298,top10E=0.15,eRank=275.0,q75/q25=60.31 mlp_w1:H=0.7835,top10E=0.26,eRank=206.9,q75/q25=23.91 mlp_w2:H=0.8658,top10E=0.12,eRank=326.9,q75/q25=46.29 vo_prod:H=0.7409,top10E=0.24,eRank=147.6,q75/q25=3795.72 train_time:758826ms step_avg:77.43ms +[2025-09-02 05:37:19] [Rank 0] PRINT: step:9800/10000 val_loss:3.8331 svd_entropy: attn_qk:H=0.7517,top10E=0.26,eRank=154.4,q75/q25=108.48 attn_vo:H=0.8298,top10E=0.15,eRank=275.0,q75/q25=60.31 mlp_w1:H=0.7835,top10E=0.26,eRank=206.9,q75/q25=23.91 mlp_w2:H=0.8658,top10E=0.12,eRank=326.9,q75/q25=46.29 vo_prod:H=0.7409,top10E=0.24,eRank=147.6,q75/q25=3795.72 train_time:758826ms step_avg:77.43ms +[2025-09-02 05:37:19] [Rank 0] step:9801/10000 train_time:758836ms step_avg:77.42ms +[2025-09-02 05:37:19] [Rank 0] step:9801/10000 train_time:758836ms step_avg:77.42ms +[2025-09-02 05:37:21] [Rank 0] step:9821/10000 train_time:760353ms step_avg:77.42ms +[2025-09-02 05:37:21] [Rank 0] step:9821/10000 train_time:760353ms step_avg:77.42ms +[2025-09-02 05:37:22] [Rank 0] step:9841/10000 train_time:762042ms 
step_avg:77.44ms +[2025-09-02 05:37:22] [Rank 0] step:9841/10000 train_time:762042ms step_avg:77.44ms +[2025-09-02 05:37:24] [Rank 0] step:9861/10000 train_time:763700ms step_avg:77.45ms +[2025-09-02 05:37:24] [Rank 0] step:9861/10000 train_time:763700ms step_avg:77.45ms +[2025-09-02 05:37:26] [Rank 0] step:9881/10000 train_time:765356ms step_avg:77.46ms +[2025-09-02 05:37:26] [Rank 0] step:9881/10000 train_time:765356ms step_avg:77.46ms +[2025-09-02 05:37:27] [Rank 0] step:9901/10000 train_time:767028ms step_avg:77.47ms +[2025-09-02 05:37:27] [Rank 0] step:9901/10000 train_time:767028ms step_avg:77.47ms +[2025-09-02 05:37:29] [Rank 0] step:9921/10000 train_time:768692ms step_avg:77.48ms +[2025-09-02 05:37:29] [Rank 0] step:9921/10000 train_time:768692ms step_avg:77.48ms +[2025-09-02 05:37:31] [Rank 0] step:9941/10000 train_time:770364ms step_avg:77.49ms +[2025-09-02 05:37:31] [Rank 0] step:9941/10000 train_time:770364ms step_avg:77.49ms +[2025-09-02 05:37:32] [Rank 0] step:9961/10000 train_time:772032ms step_avg:77.51ms +[2025-09-02 05:37:32] [Rank 0] step:9961/10000 train_time:772032ms step_avg:77.51ms +[2025-09-02 05:37:34] [Rank 0] step:9981/10000 train_time:773698ms step_avg:77.52ms +[2025-09-02 05:37:34] [Rank 0] step:9981/10000 train_time:773698ms step_avg:77.52ms +[2025-09-02 05:37:36] [Rank 0] step:10000/10000 train_time:775292ms step_avg:77.53ms +[2025-09-02 05:37:36] [Rank 0] step:10000/10000 train_time:775292ms step_avg:77.53ms +[2025-09-02 05:37:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 05:37:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 05:37:47] [Rank 0] PRINT: step:10000/10000 val_loss:3.8275 svd_entropy: attn_qk:H=0.7519,top10E=0.26,eRank=154.6,q75/q25=108.52 attn_vo:H=0.8300,top10E=0.15,eRank=275.3,q75/q25=60.16 mlp_w1:H=0.7837,top10E=0.26,eRank=207.2,q75/q25=23.92 mlp_w2:H=0.8659,top10E=0.12,eRank=327.2,q75/q25=46.29 vo_prod:H=0.7411,top10E=0.24,eRank=147.8,q75/q25=3768.23 train_time:775551ms step_avg:77.56ms +[2025-09-02 05:37:47] [Rank 0] PRINT: step:10000/10000 val_loss:3.8275 svd_entropy: attn_qk:H=0.7519,top10E=0.26,eRank=154.6,q75/q25=108.52 attn_vo:H=0.8300,top10E=0.15,eRank=275.3,q75/q25=60.16 mlp_w1:H=0.7837,top10E=0.26,eRank=207.2,q75/q25=23.92 mlp_w2:H=0.8659,top10E=0.12,eRank=327.2,q75/q25=46.29 vo_prod:H=0.7411,top10E=0.24,eRank=147.8,q75/q25=3768.23 train_time:775551ms step_avg:77.56ms +[2025-09-02 05:37:47] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 05:37:47 2025 --- +[2025-09-02 05:37:47] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 05:37:47 2025 --- +[2025-09-02 05:37:47] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 05:37:47] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_42/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..29259ac2db83fb6f2463374d0b6186ab644bf714 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "b1213999-eb35-4094-819c-941740fcc9ab", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_42/training_log_b1213999-eb35-4094-819c-941740fcc9ab.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_42/training_log_b1213999-eb35-4094-819c-941740fcc9ab.txt new file mode 100644 index 0000000000000000000000000000000000000000..32b1f1b9f0267b75fcf43cc60183250890a25f15 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_42/training_log_b1213999-eb35-4094-819c-941740fcc9ab.txt @@ -0,0 +1,2984 @@ +[2025-09-02 06:26:57] [Rank 0] PRINT: --- Script Start: Tue Sep 2 06:26:57 2025 --- +[2025-09-02 06:26:57] [Rank 0] PRINT: --- Script Start: Tue Sep 2 06:26:57 2025 --- +[2025-09-02 06:26:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 06:26:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 06:26:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 06:26:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 06:26:57] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-02 06:26:57] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-02 06:26:57] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_42 +[2025-09-02 06:26:57] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_42 +[2025-09-02 06:26:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 06:26:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 06:26:57] [Rank 0] PRINT: Constructing model... +[2025-09-02 06:26:57] [Rank 0] PRINT: Constructing model... +[2025-09-02 06:26:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 06:26:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 06:26:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 06:26:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 06:26:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 06:26:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 06:26:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 06:26:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 06:26:59] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 06:26:59] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 06:26:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 06:26:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 06:26:59] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 06:26:59] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 06:26:59] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 06:26:59] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 06:26:59] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 06:26:59] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 06:26:59] [Rank 0] PRINT: Starting warmup... +[2025-09-02 06:26:59] [Rank 0] PRINT: Starting warmup... +[2025-09-02 06:27:45] [Rank 0] PRINT: Warmup complete. +[2025-09-02 06:27:45] [Rank 0] PRINT: Warmup complete. +[2025-09-02 06:27:45] [Rank 0] PRINT: Starting training... +[2025-09-02 06:27:45] [Rank 0] PRINT: Starting training... 
+[2025-09-02 06:27:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:27:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:28:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 06:28:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.27 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 06:28:02] [Rank 0] step:21/10000 train_time:1317ms step_avg:62.72ms +[2025-09-02 06:28:02] [Rank 0] step:21/10000 train_time:1317ms step_avg:62.72ms +[2025-09-02 06:28:04] [Rank 0] step:41/10000 train_time:2720ms step_avg:66.34ms +[2025-09-02 06:28:04] [Rank 0] step:41/10000 train_time:2720ms step_avg:66.34ms +[2025-09-02 06:28:05] [Rank 0] step:61/10000 train_time:4127ms step_avg:67.66ms +[2025-09-02 06:28:05] [Rank 0] step:61/10000 train_time:4127ms step_avg:67.66ms +[2025-09-02 06:28:07] [Rank 0] step:81/10000 train_time:5536ms step_avg:68.35ms +[2025-09-02 06:28:07] [Rank 0] step:81/10000 train_time:5536ms step_avg:68.35ms +[2025-09-02 06:28:08] [Rank 0] step:101/10000 train_time:6946ms step_avg:68.78ms +[2025-09-02 06:28:08] [Rank 0] step:101/10000 train_time:6946ms step_avg:68.78ms +[2025-09-02 06:28:09] [Rank 0] step:121/10000 train_time:8355ms step_avg:69.05ms +[2025-09-02 06:28:09] [Rank 0] step:121/10000 
train_time:8355ms step_avg:69.05ms +[2025-09-02 06:28:11] [Rank 0] step:141/10000 train_time:9768ms step_avg:69.28ms +[2025-09-02 06:28:11] [Rank 0] step:141/10000 train_time:9768ms step_avg:69.28ms +[2025-09-02 06:28:12] [Rank 0] step:161/10000 train_time:11180ms step_avg:69.44ms +[2025-09-02 06:28:12] [Rank 0] step:161/10000 train_time:11180ms step_avg:69.44ms +[2025-09-02 06:28:14] [Rank 0] step:181/10000 train_time:12592ms step_avg:69.57ms +[2025-09-02 06:28:14] [Rank 0] step:181/10000 train_time:12592ms step_avg:69.57ms +[2025-09-02 06:28:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:28:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:28:27] [Rank 0] PRINT: step:200/10000 val_loss:6.3833 svd_entropy: attn_qk:H=0.4343,top10E=0.81,eRank=37.7,q75/q25=12.02 attn_vo:H=0.5895,top10E=0.59,eRank=126.5,q75/q25=75.95 mlp_w1:H=0.3990,top10E=0.75,eRank=28.6,q75/q25=2.73 mlp_w2:H=0.1752,top10E=0.93,eRank=5.0,q75/q25=532.50 vo_prod:H=0.3063,top10E=0.94,eRank=9.0,q75/q25=489.72 train_time:14147ms step_avg:70.73ms +[2025-09-02 06:28:27] [Rank 0] PRINT: step:200/10000 val_loss:6.3833 svd_entropy: attn_qk:H=0.4343,top10E=0.81,eRank=37.7,q75/q25=12.02 attn_vo:H=0.5895,top10E=0.59,eRank=126.5,q75/q25=75.95 mlp_w1:H=0.3990,top10E=0.75,eRank=28.6,q75/q25=2.73 mlp_w2:H=0.1752,top10E=0.93,eRank=5.0,q75/q25=532.50 vo_prod:H=0.3063,top10E=0.94,eRank=9.0,q75/q25=489.72 train_time:14147ms step_avg:70.73ms +[2025-09-02 06:28:27] [Rank 0] step:201/10000 train_time:14159ms step_avg:70.44ms +[2025-09-02 06:28:27] [Rank 0] step:201/10000 train_time:14159ms step_avg:70.44ms +[2025-09-02 06:28:28] [Rank 0] step:221/10000 train_time:15445ms step_avg:69.89ms +[2025-09-02 06:28:28] [Rank 0] step:221/10000 train_time:15445ms step_avg:69.89ms +[2025-09-02 06:28:30] [Rank 0] step:241/10000 
train_time:16856ms step_avg:69.94ms +[2025-09-02 06:28:30] [Rank 0] step:241/10000 train_time:16856ms step_avg:69.94ms +[2025-09-02 06:28:31] [Rank 0] step:261/10000 train_time:18268ms step_avg:69.99ms +[2025-09-02 06:28:31] [Rank 0] step:261/10000 train_time:18268ms step_avg:69.99ms +[2025-09-02 06:28:33] [Rank 0] step:281/10000 train_time:19680ms step_avg:70.04ms +[2025-09-02 06:28:33] [Rank 0] step:281/10000 train_time:19680ms step_avg:70.04ms +[2025-09-02 06:28:34] [Rank 0] step:301/10000 train_time:21093ms step_avg:70.08ms +[2025-09-02 06:28:34] [Rank 0] step:301/10000 train_time:21093ms step_avg:70.08ms +[2025-09-02 06:28:35] [Rank 0] step:321/10000 train_time:22506ms step_avg:70.11ms +[2025-09-02 06:28:35] [Rank 0] step:321/10000 train_time:22506ms step_avg:70.11ms +[2025-09-02 06:28:37] [Rank 0] step:341/10000 train_time:23919ms step_avg:70.14ms +[2025-09-02 06:28:37] [Rank 0] step:341/10000 train_time:23919ms step_avg:70.14ms +[2025-09-02 06:28:38] [Rank 0] step:361/10000 train_time:25332ms step_avg:70.17ms +[2025-09-02 06:28:38] [Rank 0] step:361/10000 train_time:25332ms step_avg:70.17ms +[2025-09-02 06:28:40] [Rank 0] step:381/10000 train_time:26747ms step_avg:70.20ms +[2025-09-02 06:28:40] [Rank 0] step:381/10000 train_time:26747ms step_avg:70.20ms +[2025-09-02 06:28:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:28:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:28:53] [Rank 0] PRINT: step:400/10000 val_loss:5.8883 svd_entropy: attn_qk:H=0.5028,top10E=0.69,eRank=46.4,q75/q25=13.39 attn_vo:H=0.6214,top10E=0.46,eRank=102.6,q75/q25=28.63 mlp_w1:H=0.4381,top10E=0.70,eRank=45.1,q75/q25=3.05 mlp_w2:H=0.5126,top10E=0.62,eRank=36.6,q75/q25=24.79 vo_prod:H=0.4573,top10E=0.73,eRank=22.3,q75/q25=213.45 train_time:28304ms step_avg:70.76ms +[2025-09-02 06:28:53] [Rank 0] PRINT: step:400/10000 val_loss:5.8883 svd_entropy: attn_qk:H=0.5028,top10E=0.69,eRank=46.4,q75/q25=13.39 attn_vo:H=0.6214,top10E=0.46,eRank=102.6,q75/q25=28.63 mlp_w1:H=0.4381,top10E=0.70,eRank=45.1,q75/q25=3.05 mlp_w2:H=0.5126,top10E=0.62,eRank=36.6,q75/q25=24.79 vo_prod:H=0.4573,top10E=0.73,eRank=22.3,q75/q25=213.45 train_time:28304ms step_avg:70.76ms +[2025-09-02 06:28:53] [Rank 0] step:401/10000 train_time:28316ms step_avg:70.61ms +[2025-09-02 06:28:53] [Rank 0] step:401/10000 train_time:28316ms step_avg:70.61ms +[2025-09-02 06:28:54] [Rank 0] step:421/10000 train_time:29608ms step_avg:70.33ms +[2025-09-02 06:28:54] [Rank 0] step:421/10000 train_time:29608ms step_avg:70.33ms +[2025-09-02 06:28:56] [Rank 0] step:441/10000 train_time:31022ms step_avg:70.34ms +[2025-09-02 06:28:56] [Rank 0] step:441/10000 train_time:31022ms step_avg:70.34ms +[2025-09-02 06:28:57] [Rank 0] step:461/10000 train_time:32435ms step_avg:70.36ms +[2025-09-02 06:28:57] [Rank 0] step:461/10000 train_time:32435ms step_avg:70.36ms +[2025-09-02 06:28:59] [Rank 0] step:481/10000 train_time:33850ms step_avg:70.38ms +[2025-09-02 06:28:59] [Rank 0] step:481/10000 train_time:33850ms step_avg:70.38ms +[2025-09-02 06:29:00] [Rank 0] step:501/10000 train_time:35298ms step_avg:70.46ms +[2025-09-02 06:29:00] [Rank 0] step:501/10000 train_time:35298ms step_avg:70.46ms +[2025-09-02 06:29:02] [Rank 0] step:521/10000 train_time:36714ms step_avg:70.47ms +[2025-09-02 06:29:02] [Rank 0] step:521/10000 train_time:36714ms step_avg:70.47ms +[2025-09-02 06:29:03] [Rank 0] step:541/10000 
train_time:38130ms step_avg:70.48ms +[2025-09-02 06:29:03] [Rank 0] step:541/10000 train_time:38130ms step_avg:70.48ms +[2025-09-02 06:29:04] [Rank 0] step:561/10000 train_time:39546ms step_avg:70.49ms +[2025-09-02 06:29:04] [Rank 0] step:561/10000 train_time:39546ms step_avg:70.49ms +[2025-09-02 06:29:06] [Rank 0] step:581/10000 train_time:40962ms step_avg:70.50ms +[2025-09-02 06:29:06] [Rank 0] step:581/10000 train_time:40962ms step_avg:70.50ms +[2025-09-02 06:29:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:29:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:29:19] [Rank 0] PRINT: step:600/10000 val_loss:5.5975 svd_entropy: attn_qk:H=0.5455,top10E=0.60,eRank=54.4,q75/q25=15.17 attn_vo:H=0.6567,top10E=0.38,eRank=113.5,q75/q25=26.64 mlp_w1:H=0.4735,top10E=0.66,eRank=53.7,q75/q25=3.33 mlp_w2:H=0.6015,top10E=0.50,eRank=62.9,q75/q25=18.74 vo_prod:H=0.5255,top10E=0.59,eRank=34.6,q75/q25=289.78 train_time:42519ms step_avg:70.87ms +[2025-09-02 06:29:19] [Rank 0] PRINT: step:600/10000 val_loss:5.5975 svd_entropy: attn_qk:H=0.5455,top10E=0.60,eRank=54.4,q75/q25=15.17 attn_vo:H=0.6567,top10E=0.38,eRank=113.5,q75/q25=26.64 mlp_w1:H=0.4735,top10E=0.66,eRank=53.7,q75/q25=3.33 mlp_w2:H=0.6015,top10E=0.50,eRank=62.9,q75/q25=18.74 vo_prod:H=0.5255,top10E=0.59,eRank=34.6,q75/q25=289.78 train_time:42519ms step_avg:70.87ms +[2025-09-02 06:29:19] [Rank 0] step:601/10000 train_time:42532ms step_avg:70.77ms +[2025-09-02 06:29:19] [Rank 0] step:601/10000 train_time:42532ms step_avg:70.77ms +[2025-09-02 06:29:20] [Rank 0] step:621/10000 train_time:43820ms step_avg:70.56ms +[2025-09-02 06:29:20] [Rank 0] step:621/10000 train_time:43820ms step_avg:70.56ms +[2025-09-02 06:29:21] [Rank 0] step:641/10000 train_time:45234ms step_avg:70.57ms +[2025-09-02 06:29:21] [Rank 0] step:641/10000 
train_time:45234ms step_avg:70.57ms +[2025-09-02 06:29:23] [Rank 0] step:661/10000 train_time:46648ms step_avg:70.57ms +[2025-09-02 06:29:23] [Rank 0] step:661/10000 train_time:46648ms step_avg:70.57ms +[2025-09-02 06:29:24] [Rank 0] step:681/10000 train_time:48062ms step_avg:70.58ms +[2025-09-02 06:29:24] [Rank 0] step:681/10000 train_time:48062ms step_avg:70.58ms +[2025-09-02 06:29:26] [Rank 0] step:701/10000 train_time:49478ms step_avg:70.58ms +[2025-09-02 06:29:26] [Rank 0] step:701/10000 train_time:49478ms step_avg:70.58ms +[2025-09-02 06:29:27] [Rank 0] step:721/10000 train_time:50892ms step_avg:70.59ms +[2025-09-02 06:29:27] [Rank 0] step:721/10000 train_time:50892ms step_avg:70.59ms +[2025-09-02 06:29:29] [Rank 0] step:741/10000 train_time:52308ms step_avg:70.59ms +[2025-09-02 06:29:29] [Rank 0] step:741/10000 train_time:52308ms step_avg:70.59ms +[2025-09-02 06:29:30] [Rank 0] step:761/10000 train_time:53737ms step_avg:70.61ms +[2025-09-02 06:29:30] [Rank 0] step:761/10000 train_time:53737ms step_avg:70.61ms +[2025-09-02 06:29:31] [Rank 0] step:781/10000 train_time:55165ms step_avg:70.63ms +[2025-09-02 06:29:31] [Rank 0] step:781/10000 train_time:55165ms step_avg:70.63ms +[2025-09-02 06:29:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:29:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:29:44] [Rank 0] PRINT: step:800/10000 val_loss:5.3637 svd_entropy: attn_qk:H=0.5747,top10E=0.55,eRank=61.2,q75/q25=17.72 attn_vo:H=0.6833,top10E=0.33,eRank=125.9,q75/q25=33.63 mlp_w1:H=0.5042,top10E=0.62,eRank=60.2,q75/q25=3.60 mlp_w2:H=0.6516,top10E=0.42,eRank=85.0,q75/q25=18.16 vo_prod:H=0.5640,top10E=0.52,eRank=44.4,q75/q25=649.69 train_time:56737ms step_avg:70.92ms +[2025-09-02 06:29:44] [Rank 0] PRINT: step:800/10000 val_loss:5.3637 svd_entropy: attn_qk:H=0.5747,top10E=0.55,eRank=61.2,q75/q25=17.72 attn_vo:H=0.6833,top10E=0.33,eRank=125.9,q75/q25=33.63 mlp_w1:H=0.5042,top10E=0.62,eRank=60.2,q75/q25=3.60 mlp_w2:H=0.6516,top10E=0.42,eRank=85.0,q75/q25=18.16 vo_prod:H=0.5640,top10E=0.52,eRank=44.4,q75/q25=649.69 train_time:56737ms step_avg:70.92ms +[2025-09-02 06:29:45] [Rank 0] step:801/10000 train_time:56749ms step_avg:70.85ms +[2025-09-02 06:29:45] [Rank 0] step:801/10000 train_time:56749ms step_avg:70.85ms +[2025-09-02 06:29:46] [Rank 0] step:821/10000 train_time:58044ms step_avg:70.70ms +[2025-09-02 06:29:46] [Rank 0] step:821/10000 train_time:58044ms step_avg:70.70ms +[2025-09-02 06:29:47] [Rank 0] step:841/10000 train_time:59470ms step_avg:70.71ms +[2025-09-02 06:29:47] [Rank 0] step:841/10000 train_time:59470ms step_avg:70.71ms +[2025-09-02 06:29:49] [Rank 0] step:861/10000 train_time:60898ms step_avg:70.73ms +[2025-09-02 06:29:49] [Rank 0] step:861/10000 train_time:60898ms step_avg:70.73ms +[2025-09-02 06:29:50] [Rank 0] step:881/10000 train_time:62324ms step_avg:70.74ms +[2025-09-02 06:29:50] [Rank 0] step:881/10000 train_time:62324ms step_avg:70.74ms +[2025-09-02 06:29:52] [Rank 0] step:901/10000 train_time:63752ms step_avg:70.76ms +[2025-09-02 06:29:52] [Rank 0] step:901/10000 train_time:63752ms step_avg:70.76ms +[2025-09-02 06:29:53] [Rank 0] step:921/10000 train_time:65180ms step_avg:70.77ms +[2025-09-02 06:29:53] [Rank 0] step:921/10000 train_time:65180ms step_avg:70.77ms +[2025-09-02 06:29:55] [Rank 0] step:941/10000 
train_time:66607ms step_avg:70.78ms +[2025-09-02 06:29:55] [Rank 0] step:941/10000 train_time:66607ms step_avg:70.78ms +[2025-09-02 06:29:56] [Rank 0] step:961/10000 train_time:68034ms step_avg:70.80ms +[2025-09-02 06:29:56] [Rank 0] step:961/10000 train_time:68034ms step_avg:70.80ms +[2025-09-02 06:29:57] [Rank 0] step:981/10000 train_time:69462ms step_avg:70.81ms +[2025-09-02 06:29:57] [Rank 0] step:981/10000 train_time:69462ms step_avg:70.81ms +[2025-09-02 06:29:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:29:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:30:10] [Rank 0] PRINT: step:1000/10000 val_loss:5.1878 svd_entropy: attn_qk:H=0.5978,top10E=0.50,eRank=67.6,q75/q25=21.00 attn_vo:H=0.7047,top10E=0.30,eRank=138.6,q75/q25=45.19 mlp_w1:H=0.5317,top10E=0.59,eRank=66.4,q75/q25=3.92 mlp_w2:H=0.6930,top10E=0.36,eRank=108.0,q75/q25=18.12 vo_prod:H=0.5901,top10E=0.47,eRank=52.7,q75/q25=1429.82 train_time:71034ms step_avg:71.03ms +[2025-09-02 06:30:10] [Rank 0] PRINT: step:1000/10000 val_loss:5.1878 svd_entropy: attn_qk:H=0.5978,top10E=0.50,eRank=67.6,q75/q25=21.00 attn_vo:H=0.7047,top10E=0.30,eRank=138.6,q75/q25=45.19 mlp_w1:H=0.5317,top10E=0.59,eRank=66.4,q75/q25=3.92 mlp_w2:H=0.6930,top10E=0.36,eRank=108.0,q75/q25=18.12 vo_prod:H=0.5901,top10E=0.47,eRank=52.7,q75/q25=1429.82 train_time:71034ms step_avg:71.03ms +[2025-09-02 06:30:10] [Rank 0] step:1001/10000 train_time:71046ms step_avg:70.97ms +[2025-09-02 06:30:10] [Rank 0] step:1001/10000 train_time:71046ms step_avg:70.97ms +[2025-09-02 06:30:12] [Rank 0] step:1021/10000 train_time:72353ms step_avg:70.86ms +[2025-09-02 06:30:12] [Rank 0] step:1021/10000 train_time:72353ms step_avg:70.86ms +[2025-09-02 06:30:13] [Rank 0] step:1041/10000 train_time:73781ms step_avg:70.88ms +[2025-09-02 06:30:13] [Rank 0] 
step:1041/10000 train_time:73781ms step_avg:70.88ms +[2025-09-02 06:30:15] [Rank 0] step:1061/10000 train_time:75211ms step_avg:70.89ms +[2025-09-02 06:30:15] [Rank 0] step:1061/10000 train_time:75211ms step_avg:70.89ms +[2025-09-02 06:30:16] [Rank 0] step:1081/10000 train_time:76638ms step_avg:70.90ms +[2025-09-02 06:30:16] [Rank 0] step:1081/10000 train_time:76638ms step_avg:70.90ms +[2025-09-02 06:30:18] [Rank 0] step:1101/10000 train_time:78066ms step_avg:70.90ms +[2025-09-02 06:30:18] [Rank 0] step:1101/10000 train_time:78066ms step_avg:70.90ms +[2025-09-02 06:30:19] [Rank 0] step:1121/10000 train_time:79493ms step_avg:70.91ms +[2025-09-02 06:30:19] [Rank 0] step:1121/10000 train_time:79493ms step_avg:70.91ms +[2025-09-02 06:30:20] [Rank 0] step:1141/10000 train_time:80922ms step_avg:70.92ms +[2025-09-02 06:30:20] [Rank 0] step:1141/10000 train_time:80922ms step_avg:70.92ms +[2025-09-02 06:30:22] [Rank 0] step:1161/10000 train_time:82350ms step_avg:70.93ms +[2025-09-02 06:30:22] [Rank 0] step:1161/10000 train_time:82350ms step_avg:70.93ms +[2025-09-02 06:30:23] [Rank 0] step:1181/10000 train_time:83779ms step_avg:70.94ms +[2025-09-02 06:30:23] [Rank 0] step:1181/10000 train_time:83779ms step_avg:70.94ms +[2025-09-02 06:30:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:30:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:30:36] [Rank 0] PRINT: step:1200/10000 val_loss:5.0350 svd_entropy: attn_qk:H=0.6163,top10E=0.47,eRank=73.9,q75/q25=25.50 attn_vo:H=0.7224,top10E=0.27,eRank=150.9,q75/q25=58.30 mlp_w1:H=0.5519,top10E=0.56,eRank=71.9,q75/q25=4.24 mlp_w2:H=0.7154,top10E=0.33,eRank=124.8,q75/q25=20.64 vo_prod:H=0.6108,top10E=0.43,eRank=60.3,q75/q25=2692.35 train_time:85351ms step_avg:71.13ms +[2025-09-02 06:30:36] [Rank 0] PRINT: step:1200/10000 val_loss:5.0350 svd_entropy: attn_qk:H=0.6163,top10E=0.47,eRank=73.9,q75/q25=25.50 attn_vo:H=0.7224,top10E=0.27,eRank=150.9,q75/q25=58.30 mlp_w1:H=0.5519,top10E=0.56,eRank=71.9,q75/q25=4.24 mlp_w2:H=0.7154,top10E=0.33,eRank=124.8,q75/q25=20.64 vo_prod:H=0.6108,top10E=0.43,eRank=60.3,q75/q25=2692.35 train_time:85351ms step_avg:71.13ms +[2025-09-02 06:30:36] [Rank 0] step:1201/10000 train_time:85363ms step_avg:71.08ms +[2025-09-02 06:30:36] [Rank 0] step:1201/10000 train_time:85363ms step_avg:71.08ms +[2025-09-02 06:30:38] [Rank 0] step:1221/10000 train_time:86656ms step_avg:70.97ms +[2025-09-02 06:30:38] [Rank 0] step:1221/10000 train_time:86656ms step_avg:70.97ms +[2025-09-02 06:30:39] [Rank 0] step:1241/10000 train_time:88084ms step_avg:70.98ms +[2025-09-02 06:30:39] [Rank 0] step:1241/10000 train_time:88084ms step_avg:70.98ms +[2025-09-02 06:30:41] [Rank 0] step:1261/10000 train_time:89512ms step_avg:70.99ms +[2025-09-02 06:30:41] [Rank 0] step:1261/10000 train_time:89512ms step_avg:70.99ms +[2025-09-02 06:30:42] [Rank 0] step:1281/10000 train_time:90942ms step_avg:70.99ms +[2025-09-02 06:30:42] [Rank 0] step:1281/10000 train_time:90942ms step_avg:70.99ms +[2025-09-02 06:30:44] [Rank 0] step:1301/10000 train_time:92371ms step_avg:71.00ms +[2025-09-02 06:30:44] [Rank 0] step:1301/10000 train_time:92371ms step_avg:71.00ms +[2025-09-02 06:30:45] [Rank 0] step:1321/10000 train_time:93801ms step_avg:71.01ms +[2025-09-02 06:30:45] [Rank 0] step:1321/10000 train_time:93801ms step_avg:71.01ms +[2025-09-02 06:30:46] [Rank 0] 
step:1341/10000 train_time:95231ms step_avg:71.02ms +[2025-09-02 06:30:46] [Rank 0] step:1341/10000 train_time:95231ms step_avg:71.02ms +[2025-09-02 06:30:48] [Rank 0] step:1361/10000 train_time:96662ms step_avg:71.02ms +[2025-09-02 06:30:48] [Rank 0] step:1361/10000 train_time:96662ms step_avg:71.02ms +[2025-09-02 06:30:49] [Rank 0] step:1381/10000 train_time:98092ms step_avg:71.03ms +[2025-09-02 06:30:49] [Rank 0] step:1381/10000 train_time:98092ms step_avg:71.03ms +[2025-09-02 06:30:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:30:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:31:02] [Rank 0] PRINT: step:1400/10000 val_loss:4.9256 svd_entropy: attn_qk:H=0.6313,top10E=0.44,eRank=79.6,q75/q25=31.12 attn_vo:H=0.7373,top10E=0.25,eRank=162.6,q75/q25=70.11 mlp_w1:H=0.5703,top10E=0.54,eRank=77.2,q75/q25=4.61 mlp_w2:H=0.7332,top10E=0.30,eRank=139.8,q75/q25=22.62 vo_prod:H=0.6285,top10E=0.40,eRank=67.8,q75/q25=4390.77 train_time:99665ms step_avg:71.19ms +[2025-09-02 06:31:02] [Rank 0] PRINT: step:1400/10000 val_loss:4.9256 svd_entropy: attn_qk:H=0.6313,top10E=0.44,eRank=79.6,q75/q25=31.12 attn_vo:H=0.7373,top10E=0.25,eRank=162.6,q75/q25=70.11 mlp_w1:H=0.5703,top10E=0.54,eRank=77.2,q75/q25=4.61 mlp_w2:H=0.7332,top10E=0.30,eRank=139.8,q75/q25=22.62 vo_prod:H=0.6285,top10E=0.40,eRank=67.8,q75/q25=4390.77 train_time:99665ms step_avg:71.19ms +[2025-09-02 06:31:02] [Rank 0] step:1401/10000 train_time:99677ms step_avg:71.15ms +[2025-09-02 06:31:02] [Rank 0] step:1401/10000 train_time:99677ms step_avg:71.15ms +[2025-09-02 06:31:04] [Rank 0] step:1421/10000 train_time:100984ms step_avg:71.07ms +[2025-09-02 06:31:04] [Rank 0] step:1421/10000 train_time:100984ms step_avg:71.07ms +[2025-09-02 06:31:05] [Rank 0] step:1441/10000 train_time:102412ms step_avg:71.07ms +[2025-09-02 06:31:05] 
[Rank 0] step:1441/10000 train_time:102412ms step_avg:71.07ms +[2025-09-02 06:31:07] [Rank 0] step:1461/10000 train_time:103841ms step_avg:71.08ms +[2025-09-02 06:31:07] [Rank 0] step:1461/10000 train_time:103841ms step_avg:71.08ms +[2025-09-02 06:31:08] [Rank 0] step:1481/10000 train_time:105269ms step_avg:71.08ms +[2025-09-02 06:31:08] [Rank 0] step:1481/10000 train_time:105269ms step_avg:71.08ms +[2025-09-02 06:31:10] [Rank 0] step:1501/10000 train_time:106706ms step_avg:71.09ms +[2025-09-02 06:31:10] [Rank 0] step:1501/10000 train_time:106706ms step_avg:71.09ms +[2025-09-02 06:31:11] [Rank 0] step:1521/10000 train_time:108147ms step_avg:71.10ms +[2025-09-02 06:31:11] [Rank 0] step:1521/10000 train_time:108147ms step_avg:71.10ms +[2025-09-02 06:31:12] [Rank 0] step:1541/10000 train_time:109587ms step_avg:71.11ms +[2025-09-02 06:31:12] [Rank 0] step:1541/10000 train_time:109587ms step_avg:71.11ms +[2025-09-02 06:31:14] [Rank 0] step:1561/10000 train_time:111026ms step_avg:71.13ms +[2025-09-02 06:31:14] [Rank 0] step:1561/10000 train_time:111026ms step_avg:71.13ms +[2025-09-02 06:31:15] [Rank 0] step:1581/10000 train_time:112467ms step_avg:71.14ms +[2025-09-02 06:31:15] [Rank 0] step:1581/10000 train_time:112467ms step_avg:71.14ms +[2025-09-02 06:31:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:31:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:31:28] [Rank 0] PRINT: step:1600/10000 val_loss:4.8030 svd_entropy: attn_qk:H=0.6440,top10E=0.42,eRank=84.6,q75/q25=37.90 attn_vo:H=0.7495,top10E=0.24,eRank=173.4,q75/q25=79.21 mlp_w1:H=0.5857,top10E=0.52,eRank=82.1,q75/q25=4.98 mlp_w2:H=0.7461,top10E=0.28,eRank=151.8,q75/q25=24.42 vo_prod:H=0.6428,top10E=0.37,eRank=74.5,q75/q25=6255.01 train_time:114052ms step_avg:71.28ms +[2025-09-02 06:31:28] [Rank 0] PRINT: step:1600/10000 val_loss:4.8030 svd_entropy: attn_qk:H=0.6440,top10E=0.42,eRank=84.6,q75/q25=37.90 attn_vo:H=0.7495,top10E=0.24,eRank=173.4,q75/q25=79.21 mlp_w1:H=0.5857,top10E=0.52,eRank=82.1,q75/q25=4.98 mlp_w2:H=0.7461,top10E=0.28,eRank=151.8,q75/q25=24.42 vo_prod:H=0.6428,top10E=0.37,eRank=74.5,q75/q25=6255.01 train_time:114052ms step_avg:71.28ms +[2025-09-02 06:31:28] [Rank 0] step:1601/10000 train_time:114064ms step_avg:71.25ms +[2025-09-02 06:31:28] [Rank 0] step:1601/10000 train_time:114064ms step_avg:71.25ms +[2025-09-02 06:31:30] [Rank 0] step:1621/10000 train_time:115387ms step_avg:71.18ms +[2025-09-02 06:31:30] [Rank 0] step:1621/10000 train_time:115387ms step_avg:71.18ms +[2025-09-02 06:31:31] [Rank 0] step:1641/10000 train_time:116826ms step_avg:71.19ms +[2025-09-02 06:31:31] [Rank 0] step:1641/10000 train_time:116826ms step_avg:71.19ms +[2025-09-02 06:31:33] [Rank 0] step:1661/10000 train_time:118264ms step_avg:71.20ms +[2025-09-02 06:31:33] [Rank 0] step:1661/10000 train_time:118264ms step_avg:71.20ms +[2025-09-02 06:31:34] [Rank 0] step:1681/10000 train_time:119702ms step_avg:71.21ms +[2025-09-02 06:31:34] [Rank 0] step:1681/10000 train_time:119702ms step_avg:71.21ms +[2025-09-02 06:31:36] [Rank 0] step:1701/10000 train_time:121140ms step_avg:71.22ms +[2025-09-02 06:31:36] [Rank 0] step:1701/10000 train_time:121140ms step_avg:71.22ms +[2025-09-02 06:31:37] [Rank 0] step:1721/10000 train_time:122580ms step_avg:71.23ms +[2025-09-02 06:31:37] [Rank 0] step:1721/10000 train_time:122580ms step_avg:71.23ms +[2025-09-02 06:31:38] 
[Rank 0] step:1741/10000 train_time:124025ms step_avg:71.24ms +[2025-09-02 06:31:38] [Rank 0] step:1741/10000 train_time:124025ms step_avg:71.24ms +[2025-09-02 06:31:40] [Rank 0] step:1761/10000 train_time:125465ms step_avg:71.25ms +[2025-09-02 06:31:40] [Rank 0] step:1761/10000 train_time:125465ms step_avg:71.25ms +[2025-09-02 06:31:41] [Rank 0] step:1781/10000 train_time:126904ms step_avg:71.25ms +[2025-09-02 06:31:41] [Rank 0] step:1781/10000 train_time:126904ms step_avg:71.25ms +[2025-09-02 06:31:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:31:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:31:54] [Rank 0] PRINT: step:1800/10000 val_loss:4.7134 svd_entropy: attn_qk:H=0.6549,top10E=0.40,eRank=89.4,q75/q25=45.23 attn_vo:H=0.7597,top10E=0.22,eRank=183.2,q75/q25=85.96 mlp_w1:H=0.5979,top10E=0.51,eRank=86.2,q75/q25=5.36 mlp_w2:H=0.7560,top10E=0.27,eRank=161.7,q75/q25=25.10 vo_prod:H=0.6549,top10E=0.35,eRank=80.7,q75/q25=7858.37 train_time:128489ms step_avg:71.38ms +[2025-09-02 06:31:54] [Rank 0] PRINT: step:1800/10000 val_loss:4.7134 svd_entropy: attn_qk:H=0.6549,top10E=0.40,eRank=89.4,q75/q25=45.23 attn_vo:H=0.7597,top10E=0.22,eRank=183.2,q75/q25=85.96 mlp_w1:H=0.5979,top10E=0.51,eRank=86.2,q75/q25=5.36 mlp_w2:H=0.7560,top10E=0.27,eRank=161.7,q75/q25=25.10 vo_prod:H=0.6549,top10E=0.35,eRank=80.7,q75/q25=7858.37 train_time:128489ms step_avg:71.38ms +[2025-09-02 06:31:54] [Rank 0] step:1801/10000 train_time:128501ms step_avg:71.35ms +[2025-09-02 06:31:54] [Rank 0] step:1801/10000 train_time:128501ms step_avg:71.35ms +[2025-09-02 06:31:56] [Rank 0] step:1821/10000 train_time:129819ms step_avg:71.29ms +[2025-09-02 06:31:56] [Rank 0] step:1821/10000 train_time:129819ms step_avg:71.29ms +[2025-09-02 06:31:57] [Rank 0] step:1841/10000 train_time:131255ms step_avg:71.30ms 
+[2025-09-02 06:31:57] [Rank 0] step:1841/10000 train_time:131255ms step_avg:71.30ms +[2025-09-02 06:31:59] [Rank 0] step:1861/10000 train_time:132694ms step_avg:71.30ms +[2025-09-02 06:31:59] [Rank 0] step:1861/10000 train_time:132694ms step_avg:71.30ms +[2025-09-02 06:32:00] [Rank 0] step:1881/10000 train_time:134133ms step_avg:71.31ms +[2025-09-02 06:32:00] [Rank 0] step:1881/10000 train_time:134133ms step_avg:71.31ms +[2025-09-02 06:32:02] [Rank 0] step:1901/10000 train_time:135573ms step_avg:71.32ms +[2025-09-02 06:32:02] [Rank 0] step:1901/10000 train_time:135573ms step_avg:71.32ms +[2025-09-02 06:32:03] [Rank 0] step:1921/10000 train_time:137016ms step_avg:71.33ms +[2025-09-02 06:32:03] [Rank 0] step:1921/10000 train_time:137016ms step_avg:71.33ms +[2025-09-02 06:32:04] [Rank 0] step:1941/10000 train_time:138456ms step_avg:71.33ms +[2025-09-02 06:32:04] [Rank 0] step:1941/10000 train_time:138456ms step_avg:71.33ms +[2025-09-02 06:32:06] [Rank 0] step:1961/10000 train_time:139896ms step_avg:71.34ms +[2025-09-02 06:32:06] [Rank 0] step:1961/10000 train_time:139896ms step_avg:71.34ms +[2025-09-02 06:32:07] [Rank 0] step:1981/10000 train_time:141336ms step_avg:71.35ms +[2025-09-02 06:32:07] [Rank 0] step:1981/10000 train_time:141336ms step_avg:71.35ms +[2025-09-02 06:32:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:32:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:32:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.6552 svd_entropy: attn_qk:H=0.6644,top10E=0.38,eRank=93.9,q75/q25=52.67 attn_vo:H=0.7685,top10E=0.21,eRank=192.2,q75/q25=90.19 mlp_w1:H=0.6095,top10E=0.49,eRank=90.4,q75/q25=5.79 mlp_w2:H=0.7649,top10E=0.25,eRank=171.6,q75/q25=26.90 vo_prod:H=0.6658,top10E=0.34,eRank=86.8,q75/q25=9374.42 train_time:142920ms step_avg:71.46ms +[2025-09-02 06:32:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.6552 svd_entropy: attn_qk:H=0.6644,top10E=0.38,eRank=93.9,q75/q25=52.67 attn_vo:H=0.7685,top10E=0.21,eRank=192.2,q75/q25=90.19 mlp_w1:H=0.6095,top10E=0.49,eRank=90.4,q75/q25=5.79 mlp_w2:H=0.7649,top10E=0.25,eRank=171.6,q75/q25=26.90 vo_prod:H=0.6658,top10E=0.34,eRank=86.8,q75/q25=9374.42 train_time:142920ms step_avg:71.46ms +[2025-09-02 06:32:20] [Rank 0] step:2001/10000 train_time:142932ms step_avg:71.43ms +[2025-09-02 06:32:20] [Rank 0] step:2001/10000 train_time:142932ms step_avg:71.43ms +[2025-09-02 06:32:22] [Rank 0] step:2021/10000 train_time:144253ms step_avg:71.38ms +[2025-09-02 06:32:22] [Rank 0] step:2021/10000 train_time:144253ms step_avg:71.38ms +[2025-09-02 06:32:23] [Rank 0] step:2041/10000 train_time:145806ms step_avg:71.44ms +[2025-09-02 06:32:23] [Rank 0] step:2041/10000 train_time:145806ms step_avg:71.44ms +[2025-09-02 06:32:25] [Rank 0] step:2061/10000 train_time:147245ms step_avg:71.44ms +[2025-09-02 06:32:25] [Rank 0] step:2061/10000 train_time:147245ms step_avg:71.44ms +[2025-09-02 06:32:26] [Rank 0] step:2081/10000 train_time:148687ms step_avg:71.45ms +[2025-09-02 06:32:26] [Rank 0] step:2081/10000 train_time:148687ms step_avg:71.45ms +[2025-09-02 06:32:28] [Rank 0] step:2101/10000 train_time:150127ms step_avg:71.46ms +[2025-09-02 06:32:28] [Rank 0] step:2101/10000 train_time:150127ms step_avg:71.46ms +[2025-09-02 06:32:29] [Rank 0] step:2121/10000 train_time:151567ms step_avg:71.46ms +[2025-09-02 06:32:29] [Rank 0] step:2121/10000 train_time:151567ms step_avg:71.46ms +[2025-09-02 06:32:31] 
[Rank 0] step:2141/10000 train_time:153008ms step_avg:71.47ms +[2025-09-02 06:32:31] [Rank 0] step:2141/10000 train_time:153008ms step_avg:71.47ms +[2025-09-02 06:32:32] [Rank 0] step:2161/10000 train_time:154447ms step_avg:71.47ms +[2025-09-02 06:32:32] [Rank 0] step:2161/10000 train_time:154447ms step_avg:71.47ms +[2025-09-02 06:32:34] [Rank 0] step:2181/10000 train_time:155888ms step_avg:71.48ms +[2025-09-02 06:32:34] [Rank 0] step:2181/10000 train_time:155888ms step_avg:71.48ms +[2025-09-02 06:32:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:32:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:32:47] [Rank 0] PRINT: step:2200/10000 val_loss:4.5886 svd_entropy: attn_qk:H=0.6725,top10E=0.37,eRank=98.0,q75/q25=59.96 attn_vo:H=0.7758,top10E=0.21,eRank=200.0,q75/q25=92.20 mlp_w1:H=0.6207,top10E=0.48,eRank=94.7,q75/q25=6.15 mlp_w2:H=0.7721,top10E=0.24,eRank=179.8,q75/q25=28.24 vo_prod:H=0.6747,top10E=0.32,eRank=92.2,q75/q25=9915.98 train_time:157473ms step_avg:71.58ms +[2025-09-02 06:32:47] [Rank 0] PRINT: step:2200/10000 val_loss:4.5886 svd_entropy: attn_qk:H=0.6725,top10E=0.37,eRank=98.0,q75/q25=59.96 attn_vo:H=0.7758,top10E=0.21,eRank=200.0,q75/q25=92.20 mlp_w1:H=0.6207,top10E=0.48,eRank=94.7,q75/q25=6.15 mlp_w2:H=0.7721,top10E=0.24,eRank=179.8,q75/q25=28.24 vo_prod:H=0.6747,top10E=0.32,eRank=92.2,q75/q25=9915.98 train_time:157473ms step_avg:71.58ms +[2025-09-02 06:32:47] [Rank 0] step:2201/10000 train_time:157486ms step_avg:71.55ms +[2025-09-02 06:32:47] [Rank 0] step:2201/10000 train_time:157486ms step_avg:71.55ms +[2025-09-02 06:32:48] [Rank 0] step:2221/10000 train_time:158781ms step_avg:71.49ms +[2025-09-02 06:32:48] [Rank 0] step:2221/10000 train_time:158781ms step_avg:71.49ms +[2025-09-02 06:32:50] [Rank 0] step:2241/10000 train_time:160252ms step_avg:71.51ms 
+[2025-09-02 06:32:50] [Rank 0] step:2241/10000 train_time:160252ms step_avg:71.51ms +[2025-09-02 06:32:51] [Rank 0] step:2261/10000 train_time:161735ms step_avg:71.53ms +[2025-09-02 06:32:51] [Rank 0] step:2261/10000 train_time:161735ms step_avg:71.53ms +[2025-09-02 06:32:53] [Rank 0] step:2281/10000 train_time:163218ms step_avg:71.56ms +[2025-09-02 06:32:53] [Rank 0] step:2281/10000 train_time:163218ms step_avg:71.56ms +[2025-09-02 06:32:54] [Rank 0] step:2301/10000 train_time:164702ms step_avg:71.58ms +[2025-09-02 06:32:54] [Rank 0] step:2301/10000 train_time:164702ms step_avg:71.58ms +[2025-09-02 06:32:55] [Rank 0] step:2321/10000 train_time:166186ms step_avg:71.60ms +[2025-09-02 06:32:55] [Rank 0] step:2321/10000 train_time:166186ms step_avg:71.60ms +[2025-09-02 06:32:57] [Rank 0] step:2341/10000 train_time:167668ms step_avg:71.62ms +[2025-09-02 06:32:57] [Rank 0] step:2341/10000 train_time:167668ms step_avg:71.62ms +[2025-09-02 06:32:58] [Rank 0] step:2361/10000 train_time:169152ms step_avg:71.64ms +[2025-09-02 06:32:58] [Rank 0] step:2361/10000 train_time:169152ms step_avg:71.64ms +[2025-09-02 06:33:00] [Rank 0] step:2381/10000 train_time:170638ms step_avg:71.67ms +[2025-09-02 06:33:00] [Rank 0] step:2381/10000 train_time:170638ms step_avg:71.67ms +[2025-09-02 06:33:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:33:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:33:13] [Rank 0] PRINT: step:2400/10000 val_loss:4.5192 svd_entropy: attn_qk:H=0.6793,top10E=0.36,eRank=101.6,q75/q25=66.69 attn_vo:H=0.7824,top10E=0.20,eRank=207.6,q75/q25=93.91 mlp_w1:H=0.6301,top10E=0.47,eRank=98.6,q75/q25=6.50 mlp_w2:H=0.7780,top10E=0.23,eRank=187.2,q75/q25=29.84 vo_prod:H=0.6829,top10E=0.31,eRank=97.4,q75/q25=10423.35 train_time:172271ms step_avg:71.78ms +[2025-09-02 06:33:13] [Rank 0] PRINT: step:2400/10000 val_loss:4.5192 svd_entropy: attn_qk:H=0.6793,top10E=0.36,eRank=101.6,q75/q25=66.69 attn_vo:H=0.7824,top10E=0.20,eRank=207.6,q75/q25=93.91 mlp_w1:H=0.6301,top10E=0.47,eRank=98.6,q75/q25=6.50 mlp_w2:H=0.7780,top10E=0.23,eRank=187.2,q75/q25=29.84 vo_prod:H=0.6829,top10E=0.31,eRank=97.4,q75/q25=10423.35 train_time:172271ms step_avg:71.78ms +[2025-09-02 06:33:13] [Rank 0] step:2401/10000 train_time:172284ms step_avg:71.75ms +[2025-09-02 06:33:13] [Rank 0] step:2401/10000 train_time:172284ms step_avg:71.75ms +[2025-09-02 06:33:15] [Rank 0] step:2421/10000 train_time:173640ms step_avg:71.72ms +[2025-09-02 06:33:15] [Rank 0] step:2421/10000 train_time:173640ms step_avg:71.72ms +[2025-09-02 06:33:16] [Rank 0] step:2441/10000 train_time:175121ms step_avg:71.74ms +[2025-09-02 06:33:16] [Rank 0] step:2441/10000 train_time:175121ms step_avg:71.74ms +[2025-09-02 06:33:18] [Rank 0] step:2461/10000 train_time:176603ms step_avg:71.76ms +[2025-09-02 06:33:18] [Rank 0] step:2461/10000 train_time:176603ms step_avg:71.76ms +[2025-09-02 06:33:19] [Rank 0] step:2481/10000 train_time:178086ms step_avg:71.78ms +[2025-09-02 06:33:19] [Rank 0] step:2481/10000 train_time:178086ms step_avg:71.78ms +[2025-09-02 06:33:21] [Rank 0] step:2501/10000 train_time:179569ms step_avg:71.80ms +[2025-09-02 06:33:21] [Rank 0] step:2501/10000 train_time:179569ms step_avg:71.80ms +[2025-09-02 06:33:22] [Rank 0] step:2521/10000 train_time:181051ms step_avg:71.82ms +[2025-09-02 06:33:22] [Rank 0] step:2521/10000 train_time:181051ms step_avg:71.82ms +[2025-09-02 
06:33:24] [Rank 0] step:2541/10000 train_time:182537ms step_avg:71.84ms +[2025-09-02 06:33:24] [Rank 0] step:2541/10000 train_time:182537ms step_avg:71.84ms +[2025-09-02 06:33:25] [Rank 0] step:2561/10000 train_time:184021ms step_avg:71.86ms +[2025-09-02 06:33:25] [Rank 0] step:2561/10000 train_time:184021ms step_avg:71.86ms +[2025-09-02 06:33:27] [Rank 0] step:2581/10000 train_time:185505ms step_avg:71.87ms +[2025-09-02 06:33:27] [Rank 0] step:2581/10000 train_time:185505ms step_avg:71.87ms +[2025-09-02 06:33:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:33:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:33:40] [Rank 0] PRINT: step:2600/10000 val_loss:4.4671 svd_entropy: attn_qk:H=0.6857,top10E=0.35,eRank=105.2,q75/q25=72.99 attn_vo:H=0.7883,top10E=0.19,eRank=214.6,q75/q25=94.03 mlp_w1:H=0.6385,top10E=0.46,eRank=102.2,q75/q25=6.86 mlp_w2:H=0.7831,top10E=0.22,eRank=193.9,q75/q25=31.17 vo_prod:H=0.6903,top10E=0.30,eRank=102.4,q75/q25=10611.92 train_time:187138ms step_avg:71.98ms +[2025-09-02 06:33:40] [Rank 0] PRINT: step:2600/10000 val_loss:4.4671 svd_entropy: attn_qk:H=0.6857,top10E=0.35,eRank=105.2,q75/q25=72.99 attn_vo:H=0.7883,top10E=0.19,eRank=214.6,q75/q25=94.03 mlp_w1:H=0.6385,top10E=0.46,eRank=102.2,q75/q25=6.86 mlp_w2:H=0.7831,top10E=0.22,eRank=193.9,q75/q25=31.17 vo_prod:H=0.6903,top10E=0.30,eRank=102.4,q75/q25=10611.92 train_time:187138ms step_avg:71.98ms +[2025-09-02 06:33:40] [Rank 0] step:2601/10000 train_time:187149ms step_avg:71.95ms +[2025-09-02 06:33:40] [Rank 0] step:2601/10000 train_time:187149ms step_avg:71.95ms +[2025-09-02 06:33:42] [Rank 0] step:2621/10000 train_time:188492ms step_avg:71.92ms +[2025-09-02 06:33:42] [Rank 0] step:2621/10000 train_time:188492ms step_avg:71.92ms +[2025-09-02 06:33:43] [Rank 0] step:2641/10000 train_time:189974ms 
step_avg:71.93ms +[2025-09-02 06:33:43] [Rank 0] step:2641/10000 train_time:189974ms step_avg:71.93ms +[2025-09-02 06:33:45] [Rank 0] step:2661/10000 train_time:191456ms step_avg:71.95ms +[2025-09-02 06:33:45] [Rank 0] step:2661/10000 train_time:191456ms step_avg:71.95ms +[2025-09-02 06:33:46] [Rank 0] step:2681/10000 train_time:192938ms step_avg:71.96ms +[2025-09-02 06:33:46] [Rank 0] step:2681/10000 train_time:192938ms step_avg:71.96ms +[2025-09-02 06:33:47] [Rank 0] step:2701/10000 train_time:194420ms step_avg:71.98ms +[2025-09-02 06:33:47] [Rank 0] step:2701/10000 train_time:194420ms step_avg:71.98ms +[2025-09-02 06:33:49] [Rank 0] step:2721/10000 train_time:195904ms step_avg:72.00ms +[2025-09-02 06:33:49] [Rank 0] step:2721/10000 train_time:195904ms step_avg:72.00ms +[2025-09-02 06:33:50] [Rank 0] step:2741/10000 train_time:197388ms step_avg:72.01ms +[2025-09-02 06:33:50] [Rank 0] step:2741/10000 train_time:197388ms step_avg:72.01ms +[2025-09-02 06:33:52] [Rank 0] step:2761/10000 train_time:198870ms step_avg:72.03ms +[2025-09-02 06:33:52] [Rank 0] step:2761/10000 train_time:198870ms step_avg:72.03ms +[2025-09-02 06:33:53] [Rank 0] step:2781/10000 train_time:200354ms step_avg:72.04ms +[2025-09-02 06:33:53] [Rank 0] step:2781/10000 train_time:200354ms step_avg:72.04ms +[2025-09-02 06:33:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:33:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:34:07] [Rank 0] PRINT: step:2800/10000 val_loss:4.4312 svd_entropy: attn_qk:H=0.6919,top10E=0.34,eRank=108.8,q75/q25=79.34 attn_vo:H=0.7937,top10E=0.19,eRank=221.2,q75/q25=93.39 mlp_w1:H=0.6469,top10E=0.44,eRank=106.0,q75/q25=7.19 mlp_w2:H=0.7879,top10E=0.21,eRank=200.3,q75/q25=32.23 vo_prod:H=0.6970,top10E=0.29,eRank=107.1,q75/q25=10673.38 train_time:201987ms step_avg:72.14ms +[2025-09-02 06:34:07] [Rank 0] PRINT: step:2800/10000 val_loss:4.4312 svd_entropy: attn_qk:H=0.6919,top10E=0.34,eRank=108.8,q75/q25=79.34 attn_vo:H=0.7937,top10E=0.19,eRank=221.2,q75/q25=93.39 mlp_w1:H=0.6469,top10E=0.44,eRank=106.0,q75/q25=7.19 mlp_w2:H=0.7879,top10E=0.21,eRank=200.3,q75/q25=32.23 vo_prod:H=0.6970,top10E=0.29,eRank=107.1,q75/q25=10673.38 train_time:201987ms step_avg:72.14ms +[2025-09-02 06:34:07] [Rank 0] step:2801/10000 train_time:202000ms step_avg:72.12ms +[2025-09-02 06:34:07] [Rank 0] step:2801/10000 train_time:202000ms step_avg:72.12ms +[2025-09-02 06:34:08] [Rank 0] step:2821/10000 train_time:203358ms step_avg:72.09ms +[2025-09-02 06:34:08] [Rank 0] step:2821/10000 train_time:203358ms step_avg:72.09ms +[2025-09-02 06:34:10] [Rank 0] step:2841/10000 train_time:204840ms step_avg:72.10ms +[2025-09-02 06:34:10] [Rank 0] step:2841/10000 train_time:204840ms step_avg:72.10ms +[2025-09-02 06:34:11] [Rank 0] step:2861/10000 train_time:206322ms step_avg:72.12ms +[2025-09-02 06:34:11] [Rank 0] step:2861/10000 train_time:206322ms step_avg:72.12ms +[2025-09-02 06:34:13] [Rank 0] step:2881/10000 train_time:207807ms step_avg:72.13ms +[2025-09-02 06:34:13] [Rank 0] step:2881/10000 train_time:207807ms step_avg:72.13ms +[2025-09-02 06:34:14] [Rank 0] step:2901/10000 train_time:209290ms step_avg:72.14ms +[2025-09-02 06:34:14] [Rank 0] step:2901/10000 train_time:209290ms step_avg:72.14ms +[2025-09-02 06:34:16] [Rank 0] step:2921/10000 train_time:210775ms step_avg:72.16ms +[2025-09-02 06:34:16] [Rank 0] step:2921/10000 train_time:210775ms step_avg:72.16ms +[2025-09-02 
06:34:17] [Rank 0] step:2941/10000 train_time:212258ms step_avg:72.17ms +[2025-09-02 06:34:17] [Rank 0] step:2941/10000 train_time:212258ms step_avg:72.17ms +[2025-09-02 06:34:19] [Rank 0] step:2961/10000 train_time:213743ms step_avg:72.19ms +[2025-09-02 06:34:19] [Rank 0] step:2961/10000 train_time:213743ms step_avg:72.19ms +[2025-09-02 06:34:20] [Rank 0] step:2981/10000 train_time:215234ms step_avg:72.20ms +[2025-09-02 06:34:20] [Rank 0] step:2981/10000 train_time:215234ms step_avg:72.20ms +[2025-09-02 06:34:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:34:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:34:33] [Rank 0] PRINT: step:3000/10000 val_loss:4.3894 svd_entropy: attn_qk:H=0.6972,top10E=0.33,eRank=112.0,q75/q25=84.55 attn_vo:H=0.7984,top10E=0.18,eRank=227.1,q75/q25=91.70 mlp_w1:H=0.6540,top10E=0.44,eRank=109.5,q75/q25=7.53 mlp_w2:H=0.7918,top10E=0.21,eRank=205.7,q75/q25=33.82 vo_prod:H=0.7029,top10E=0.28,eRank=111.4,q75/q25=10283.87 train_time:216875ms step_avg:72.29ms +[2025-09-02 06:34:33] [Rank 0] PRINT: step:3000/10000 val_loss:4.3894 svd_entropy: attn_qk:H=0.6972,top10E=0.33,eRank=112.0,q75/q25=84.55 attn_vo:H=0.7984,top10E=0.18,eRank=227.1,q75/q25=91.70 mlp_w1:H=0.6540,top10E=0.44,eRank=109.5,q75/q25=7.53 mlp_w2:H=0.7918,top10E=0.21,eRank=205.7,q75/q25=33.82 vo_prod:H=0.7029,top10E=0.28,eRank=111.4,q75/q25=10283.87 train_time:216875ms step_avg:72.29ms +[2025-09-02 06:34:34] [Rank 0] step:3001/10000 train_time:216888ms step_avg:72.27ms +[2025-09-02 06:34:34] [Rank 0] step:3001/10000 train_time:216888ms step_avg:72.27ms +[2025-09-02 06:34:35] [Rank 0] step:3021/10000 train_time:218237ms step_avg:72.24ms +[2025-09-02 06:34:35] [Rank 0] step:3021/10000 train_time:218237ms step_avg:72.24ms +[2025-09-02 06:34:37] [Rank 0] step:3041/10000 train_time:219729ms 
step_avg:72.26ms +[2025-09-02 06:34:37] [Rank 0] step:3041/10000 train_time:219729ms step_avg:72.26ms +[2025-09-02 06:34:38] [Rank 0] step:3061/10000 train_time:221221ms step_avg:72.27ms +[2025-09-02 06:34:38] [Rank 0] step:3061/10000 train_time:221221ms step_avg:72.27ms +[2025-09-02 06:34:40] [Rank 0] step:3081/10000 train_time:222713ms step_avg:72.29ms +[2025-09-02 06:34:40] [Rank 0] step:3081/10000 train_time:222713ms step_avg:72.29ms +[2025-09-02 06:34:41] [Rank 0] step:3101/10000 train_time:224206ms step_avg:72.30ms +[2025-09-02 06:34:41] [Rank 0] step:3101/10000 train_time:224206ms step_avg:72.30ms +[2025-09-02 06:34:43] [Rank 0] step:3121/10000 train_time:225698ms step_avg:72.32ms +[2025-09-02 06:34:43] [Rank 0] step:3121/10000 train_time:225698ms step_avg:72.32ms +[2025-09-02 06:34:44] [Rank 0] step:3141/10000 train_time:227190ms step_avg:72.33ms +[2025-09-02 06:34:44] [Rank 0] step:3141/10000 train_time:227190ms step_avg:72.33ms +[2025-09-02 06:34:46] [Rank 0] step:3161/10000 train_time:228683ms step_avg:72.35ms +[2025-09-02 06:34:46] [Rank 0] step:3161/10000 train_time:228683ms step_avg:72.35ms +[2025-09-02 06:34:47] [Rank 0] step:3181/10000 train_time:230175ms step_avg:72.36ms +[2025-09-02 06:34:47] [Rank 0] step:3181/10000 train_time:230175ms step_avg:72.36ms +[2025-09-02 06:34:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:34:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:35:00] [Rank 0] PRINT: step:3200/10000 val_loss:4.3581 svd_entropy: attn_qk:H=0.7022,top10E=0.33,eRank=115.2,q75/q25=89.73 attn_vo:H=0.8027,top10E=0.18,eRank=232.7,q75/q25=90.00 mlp_w1:H=0.6608,top10E=0.43,eRank=112.9,q75/q25=7.88 mlp_w2:H=0.7951,top10E=0.20,eRank=210.7,q75/q25=34.99 vo_prod:H=0.7082,top10E=0.28,eRank=115.3,q75/q25=9800.54 train_time:231820ms step_avg:72.44ms +[2025-09-02 06:35:00] [Rank 0] PRINT: step:3200/10000 val_loss:4.3581 svd_entropy: attn_qk:H=0.7022,top10E=0.33,eRank=115.2,q75/q25=89.73 attn_vo:H=0.8027,top10E=0.18,eRank=232.7,q75/q25=90.00 mlp_w1:H=0.6608,top10E=0.43,eRank=112.9,q75/q25=7.88 mlp_w2:H=0.7951,top10E=0.20,eRank=210.7,q75/q25=34.99 vo_prod:H=0.7082,top10E=0.28,eRank=115.3,q75/q25=9800.54 train_time:231820ms step_avg:72.44ms +[2025-09-02 06:35:00] [Rank 0] step:3201/10000 train_time:231833ms step_avg:72.43ms +[2025-09-02 06:35:00] [Rank 0] step:3201/10000 train_time:231833ms step_avg:72.43ms +[2025-09-02 06:35:02] [Rank 0] step:3221/10000 train_time:233178ms step_avg:72.39ms +[2025-09-02 06:35:02] [Rank 0] step:3221/10000 train_time:233178ms step_avg:72.39ms +[2025-09-02 06:35:03] [Rank 0] step:3241/10000 train_time:234668ms step_avg:72.41ms +[2025-09-02 06:35:03] [Rank 0] step:3241/10000 train_time:234668ms step_avg:72.41ms +[2025-09-02 06:35:05] [Rank 0] step:3261/10000 train_time:236159ms step_avg:72.42ms +[2025-09-02 06:35:05] [Rank 0] step:3261/10000 train_time:236159ms step_avg:72.42ms +[2025-09-02 06:35:06] [Rank 0] step:3281/10000 train_time:237650ms step_avg:72.43ms +[2025-09-02 06:35:06] [Rank 0] step:3281/10000 train_time:237650ms step_avg:72.43ms +[2025-09-02 06:35:08] [Rank 0] step:3301/10000 train_time:239142ms step_avg:72.45ms +[2025-09-02 06:35:08] [Rank 0] step:3301/10000 train_time:239142ms step_avg:72.45ms +[2025-09-02 06:35:09] [Rank 0] step:3321/10000 train_time:240633ms step_avg:72.46ms +[2025-09-02 06:35:09] [Rank 0] step:3321/10000 train_time:240633ms step_avg:72.46ms +[2025-09-02 
06:35:11] [Rank 0] step:3341/10000 train_time:242124ms step_avg:72.47ms +[2025-09-02 06:35:11] [Rank 0] step:3341/10000 train_time:242124ms step_avg:72.47ms +[2025-09-02 06:35:12] [Rank 0] step:3361/10000 train_time:243615ms step_avg:72.48ms +[2025-09-02 06:35:12] [Rank 0] step:3361/10000 train_time:243615ms step_avg:72.48ms +[2025-09-02 06:35:14] [Rank 0] step:3381/10000 train_time:245106ms step_avg:72.50ms +[2025-09-02 06:35:14] [Rank 0] step:3381/10000 train_time:245106ms step_avg:72.50ms +[2025-09-02 06:35:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:35:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:35:27] [Rank 0] PRINT: step:3400/10000 val_loss:4.3200 svd_entropy: attn_qk:H=0.7070,top10E=0.32,eRank=118.4,q75/q25=94.52 attn_vo:H=0.8069,top10E=0.17,eRank=238.2,q75/q25=87.47 mlp_w1:H=0.6670,top10E=0.42,eRank=116.1,q75/q25=8.20 mlp_w2:H=0.7983,top10E=0.20,eRank=215.5,q75/q25=36.10 vo_prod:H=0.7132,top10E=0.27,eRank=119.3,q75/q25=9148.88 train_time:246749ms step_avg:72.57ms +[2025-09-02 06:35:27] [Rank 0] PRINT: step:3400/10000 val_loss:4.3200 svd_entropy: attn_qk:H=0.7070,top10E=0.32,eRank=118.4,q75/q25=94.52 attn_vo:H=0.8069,top10E=0.17,eRank=238.2,q75/q25=87.47 mlp_w1:H=0.6670,top10E=0.42,eRank=116.1,q75/q25=8.20 mlp_w2:H=0.7983,top10E=0.20,eRank=215.5,q75/q25=36.10 vo_prod:H=0.7132,top10E=0.27,eRank=119.3,q75/q25=9148.88 train_time:246749ms step_avg:72.57ms +[2025-09-02 06:35:27] [Rank 0] step:3401/10000 train_time:246760ms step_avg:72.56ms +[2025-09-02 06:35:27] [Rank 0] step:3401/10000 train_time:246760ms step_avg:72.56ms +[2025-09-02 06:35:29] [Rank 0] step:3421/10000 train_time:248112ms step_avg:72.53ms +[2025-09-02 06:35:29] [Rank 0] step:3421/10000 train_time:248112ms step_avg:72.53ms +[2025-09-02 06:35:30] [Rank 0] step:3441/10000 train_time:249600ms 
step_avg:72.54ms +[2025-09-02 06:35:30] [Rank 0] step:3441/10000 train_time:249600ms step_avg:72.54ms +[2025-09-02 06:35:32] [Rank 0] step:3461/10000 train_time:251089ms step_avg:72.55ms +[2025-09-02 06:35:32] [Rank 0] step:3461/10000 train_time:251089ms step_avg:72.55ms +[2025-09-02 06:35:33] [Rank 0] step:3481/10000 train_time:252578ms step_avg:72.56ms +[2025-09-02 06:35:33] [Rank 0] step:3481/10000 train_time:252578ms step_avg:72.56ms +[2025-09-02 06:35:35] [Rank 0] step:3501/10000 train_time:254069ms step_avg:72.57ms +[2025-09-02 06:35:35] [Rank 0] step:3501/10000 train_time:254069ms step_avg:72.57ms +[2025-09-02 06:35:36] [Rank 0] step:3521/10000 train_time:255565ms step_avg:72.58ms +[2025-09-02 06:35:36] [Rank 0] step:3521/10000 train_time:255565ms step_avg:72.58ms +[2025-09-02 06:35:38] [Rank 0] step:3541/10000 train_time:257057ms step_avg:72.59ms +[2025-09-02 06:35:38] [Rank 0] step:3541/10000 train_time:257057ms step_avg:72.59ms +[2025-09-02 06:35:39] [Rank 0] step:3561/10000 train_time:258548ms step_avg:72.61ms +[2025-09-02 06:35:39] [Rank 0] step:3561/10000 train_time:258548ms step_avg:72.61ms +[2025-09-02 06:35:41] [Rank 0] step:3581/10000 train_time:260040ms step_avg:72.62ms +[2025-09-02 06:35:41] [Rank 0] step:3581/10000 train_time:260040ms step_avg:72.62ms +[2025-09-02 06:35:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:35:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:35:54] [Rank 0] PRINT: step:3600/10000 val_loss:4.3126 svd_entropy: attn_qk:H=0.7114,top10E=0.31,eRank=121.3,q75/q25=98.86 attn_vo:H=0.8105,top10E=0.17,eRank=243.1,q75/q25=85.91 mlp_w1:H=0.6729,top10E=0.41,eRank=119.3,q75/q25=8.56 mlp_w2:H=0.8009,top10E=0.19,eRank=219.4,q75/q25=37.22 vo_prod:H=0.7175,top10E=0.27,eRank=122.7,q75/q25=8649.32 train_time:261680ms step_avg:72.69ms +[2025-09-02 06:35:54] [Rank 0] PRINT: step:3600/10000 val_loss:4.3126 svd_entropy: attn_qk:H=0.7114,top10E=0.31,eRank=121.3,q75/q25=98.86 attn_vo:H=0.8105,top10E=0.17,eRank=243.1,q75/q25=85.91 mlp_w1:H=0.6729,top10E=0.41,eRank=119.3,q75/q25=8.56 mlp_w2:H=0.8009,top10E=0.19,eRank=219.4,q75/q25=37.22 vo_prod:H=0.7175,top10E=0.27,eRank=122.7,q75/q25=8649.32 train_time:261680ms step_avg:72.69ms +[2025-09-02 06:35:54] [Rank 0] step:3601/10000 train_time:261692ms step_avg:72.67ms +[2025-09-02 06:35:54] [Rank 0] step:3601/10000 train_time:261692ms step_avg:72.67ms +[2025-09-02 06:35:56] [Rank 0] step:3621/10000 train_time:263041ms step_avg:72.64ms +[2025-09-02 06:35:56] [Rank 0] step:3621/10000 train_time:263041ms step_avg:72.64ms +[2025-09-02 06:35:57] [Rank 0] step:3641/10000 train_time:264530ms step_avg:72.65ms +[2025-09-02 06:35:57] [Rank 0] step:3641/10000 train_time:264530ms step_avg:72.65ms +[2025-09-02 06:35:59] [Rank 0] step:3661/10000 train_time:266020ms step_avg:72.66ms +[2025-09-02 06:35:59] [Rank 0] step:3661/10000 train_time:266020ms step_avg:72.66ms +[2025-09-02 06:36:00] [Rank 0] step:3681/10000 train_time:267512ms step_avg:72.67ms +[2025-09-02 06:36:00] [Rank 0] step:3681/10000 train_time:267512ms step_avg:72.67ms +[2025-09-02 06:36:02] [Rank 0] step:3701/10000 train_time:269003ms step_avg:72.68ms +[2025-09-02 06:36:02] [Rank 0] step:3701/10000 train_time:269003ms step_avg:72.68ms +[2025-09-02 06:36:03] [Rank 0] step:3721/10000 train_time:270521ms step_avg:72.70ms +[2025-09-02 06:36:03] [Rank 0] step:3721/10000 train_time:270521ms step_avg:72.70ms +[2025-09-02 
06:36:05] [Rank 0] step:3741/10000 train_time:272049ms step_avg:72.72ms +[2025-09-02 06:36:05] [Rank 0] step:3741/10000 train_time:272049ms step_avg:72.72ms +[2025-09-02 06:36:06] [Rank 0] step:3761/10000 train_time:273577ms step_avg:72.74ms +[2025-09-02 06:36:06] [Rank 0] step:3761/10000 train_time:273577ms step_avg:72.74ms +[2025-09-02 06:36:08] [Rank 0] step:3781/10000 train_time:275105ms step_avg:72.76ms +[2025-09-02 06:36:08] [Rank 0] step:3781/10000 train_time:275105ms step_avg:72.76ms +[2025-09-02 06:36:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:36:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:36:21] [Rank 0] PRINT: step:3800/10000 val_loss:4.2572 svd_entropy: attn_qk:H=0.7153,top10E=0.31,eRank=124.0,q75/q25=101.86 attn_vo:H=0.8138,top10E=0.16,eRank=247.8,q75/q25=83.29 mlp_w1:H=0.6784,top10E=0.40,eRank=122.5,q75/q25=8.87 mlp_w2:H=0.8034,top10E=0.19,eRank=223.4,q75/q25=38.43 vo_prod:H=0.7216,top10E=0.26,eRank=126.1,q75/q25=7964.17 train_time:276788ms step_avg:72.84ms +[2025-09-02 06:36:21] [Rank 0] PRINT: step:3800/10000 val_loss:4.2572 svd_entropy: attn_qk:H=0.7153,top10E=0.31,eRank=124.0,q75/q25=101.86 attn_vo:H=0.8138,top10E=0.16,eRank=247.8,q75/q25=83.29 mlp_w1:H=0.6784,top10E=0.40,eRank=122.5,q75/q25=8.87 mlp_w2:H=0.8034,top10E=0.19,eRank=223.4,q75/q25=38.43 vo_prod:H=0.7216,top10E=0.26,eRank=126.1,q75/q25=7964.17 train_time:276788ms step_avg:72.84ms +[2025-09-02 06:36:21] [Rank 0] step:3801/10000 train_time:276800ms step_avg:72.82ms +[2025-09-02 06:36:21] [Rank 0] step:3801/10000 train_time:276800ms step_avg:72.82ms +[2025-09-02 06:36:23] [Rank 0] step:3821/10000 train_time:278183ms step_avg:72.80ms +[2025-09-02 06:36:23] [Rank 0] step:3821/10000 train_time:278183ms step_avg:72.80ms +[2025-09-02 06:36:24] [Rank 0] step:3841/10000 train_time:279714ms 
step_avg:72.82ms +[2025-09-02 06:36:24] [Rank 0] step:3841/10000 train_time:279714ms step_avg:72.82ms +[2025-09-02 06:36:26] [Rank 0] step:3861/10000 train_time:281242ms step_avg:72.84ms +[2025-09-02 06:36:26] [Rank 0] step:3861/10000 train_time:281242ms step_avg:72.84ms +[2025-09-02 06:36:27] [Rank 0] step:3881/10000 train_time:282769ms step_avg:72.86ms +[2025-09-02 06:36:27] [Rank 0] step:3881/10000 train_time:282769ms step_avg:72.86ms +[2025-09-02 06:36:29] [Rank 0] step:3901/10000 train_time:284298ms step_avg:72.88ms +[2025-09-02 06:36:29] [Rank 0] step:3901/10000 train_time:284298ms step_avg:72.88ms +[2025-09-02 06:36:30] [Rank 0] step:3921/10000 train_time:285826ms step_avg:72.90ms +[2025-09-02 06:36:30] [Rank 0] step:3921/10000 train_time:285826ms step_avg:72.90ms +[2025-09-02 06:36:32] [Rank 0] step:3941/10000 train_time:287356ms step_avg:72.91ms +[2025-09-02 06:36:32] [Rank 0] step:3941/10000 train_time:287356ms step_avg:72.91ms +[2025-09-02 06:36:33] [Rank 0] step:3961/10000 train_time:288884ms step_avg:72.93ms +[2025-09-02 06:36:33] [Rank 0] step:3961/10000 train_time:288884ms step_avg:72.93ms +[2025-09-02 06:36:35] [Rank 0] step:3981/10000 train_time:290413ms step_avg:72.95ms +[2025-09-02 06:36:35] [Rank 0] step:3981/10000 train_time:290413ms step_avg:72.95ms +[2025-09-02 06:36:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:36:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:36:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.2302 svd_entropy: attn_qk:H=0.7190,top10E=0.30,eRank=126.7,q75/q25=104.48 attn_vo:H=0.8170,top10E=0.16,eRank=252.3,q75/q25=80.29 mlp_w1:H=0.6835,top10E=0.39,eRank=125.6,q75/q25=9.18 mlp_w2:H=0.8059,top10E=0.18,eRank=227.3,q75/q25=38.79 vo_prod:H=0.7255,top10E=0.26,eRank=129.4,q75/q25=7351.34 train_time:292095ms step_avg:73.02ms +[2025-09-02 06:36:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.2302 svd_entropy: attn_qk:H=0.7190,top10E=0.30,eRank=126.7,q75/q25=104.48 attn_vo:H=0.8170,top10E=0.16,eRank=252.3,q75/q25=80.29 mlp_w1:H=0.6835,top10E=0.39,eRank=125.6,q75/q25=9.18 mlp_w2:H=0.8059,top10E=0.18,eRank=227.3,q75/q25=38.79 vo_prod:H=0.7255,top10E=0.26,eRank=129.4,q75/q25=7351.34 train_time:292095ms step_avg:73.02ms +[2025-09-02 06:36:48] [Rank 0] step:4001/10000 train_time:292106ms step_avg:73.01ms +[2025-09-02 06:36:48] [Rank 0] step:4001/10000 train_time:292106ms step_avg:73.01ms +[2025-09-02 06:36:50] [Rank 0] step:4021/10000 train_time:293506ms step_avg:72.99ms +[2025-09-02 06:36:50] [Rank 0] step:4021/10000 train_time:293506ms step_avg:72.99ms +[2025-09-02 06:36:51] [Rank 0] step:4041/10000 train_time:295034ms step_avg:73.01ms +[2025-09-02 06:36:51] [Rank 0] step:4041/10000 train_time:295034ms step_avg:73.01ms +[2025-09-02 06:36:53] [Rank 0] step:4061/10000 train_time:296562ms step_avg:73.03ms +[2025-09-02 06:36:53] [Rank 0] step:4061/10000 train_time:296562ms step_avg:73.03ms +[2025-09-02 06:36:54] [Rank 0] step:4081/10000 train_time:298194ms step_avg:73.07ms +[2025-09-02 06:36:54] [Rank 0] step:4081/10000 train_time:298194ms step_avg:73.07ms +[2025-09-02 06:36:56] [Rank 0] step:4101/10000 train_time:299721ms step_avg:73.08ms +[2025-09-02 06:36:56] [Rank 0] step:4101/10000 train_time:299721ms step_avg:73.08ms +[2025-09-02 06:36:57] [Rank 0] step:4121/10000 train_time:301249ms step_avg:73.10ms +[2025-09-02 06:36:57] [Rank 0] step:4121/10000 train_time:301249ms step_avg:73.10ms +[2025-09-02 
06:36:59] [Rank 0] step:4141/10000 train_time:302777ms step_avg:73.12ms +[2025-09-02 06:36:59] [Rank 0] step:4141/10000 train_time:302777ms step_avg:73.12ms +[2025-09-02 06:37:00] [Rank 0] step:4161/10000 train_time:304306ms step_avg:73.13ms +[2025-09-02 06:37:00] [Rank 0] step:4161/10000 train_time:304306ms step_avg:73.13ms +[2025-09-02 06:37:02] [Rank 0] step:4181/10000 train_time:305835ms step_avg:73.15ms +[2025-09-02 06:37:02] [Rank 0] step:4181/10000 train_time:305835ms step_avg:73.15ms +[2025-09-02 06:37:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:37:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:37:15] [Rank 0] PRINT: step:4200/10000 val_loss:4.2140 svd_entropy: attn_qk:H=0.7225,top10E=0.30,eRank=129.2,q75/q25=106.77 attn_vo:H=0.8199,top10E=0.16,eRank=256.5,q75/q25=77.58 mlp_w1:H=0.6884,top10E=0.39,eRank=128.6,q75/q25=9.53 mlp_w2:H=0.8080,top10E=0.18,eRank=230.7,q75/q25=39.74 vo_prod:H=0.7291,top10E=0.25,eRank=132.5,q75/q25=6839.24 train_time:307517ms step_avg:73.22ms +[2025-09-02 06:37:15] [Rank 0] PRINT: step:4200/10000 val_loss:4.2140 svd_entropy: attn_qk:H=0.7225,top10E=0.30,eRank=129.2,q75/q25=106.77 attn_vo:H=0.8199,top10E=0.16,eRank=256.5,q75/q25=77.58 mlp_w1:H=0.6884,top10E=0.39,eRank=128.6,q75/q25=9.53 mlp_w2:H=0.8080,top10E=0.18,eRank=230.7,q75/q25=39.74 vo_prod:H=0.7291,top10E=0.25,eRank=132.5,q75/q25=6839.24 train_time:307517ms step_avg:73.22ms +[2025-09-02 06:37:15] [Rank 0] step:4201/10000 train_time:307529ms step_avg:73.20ms +[2025-09-02 06:37:15] [Rank 0] step:4201/10000 train_time:307529ms step_avg:73.20ms +[2025-09-02 06:37:17] [Rank 0] step:4221/10000 train_time:308925ms step_avg:73.19ms +[2025-09-02 06:37:17] [Rank 0] step:4221/10000 train_time:308925ms step_avg:73.19ms +[2025-09-02 06:37:18] [Rank 0] step:4241/10000 train_time:310453ms 
step_avg:73.20ms +[2025-09-02 06:37:18] [Rank 0] step:4241/10000 train_time:310453ms step_avg:73.20ms +[2025-09-02 06:37:20] [Rank 0] step:4261/10000 train_time:311979ms step_avg:73.22ms +[2025-09-02 06:37:20] [Rank 0] step:4261/10000 train_time:311979ms step_avg:73.22ms +[2025-09-02 06:37:21] [Rank 0] step:4281/10000 train_time:313505ms step_avg:73.23ms +[2025-09-02 06:37:21] [Rank 0] step:4281/10000 train_time:313505ms step_avg:73.23ms +[2025-09-02 06:37:23] [Rank 0] step:4301/10000 train_time:315032ms step_avg:73.25ms +[2025-09-02 06:37:23] [Rank 0] step:4301/10000 train_time:315032ms step_avg:73.25ms +[2025-09-02 06:37:24] [Rank 0] step:4321/10000 train_time:316562ms step_avg:73.26ms +[2025-09-02 06:37:24] [Rank 0] step:4321/10000 train_time:316562ms step_avg:73.26ms +[2025-09-02 06:37:26] [Rank 0] step:4341/10000 train_time:318088ms step_avg:73.28ms +[2025-09-02 06:37:26] [Rank 0] step:4341/10000 train_time:318088ms step_avg:73.28ms +[2025-09-02 06:37:27] [Rank 0] step:4361/10000 train_time:319617ms step_avg:73.29ms +[2025-09-02 06:37:27] [Rank 0] step:4361/10000 train_time:319617ms step_avg:73.29ms +[2025-09-02 06:37:29] [Rank 0] step:4381/10000 train_time:321142ms step_avg:73.30ms +[2025-09-02 06:37:29] [Rank 0] step:4381/10000 train_time:321142ms step_avg:73.30ms +[2025-09-02 06:37:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:37:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:37:42] [Rank 0] PRINT: step:4400/10000 val_loss:4.1943 svd_entropy: attn_qk:H=0.7257,top10E=0.29,eRank=131.7,q75/q25=108.45 attn_vo:H=0.8226,top10E=0.16,eRank=260.4,q75/q25=75.62 mlp_w1:H=0.6931,top10E=0.38,eRank=131.7,q75/q25=9.79 mlp_w2:H=0.8102,top10E=0.17,eRank=234.3,q75/q25=40.52 vo_prod:H=0.7324,top10E=0.25,eRank=135.5,q75/q25=6268.73 train_time:322822ms step_avg:73.37ms +[2025-09-02 06:37:42] [Rank 0] PRINT: step:4400/10000 val_loss:4.1943 svd_entropy: attn_qk:H=0.7257,top10E=0.29,eRank=131.7,q75/q25=108.45 attn_vo:H=0.8226,top10E=0.16,eRank=260.4,q75/q25=75.62 mlp_w1:H=0.6931,top10E=0.38,eRank=131.7,q75/q25=9.79 mlp_w2:H=0.8102,top10E=0.17,eRank=234.3,q75/q25=40.52 vo_prod:H=0.7324,top10E=0.25,eRank=135.5,q75/q25=6268.73 train_time:322822ms step_avg:73.37ms +[2025-09-02 06:37:42] [Rank 0] step:4401/10000 train_time:322834ms step_avg:73.35ms +[2025-09-02 06:37:42] [Rank 0] step:4401/10000 train_time:322834ms step_avg:73.35ms +[2025-09-02 06:37:44] [Rank 0] step:4421/10000 train_time:324208ms step_avg:73.33ms +[2025-09-02 06:37:44] [Rank 0] step:4421/10000 train_time:324208ms step_avg:73.33ms +[2025-09-02 06:37:45] [Rank 0] step:4441/10000 train_time:325732ms step_avg:73.35ms +[2025-09-02 06:37:45] [Rank 0] step:4441/10000 train_time:325732ms step_avg:73.35ms +[2025-09-02 06:37:47] [Rank 0] step:4461/10000 train_time:327262ms step_avg:73.36ms +[2025-09-02 06:37:47] [Rank 0] step:4461/10000 train_time:327262ms step_avg:73.36ms +[2025-09-02 06:37:48] [Rank 0] step:4481/10000 train_time:328794ms step_avg:73.38ms +[2025-09-02 06:37:48] [Rank 0] step:4481/10000 train_time:328794ms step_avg:73.38ms +[2025-09-02 06:37:50] [Rank 0] step:4501/10000 train_time:330327ms step_avg:73.39ms +[2025-09-02 06:37:50] [Rank 0] step:4501/10000 train_time:330327ms step_avg:73.39ms +[2025-09-02 06:37:51] [Rank 0] step:4521/10000 train_time:331859ms step_avg:73.40ms +[2025-09-02 06:37:51] [Rank 0] step:4521/10000 train_time:331859ms step_avg:73.40ms +[2025-09-02 
06:37:53] [Rank 0] step:4541/10000 train_time:333392ms step_avg:73.42ms +[2025-09-02 06:37:53] [Rank 0] step:4541/10000 train_time:333392ms step_avg:73.42ms +[2025-09-02 06:37:54] [Rank 0] step:4561/10000 train_time:334926ms step_avg:73.43ms +[2025-09-02 06:37:54] [Rank 0] step:4561/10000 train_time:334926ms step_avg:73.43ms +[2025-09-02 06:37:56] [Rank 0] step:4581/10000 train_time:336460ms step_avg:73.45ms +[2025-09-02 06:37:56] [Rank 0] step:4581/10000 train_time:336460ms step_avg:73.45ms +[2025-09-02 06:37:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:37:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:38:09] [Rank 0] PRINT: step:4600/10000 val_loss:4.1615 svd_entropy: attn_qk:H=0.7289,top10E=0.29,eRank=134.2,q75/q25=110.54 attn_vo:H=0.8252,top10E=0.15,eRank=264.4,q75/q25=73.26 mlp_w1:H=0.6974,top10E=0.37,eRank=134.5,q75/q25=10.04 mlp_w2:H=0.8122,top10E=0.17,eRank=237.7,q75/q25=41.60 vo_prod:H=0.7358,top10E=0.24,eRank=138.5,q75/q25=5658.79 train_time:338149ms step_avg:73.51ms +[2025-09-02 06:38:09] [Rank 0] PRINT: step:4600/10000 val_loss:4.1615 svd_entropy: attn_qk:H=0.7289,top10E=0.29,eRank=134.2,q75/q25=110.54 attn_vo:H=0.8252,top10E=0.15,eRank=264.4,q75/q25=73.26 mlp_w1:H=0.6974,top10E=0.37,eRank=134.5,q75/q25=10.04 mlp_w2:H=0.8122,top10E=0.17,eRank=237.7,q75/q25=41.60 vo_prod:H=0.7358,top10E=0.24,eRank=138.5,q75/q25=5658.79 train_time:338149ms step_avg:73.51ms +[2025-09-02 06:38:09] [Rank 0] step:4601/10000 train_time:338161ms step_avg:73.50ms +[2025-09-02 06:38:09] [Rank 0] step:4601/10000 train_time:338161ms step_avg:73.50ms +[2025-09-02 06:38:11] [Rank 0] step:4621/10000 train_time:339549ms step_avg:73.48ms +[2025-09-02 06:38:11] [Rank 0] step:4621/10000 train_time:339549ms step_avg:73.48ms +[2025-09-02 06:38:12] [Rank 0] step:4641/10000 train_time:341082ms 
step_avg:73.49ms +[2025-09-02 06:38:12] [Rank 0] step:4641/10000 train_time:341082ms step_avg:73.49ms +[2025-09-02 06:38:14] [Rank 0] step:4661/10000 train_time:342617ms step_avg:73.51ms +[2025-09-02 06:38:14] [Rank 0] step:4661/10000 train_time:342617ms step_avg:73.51ms +[2025-09-02 06:38:15] [Rank 0] step:4681/10000 train_time:344150ms step_avg:73.52ms +[2025-09-02 06:38:15] [Rank 0] step:4681/10000 train_time:344150ms step_avg:73.52ms +[2025-09-02 06:38:17] [Rank 0] step:4701/10000 train_time:345685ms step_avg:73.53ms +[2025-09-02 06:38:17] [Rank 0] step:4701/10000 train_time:345685ms step_avg:73.53ms +[2025-09-02 06:38:18] [Rank 0] step:4721/10000 train_time:347219ms step_avg:73.55ms +[2025-09-02 06:38:18] [Rank 0] step:4721/10000 train_time:347219ms step_avg:73.55ms +[2025-09-02 06:38:20] [Rank 0] step:4741/10000 train_time:348755ms step_avg:73.56ms +[2025-09-02 06:38:20] [Rank 0] step:4741/10000 train_time:348755ms step_avg:73.56ms +[2025-09-02 06:38:21] [Rank 0] step:4761/10000 train_time:350289ms step_avg:73.57ms +[2025-09-02 06:38:21] [Rank 0] step:4761/10000 train_time:350289ms step_avg:73.57ms +[2025-09-02 06:38:23] [Rank 0] step:4781/10000 train_time:351822ms step_avg:73.59ms +[2025-09-02 06:38:23] [Rank 0] step:4781/10000 train_time:351822ms step_avg:73.59ms +[2025-09-02 06:38:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:38:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:38:36] [Rank 0] PRINT: step:4800/10000 val_loss:4.1484 svd_entropy: attn_qk:H=0.7319,top10E=0.28,eRank=136.5,q75/q25=112.46 attn_vo:H=0.8277,top10E=0.15,eRank=268.2,q75/q25=71.02 mlp_w1:H=0.7014,top10E=0.37,eRank=137.2,q75/q25=10.36 mlp_w2:H=0.8142,top10E=0.17,eRank=241.0,q75/q25=41.84 vo_prod:H=0.7390,top10E=0.24,eRank=141.5,q75/q25=5223.27 train_time:353513ms step_avg:73.65ms +[2025-09-02 06:38:36] [Rank 0] PRINT: step:4800/10000 val_loss:4.1484 svd_entropy: attn_qk:H=0.7319,top10E=0.28,eRank=136.5,q75/q25=112.46 attn_vo:H=0.8277,top10E=0.15,eRank=268.2,q75/q25=71.02 mlp_w1:H=0.7014,top10E=0.37,eRank=137.2,q75/q25=10.36 mlp_w2:H=0.8142,top10E=0.17,eRank=241.0,q75/q25=41.84 vo_prod:H=0.7390,top10E=0.24,eRank=141.5,q75/q25=5223.27 train_time:353513ms step_avg:73.65ms +[2025-09-02 06:38:36] [Rank 0] step:4801/10000 train_time:353525ms step_avg:73.64ms +[2025-09-02 06:38:36] [Rank 0] step:4801/10000 train_time:353525ms step_avg:73.64ms +[2025-09-02 06:38:38] [Rank 0] step:4821/10000 train_time:354919ms step_avg:73.62ms +[2025-09-02 06:38:38] [Rank 0] step:4821/10000 train_time:354919ms step_avg:73.62ms +[2025-09-02 06:38:39] [Rank 0] step:4841/10000 train_time:356452ms step_avg:73.63ms +[2025-09-02 06:38:39] [Rank 0] step:4841/10000 train_time:356452ms step_avg:73.63ms +[2025-09-02 06:38:41] [Rank 0] step:4861/10000 train_time:357991ms step_avg:73.65ms +[2025-09-02 06:38:41] [Rank 0] step:4861/10000 train_time:357991ms step_avg:73.65ms +[2025-09-02 06:38:42] [Rank 0] step:4881/10000 train_time:359525ms step_avg:73.66ms +[2025-09-02 06:38:42] [Rank 0] step:4881/10000 train_time:359525ms step_avg:73.66ms +[2025-09-02 06:38:44] [Rank 0] step:4901/10000 train_time:361059ms step_avg:73.67ms +[2025-09-02 06:38:44] [Rank 0] step:4901/10000 train_time:361059ms step_avg:73.67ms +[2025-09-02 06:38:45] [Rank 0] step:4921/10000 train_time:362596ms step_avg:73.68ms +[2025-09-02 06:38:45] [Rank 0] step:4921/10000 train_time:362596ms step_avg:73.68ms +[2025-09-02 
06:38:47] [Rank 0] step:4941/10000 train_time:364134ms step_avg:73.70ms +[2025-09-02 06:38:47] [Rank 0] step:4941/10000 train_time:364134ms step_avg:73.70ms +[2025-09-02 06:38:48] [Rank 0] step:4961/10000 train_time:365669ms step_avg:73.71ms +[2025-09-02 06:38:48] [Rank 0] step:4961/10000 train_time:365669ms step_avg:73.71ms +[2025-09-02 06:38:50] [Rank 0] step:4981/10000 train_time:367204ms step_avg:73.72ms +[2025-09-02 06:38:50] [Rank 0] step:4981/10000 train_time:367204ms step_avg:73.72ms +[2025-09-02 06:38:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:38:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:39:03] [Rank 0] PRINT: step:5000/10000 val_loss:4.1288 svd_entropy: attn_qk:H=0.7346,top10E=0.28,eRank=138.7,q75/q25=113.93 attn_vo:H=0.8300,top10E=0.15,eRank=271.7,q75/q25=68.22 mlp_w1:H=0.7051,top10E=0.36,eRank=139.8,q75/q25=10.67 mlp_w2:H=0.8157,top10E=0.16,eRank=243.6,q75/q25=42.68 vo_prod:H=0.7419,top10E=0.24,eRank=144.3,q75/q25=4629.95 train_time:368893ms step_avg:73.78ms +[2025-09-02 06:39:03] [Rank 0] PRINT: step:5000/10000 val_loss:4.1288 svd_entropy: attn_qk:H=0.7346,top10E=0.28,eRank=138.7,q75/q25=113.93 attn_vo:H=0.8300,top10E=0.15,eRank=271.7,q75/q25=68.22 mlp_w1:H=0.7051,top10E=0.36,eRank=139.8,q75/q25=10.67 mlp_w2:H=0.8157,top10E=0.16,eRank=243.6,q75/q25=42.68 vo_prod:H=0.7419,top10E=0.24,eRank=144.3,q75/q25=4629.95 train_time:368893ms step_avg:73.78ms +[2025-09-02 06:39:03] [Rank 0] step:5001/10000 train_time:368905ms step_avg:73.77ms +[2025-09-02 06:39:03] [Rank 0] step:5001/10000 train_time:368905ms step_avg:73.77ms +[2025-09-02 06:39:05] [Rank 0] step:5021/10000 train_time:370296ms step_avg:73.75ms +[2025-09-02 06:39:05] [Rank 0] step:5021/10000 train_time:370296ms step_avg:73.75ms +[2025-09-02 06:39:06] [Rank 0] step:5041/10000 train_time:371832ms 
step_avg:73.76ms +[2025-09-02 06:39:06] [Rank 0] step:5041/10000 train_time:371832ms step_avg:73.76ms +[2025-09-02 06:39:08] [Rank 0] step:5061/10000 train_time:373363ms step_avg:73.77ms +[2025-09-02 06:39:08] [Rank 0] step:5061/10000 train_time:373363ms step_avg:73.77ms +[2025-09-02 06:39:09] [Rank 0] step:5081/10000 train_time:374896ms step_avg:73.78ms +[2025-09-02 06:39:09] [Rank 0] step:5081/10000 train_time:374896ms step_avg:73.78ms +[2025-09-02 06:39:11] [Rank 0] step:5101/10000 train_time:376429ms step_avg:73.80ms +[2025-09-02 06:39:11] [Rank 0] step:5101/10000 train_time:376429ms step_avg:73.80ms +[2025-09-02 06:39:12] [Rank 0] step:5121/10000 train_time:377964ms step_avg:73.81ms +[2025-09-02 06:39:12] [Rank 0] step:5121/10000 train_time:377964ms step_avg:73.81ms +[2025-09-02 06:39:14] [Rank 0] step:5141/10000 train_time:379500ms step_avg:73.82ms +[2025-09-02 06:39:14] [Rank 0] step:5141/10000 train_time:379500ms step_avg:73.82ms +[2025-09-02 06:39:15] [Rank 0] step:5161/10000 train_time:381034ms step_avg:73.83ms +[2025-09-02 06:39:15] [Rank 0] step:5161/10000 train_time:381034ms step_avg:73.83ms +[2025-09-02 06:39:17] [Rank 0] step:5181/10000 train_time:382571ms step_avg:73.84ms +[2025-09-02 06:39:17] [Rank 0] step:5181/10000 train_time:382571ms step_avg:73.84ms +[2025-09-02 06:39:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:39:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:39:30] [Rank 0] PRINT: step:5200/10000 val_loss:4.1102 svd_entropy: attn_qk:H=0.7372,top10E=0.28,eRank=140.8,q75/q25=114.75 attn_vo:H=0.8322,top10E=0.15,eRank=275.0,q75/q25=66.16 mlp_w1:H=0.7086,top10E=0.36,eRank=142.3,q75/q25=10.94 mlp_w2:H=0.8170,top10E=0.16,eRank=246.1,q75/q25=43.28 vo_prod:H=0.7445,top10E=0.23,eRank=146.7,q75/q25=4245.39 train_time:384283ms step_avg:73.90ms +[2025-09-02 06:39:30] [Rank 0] PRINT: step:5200/10000 val_loss:4.1102 svd_entropy: attn_qk:H=0.7372,top10E=0.28,eRank=140.8,q75/q25=114.75 attn_vo:H=0.8322,top10E=0.15,eRank=275.0,q75/q25=66.16 mlp_w1:H=0.7086,top10E=0.36,eRank=142.3,q75/q25=10.94 mlp_w2:H=0.8170,top10E=0.16,eRank=246.1,q75/q25=43.28 vo_prod:H=0.7445,top10E=0.23,eRank=146.7,q75/q25=4245.39 train_time:384283ms step_avg:73.90ms +[2025-09-02 06:39:30] [Rank 0] step:5201/10000 train_time:384295ms step_avg:73.89ms +[2025-09-02 06:39:30] [Rank 0] step:5201/10000 train_time:384295ms step_avg:73.89ms +[2025-09-02 06:39:32] [Rank 0] step:5221/10000 train_time:385707ms step_avg:73.88ms +[2025-09-02 06:39:32] [Rank 0] step:5221/10000 train_time:385707ms step_avg:73.88ms +[2025-09-02 06:39:33] [Rank 0] step:5241/10000 train_time:387270ms step_avg:73.89ms +[2025-09-02 06:39:33] [Rank 0] step:5241/10000 train_time:387270ms step_avg:73.89ms +[2025-09-02 06:39:35] [Rank 0] step:5261/10000 train_time:388834ms step_avg:73.91ms +[2025-09-02 06:39:35] [Rank 0] step:5261/10000 train_time:388834ms step_avg:73.91ms +[2025-09-02 06:39:36] [Rank 0] step:5281/10000 train_time:390401ms step_avg:73.93ms +[2025-09-02 06:39:36] [Rank 0] step:5281/10000 train_time:390401ms step_avg:73.93ms +[2025-09-02 06:39:38] [Rank 0] step:5301/10000 train_time:391976ms step_avg:73.94ms +[2025-09-02 06:39:38] [Rank 0] step:5301/10000 train_time:391976ms step_avg:73.94ms +[2025-09-02 06:39:40] [Rank 0] step:5321/10000 train_time:393539ms step_avg:73.96ms +[2025-09-02 06:39:40] [Rank 0] step:5321/10000 train_time:393539ms step_avg:73.96ms +[2025-09-02 
06:39:41] [Rank 0] step:5341/10000 train_time:395104ms step_avg:73.98ms +[2025-09-02 06:39:41] [Rank 0] step:5341/10000 train_time:395104ms step_avg:73.98ms +[2025-09-02 06:39:43] [Rank 0] step:5361/10000 train_time:396674ms step_avg:73.99ms +[2025-09-02 06:39:43] [Rank 0] step:5361/10000 train_time:396674ms step_avg:73.99ms +[2025-09-02 06:39:44] [Rank 0] step:5381/10000 train_time:398243ms step_avg:74.01ms +[2025-09-02 06:39:44] [Rank 0] step:5381/10000 train_time:398243ms step_avg:74.01ms +[2025-09-02 06:39:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:39:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:39:57] [Rank 0] PRINT: step:5400/10000 val_loss:4.0917 svd_entropy: attn_qk:H=0.7396,top10E=0.27,eRank=142.8,q75/q25=114.96 attn_vo:H=0.8342,top10E=0.14,eRank=278.2,q75/q25=63.91 mlp_w1:H=0.7119,top10E=0.35,eRank=144.8,q75/q25=11.26 mlp_w2:H=0.8183,top10E=0.16,eRank=248.3,q75/q25=44.39 vo_prod:H=0.7471,top10E=0.23,eRank=149.3,q75/q25=3864.21 train_time:399966ms step_avg:74.07ms +[2025-09-02 06:39:57] [Rank 0] PRINT: step:5400/10000 val_loss:4.0917 svd_entropy: attn_qk:H=0.7396,top10E=0.27,eRank=142.8,q75/q25=114.96 attn_vo:H=0.8342,top10E=0.14,eRank=278.2,q75/q25=63.91 mlp_w1:H=0.7119,top10E=0.35,eRank=144.8,q75/q25=11.26 mlp_w2:H=0.8183,top10E=0.16,eRank=248.3,q75/q25=44.39 vo_prod:H=0.7471,top10E=0.23,eRank=149.3,q75/q25=3864.21 train_time:399966ms step_avg:74.07ms +[2025-09-02 06:39:58] [Rank 0] step:5401/10000 train_time:399978ms step_avg:74.06ms +[2025-09-02 06:39:58] [Rank 0] step:5401/10000 train_time:399978ms step_avg:74.06ms +[2025-09-02 06:39:59] [Rank 0] step:5421/10000 train_time:401406ms step_avg:74.05ms +[2025-09-02 06:39:59] [Rank 0] step:5421/10000 train_time:401406ms step_avg:74.05ms +[2025-09-02 06:40:01] [Rank 0] step:5441/10000 train_time:402966ms 
step_avg:74.06ms +[2025-09-02 06:40:01] [Rank 0] step:5441/10000 train_time:402966ms step_avg:74.06ms +[2025-09-02 06:40:02] [Rank 0] step:5461/10000 train_time:404537ms step_avg:74.08ms +[2025-09-02 06:40:02] [Rank 0] step:5461/10000 train_time:404537ms step_avg:74.08ms +[2025-09-02 06:40:04] [Rank 0] step:5481/10000 train_time:406104ms step_avg:74.09ms +[2025-09-02 06:40:04] [Rank 0] step:5481/10000 train_time:406104ms step_avg:74.09ms +[2025-09-02 06:40:05] [Rank 0] step:5501/10000 train_time:407677ms step_avg:74.11ms +[2025-09-02 06:40:05] [Rank 0] step:5501/10000 train_time:407677ms step_avg:74.11ms +[2025-09-02 06:40:07] [Rank 0] step:5521/10000 train_time:409249ms step_avg:74.13ms +[2025-09-02 06:40:07] [Rank 0] step:5521/10000 train_time:409249ms step_avg:74.13ms +[2025-09-02 06:40:08] [Rank 0] step:5541/10000 train_time:410815ms step_avg:74.14ms +[2025-09-02 06:40:08] [Rank 0] step:5541/10000 train_time:410815ms step_avg:74.14ms +[2025-09-02 06:40:10] [Rank 0] step:5561/10000 train_time:412382ms step_avg:74.16ms +[2025-09-02 06:40:10] [Rank 0] step:5561/10000 train_time:412382ms step_avg:74.16ms +[2025-09-02 06:40:12] [Rank 0] step:5581/10000 train_time:413950ms step_avg:74.17ms +[2025-09-02 06:40:12] [Rank 0] step:5581/10000 train_time:413950ms step_avg:74.17ms +[2025-09-02 06:40:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:40:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:40:25] [Rank 0] PRINT: step:5600/10000 val_loss:4.0794 svd_entropy: attn_qk:H=0.7420,top10E=0.27,eRank=144.9,q75/q25=115.35 attn_vo:H=0.8361,top10E=0.14,eRank=281.3,q75/q25=61.77 mlp_w1:H=0.7151,top10E=0.35,eRank=147.2,q75/q25=11.59 mlp_w2:H=0.8194,top10E=0.16,eRank=250.5,q75/q25=44.96 vo_prod:H=0.7496,top10E=0.23,eRank=151.7,q75/q25=3512.78 train_time:415676ms step_avg:74.23ms +[2025-09-02 06:40:25] [Rank 0] PRINT: step:5600/10000 val_loss:4.0794 svd_entropy: attn_qk:H=0.7420,top10E=0.27,eRank=144.9,q75/q25=115.35 attn_vo:H=0.8361,top10E=0.14,eRank=281.3,q75/q25=61.77 mlp_w1:H=0.7151,top10E=0.35,eRank=147.2,q75/q25=11.59 mlp_w2:H=0.8194,top10E=0.16,eRank=250.5,q75/q25=44.96 vo_prod:H=0.7496,top10E=0.23,eRank=151.7,q75/q25=3512.78 train_time:415676ms step_avg:74.23ms +[2025-09-02 06:40:25] [Rank 0] step:5601/10000 train_time:415688ms step_avg:74.22ms +[2025-09-02 06:40:25] [Rank 0] step:5601/10000 train_time:415688ms step_avg:74.22ms +[2025-09-02 06:40:26] [Rank 0] step:5621/10000 train_time:417109ms step_avg:74.21ms +[2025-09-02 06:40:26] [Rank 0] step:5621/10000 train_time:417109ms step_avg:74.21ms +[2025-09-02 06:40:28] [Rank 0] step:5641/10000 train_time:418676ms step_avg:74.22ms +[2025-09-02 06:40:28] [Rank 0] step:5641/10000 train_time:418676ms step_avg:74.22ms +[2025-09-02 06:40:30] [Rank 0] step:5661/10000 train_time:420239ms step_avg:74.23ms +[2025-09-02 06:40:30] [Rank 0] step:5661/10000 train_time:420239ms step_avg:74.23ms +[2025-09-02 06:40:31] [Rank 0] step:5681/10000 train_time:421810ms step_avg:74.25ms +[2025-09-02 06:40:31] [Rank 0] step:5681/10000 train_time:421810ms step_avg:74.25ms +[2025-09-02 06:40:33] [Rank 0] step:5701/10000 train_time:423375ms step_avg:74.26ms +[2025-09-02 06:40:33] [Rank 0] step:5701/10000 train_time:423375ms step_avg:74.26ms +[2025-09-02 06:40:34] [Rank 0] step:5721/10000 train_time:424943ms step_avg:74.28ms +[2025-09-02 06:40:34] [Rank 0] step:5721/10000 train_time:424943ms step_avg:74.28ms +[2025-09-02 
06:40:36] [Rank 0] step:5741/10000 train_time:426517ms step_avg:74.29ms +[2025-09-02 06:40:36] [Rank 0] step:5741/10000 train_time:426517ms step_avg:74.29ms +[2025-09-02 06:40:37] [Rank 0] step:5761/10000 train_time:428084ms step_avg:74.31ms +[2025-09-02 06:40:37] [Rank 0] step:5761/10000 train_time:428084ms step_avg:74.31ms +[2025-09-02 06:40:39] [Rank 0] step:5781/10000 train_time:429653ms step_avg:74.32ms +[2025-09-02 06:40:39] [Rank 0] step:5781/10000 train_time:429653ms step_avg:74.32ms +[2025-09-02 06:40:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:40:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:40:52] [Rank 0] PRINT: step:5800/10000 val_loss:4.0695 svd_entropy: attn_qk:H=0.7443,top10E=0.27,eRank=146.9,q75/q25=115.53 attn_vo:H=0.8379,top10E=0.14,eRank=284.2,q75/q25=59.51 mlp_w1:H=0.7180,top10E=0.34,eRank=149.4,q75/q25=11.86 mlp_w2:H=0.8205,top10E=0.16,eRank=252.5,q75/q25=45.13 vo_prod:H=0.7519,top10E=0.23,eRank=154.1,q75/q25=3185.94 train_time:431380ms step_avg:74.38ms +[2025-09-02 06:40:52] [Rank 0] PRINT: step:5800/10000 val_loss:4.0695 svd_entropy: attn_qk:H=0.7443,top10E=0.27,eRank=146.9,q75/q25=115.53 attn_vo:H=0.8379,top10E=0.14,eRank=284.2,q75/q25=59.51 mlp_w1:H=0.7180,top10E=0.34,eRank=149.4,q75/q25=11.86 mlp_w2:H=0.8205,top10E=0.16,eRank=252.5,q75/q25=45.13 vo_prod:H=0.7519,top10E=0.23,eRank=154.1,q75/q25=3185.94 train_time:431380ms step_avg:74.38ms +[2025-09-02 06:40:52] [Rank 0] step:5801/10000 train_time:431392ms step_avg:74.37ms +[2025-09-02 06:40:52] [Rank 0] step:5801/10000 train_time:431392ms step_avg:74.37ms +[2025-09-02 06:40:54] [Rank 0] step:5821/10000 train_time:432813ms step_avg:74.35ms +[2025-09-02 06:40:54] [Rank 0] step:5821/10000 train_time:432813ms step_avg:74.35ms +[2025-09-02 06:40:55] [Rank 0] step:5841/10000 train_time:434380ms 
step_avg:74.37ms +[2025-09-02 06:40:55] [Rank 0] step:5841/10000 train_time:434380ms step_avg:74.37ms +[2025-09-02 06:40:57] [Rank 0] step:5861/10000 train_time:435950ms step_avg:74.38ms +[2025-09-02 06:40:57] [Rank 0] step:5861/10000 train_time:435950ms step_avg:74.38ms +[2025-09-02 06:40:59] [Rank 0] step:5881/10000 train_time:437520ms step_avg:74.40ms +[2025-09-02 06:40:59] [Rank 0] step:5881/10000 train_time:437520ms step_avg:74.40ms +[2025-09-02 06:41:00] [Rank 0] step:5901/10000 train_time:439089ms step_avg:74.41ms +[2025-09-02 06:41:00] [Rank 0] step:5901/10000 train_time:439089ms step_avg:74.41ms +[2025-09-02 06:41:02] [Rank 0] step:5921/10000 train_time:440659ms step_avg:74.42ms +[2025-09-02 06:41:02] [Rank 0] step:5921/10000 train_time:440659ms step_avg:74.42ms +[2025-09-02 06:41:03] [Rank 0] step:5941/10000 train_time:442233ms step_avg:74.44ms +[2025-09-02 06:41:03] [Rank 0] step:5941/10000 train_time:442233ms step_avg:74.44ms +[2025-09-02 06:41:05] [Rank 0] step:5961/10000 train_time:443808ms step_avg:74.45ms +[2025-09-02 06:41:05] [Rank 0] step:5961/10000 train_time:443808ms step_avg:74.45ms +[2025-09-02 06:41:06] [Rank 0] step:5981/10000 train_time:445384ms step_avg:74.47ms +[2025-09-02 06:41:06] [Rank 0] step:5981/10000 train_time:445384ms step_avg:74.47ms +[2025-09-02 06:41:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:41:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:41:20] [Rank 0] PRINT: step:6000/10000 val_loss:4.0472 svd_entropy: attn_qk:H=0.7465,top10E=0.26,eRank=148.8,q75/q25=115.73 attn_vo:H=0.8397,top10E=0.14,eRank=287.1,q75/q25=57.62 mlp_w1:H=0.7209,top10E=0.34,eRank=151.8,q75/q25=12.19 mlp_w2:H=0.8216,top10E=0.15,eRank=254.5,q75/q25=45.40 vo_prod:H=0.7543,top10E=0.22,eRank=156.5,q75/q25=2838.43 train_time:447111ms step_avg:74.52ms +[2025-09-02 06:41:20] [Rank 0] PRINT: step:6000/10000 val_loss:4.0472 svd_entropy: attn_qk:H=0.7465,top10E=0.26,eRank=148.8,q75/q25=115.73 attn_vo:H=0.8397,top10E=0.14,eRank=287.1,q75/q25=57.62 mlp_w1:H=0.7209,top10E=0.34,eRank=151.8,q75/q25=12.19 mlp_w2:H=0.8216,top10E=0.15,eRank=254.5,q75/q25=45.40 vo_prod:H=0.7543,top10E=0.22,eRank=156.5,q75/q25=2838.43 train_time:447111ms step_avg:74.52ms +[2025-09-02 06:41:20] [Rank 0] step:6001/10000 train_time:447123ms step_avg:74.51ms +[2025-09-02 06:41:20] [Rank 0] step:6001/10000 train_time:447123ms step_avg:74.51ms +[2025-09-02 06:41:21] [Rank 0] step:6021/10000 train_time:448562ms step_avg:74.50ms +[2025-09-02 06:41:21] [Rank 0] step:6021/10000 train_time:448562ms step_avg:74.50ms +[2025-09-02 06:41:23] [Rank 0] step:6041/10000 train_time:450137ms step_avg:74.51ms +[2025-09-02 06:41:23] [Rank 0] step:6041/10000 train_time:450137ms step_avg:74.51ms +[2025-09-02 06:41:24] [Rank 0] step:6061/10000 train_time:451717ms step_avg:74.53ms +[2025-09-02 06:41:24] [Rank 0] step:6061/10000 train_time:451717ms step_avg:74.53ms +[2025-09-02 06:41:26] [Rank 0] step:6081/10000 train_time:453288ms step_avg:74.54ms +[2025-09-02 06:41:26] [Rank 0] step:6081/10000 train_time:453288ms step_avg:74.54ms +[2025-09-02 06:41:28] [Rank 0] step:6101/10000 train_time:454863ms step_avg:74.56ms +[2025-09-02 06:41:28] [Rank 0] step:6101/10000 train_time:454863ms step_avg:74.56ms +[2025-09-02 06:41:29] [Rank 0] step:6121/10000 train_time:456696ms step_avg:74.61ms +[2025-09-02 06:41:29] [Rank 0] step:6121/10000 train_time:456696ms step_avg:74.61ms +[2025-09-02 
06:41:31] [Rank 0] step:6141/10000 train_time:458275ms step_avg:74.63ms +[2025-09-02 06:41:31] [Rank 0] step:6141/10000 train_time:458275ms step_avg:74.63ms +[2025-09-02 06:41:33] [Rank 0] step:6161/10000 train_time:459847ms step_avg:74.64ms +[2025-09-02 06:41:33] [Rank 0] step:6161/10000 train_time:459847ms step_avg:74.64ms +[2025-09-02 06:41:34] [Rank 0] step:6181/10000 train_time:461418ms step_avg:74.65ms +[2025-09-02 06:41:34] [Rank 0] step:6181/10000 train_time:461418ms step_avg:74.65ms +[2025-09-02 06:41:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:41:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:41:47] [Rank 0] PRINT: step:6200/10000 val_loss:4.0318 svd_entropy: attn_qk:H=0.7486,top10E=0.26,eRank=150.8,q75/q25=116.41 attn_vo:H=0.8414,top10E=0.14,eRank=289.8,q75/q25=55.65 mlp_w1:H=0.7235,top10E=0.34,eRank=153.9,q75/q25=12.45 mlp_w2:H=0.8224,top10E=0.15,eRank=256.3,q75/q25=45.91 vo_prod:H=0.7564,top10E=0.22,eRank=158.7,q75/q25=2559.23 train_time:463147ms step_avg:74.70ms +[2025-09-02 06:41:47] [Rank 0] PRINT: step:6200/10000 val_loss:4.0318 svd_entropy: attn_qk:H=0.7486,top10E=0.26,eRank=150.8,q75/q25=116.41 attn_vo:H=0.8414,top10E=0.14,eRank=289.8,q75/q25=55.65 mlp_w1:H=0.7235,top10E=0.34,eRank=153.9,q75/q25=12.45 mlp_w2:H=0.8224,top10E=0.15,eRank=256.3,q75/q25=45.91 vo_prod:H=0.7564,top10E=0.22,eRank=158.7,q75/q25=2559.23 train_time:463147ms step_avg:74.70ms +[2025-09-02 06:41:47] [Rank 0] step:6201/10000 train_time:463159ms step_avg:74.69ms +[2025-09-02 06:41:47] [Rank 0] step:6201/10000 train_time:463159ms step_avg:74.69ms +[2025-09-02 06:41:49] [Rank 0] step:6221/10000 train_time:464581ms step_avg:74.68ms +[2025-09-02 06:41:49] [Rank 0] step:6221/10000 train_time:464581ms step_avg:74.68ms +[2025-09-02 06:41:51] [Rank 0] step:6241/10000 train_time:466147ms 
step_avg:74.69ms +[2025-09-02 06:41:51] [Rank 0] step:6241/10000 train_time:466147ms step_avg:74.69ms +[2025-09-02 06:41:52] [Rank 0] step:6261/10000 train_time:467719ms step_avg:74.70ms +[2025-09-02 06:41:52] [Rank 0] step:6261/10000 train_time:467719ms step_avg:74.70ms +[2025-09-02 06:41:54] [Rank 0] step:6281/10000 train_time:469292ms step_avg:74.72ms +[2025-09-02 06:41:54] [Rank 0] step:6281/10000 train_time:469292ms step_avg:74.72ms +[2025-09-02 06:41:55] [Rank 0] step:6301/10000 train_time:470865ms step_avg:74.73ms +[2025-09-02 06:41:55] [Rank 0] step:6301/10000 train_time:470865ms step_avg:74.73ms +[2025-09-02 06:41:57] [Rank 0] step:6321/10000 train_time:472435ms step_avg:74.74ms +[2025-09-02 06:41:57] [Rank 0] step:6321/10000 train_time:472435ms step_avg:74.74ms +[2025-09-02 06:41:58] [Rank 0] step:6341/10000 train_time:474009ms step_avg:74.75ms +[2025-09-02 06:41:58] [Rank 0] step:6341/10000 train_time:474009ms step_avg:74.75ms +[2025-09-02 06:42:00] [Rank 0] step:6361/10000 train_time:475585ms step_avg:74.77ms +[2025-09-02 06:42:00] [Rank 0] step:6361/10000 train_time:475585ms step_avg:74.77ms +[2025-09-02 06:42:02] [Rank 0] step:6381/10000 train_time:477164ms step_avg:74.78ms +[2025-09-02 06:42:02] [Rank 0] step:6381/10000 train_time:477164ms step_avg:74.78ms +[2025-09-02 06:42:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:42:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:42:15] [Rank 0] PRINT: step:6400/10000 val_loss:4.0170 svd_entropy: attn_qk:H=0.7505,top10E=0.26,eRank=152.5,q75/q25=116.67 attn_vo:H=0.8428,top10E=0.14,eRank=292.2,q75/q25=53.99 mlp_w1:H=0.7258,top10E=0.33,eRank=155.8,q75/q25=12.69 mlp_w2:H=0.8232,top10E=0.15,eRank=257.8,q75/q25=46.26 vo_prod:H=0.7584,top10E=0.22,eRank=160.7,q75/q25=2322.89 train_time:478898ms step_avg:74.83ms +[2025-09-02 06:42:15] [Rank 0] PRINT: step:6400/10000 val_loss:4.0170 svd_entropy: attn_qk:H=0.7505,top10E=0.26,eRank=152.5,q75/q25=116.67 attn_vo:H=0.8428,top10E=0.14,eRank=292.2,q75/q25=53.99 mlp_w1:H=0.7258,top10E=0.33,eRank=155.8,q75/q25=12.69 mlp_w2:H=0.8232,top10E=0.15,eRank=257.8,q75/q25=46.26 vo_prod:H=0.7584,top10E=0.22,eRank=160.7,q75/q25=2322.89 train_time:478898ms step_avg:74.83ms +[2025-09-02 06:42:15] [Rank 0] step:6401/10000 train_time:478909ms step_avg:74.82ms +[2025-09-02 06:42:15] [Rank 0] step:6401/10000 train_time:478909ms step_avg:74.82ms +[2025-09-02 06:42:16] [Rank 0] step:6421/10000 train_time:480333ms step_avg:74.81ms +[2025-09-02 06:42:16] [Rank 0] step:6421/10000 train_time:480333ms step_avg:74.81ms +[2025-09-02 06:42:18] [Rank 0] step:6441/10000 train_time:481906ms step_avg:74.82ms +[2025-09-02 06:42:18] [Rank 0] step:6441/10000 train_time:481906ms step_avg:74.82ms +[2025-09-02 06:42:20] [Rank 0] step:6461/10000 train_time:483482ms step_avg:74.83ms +[2025-09-02 06:42:20] [Rank 0] step:6461/10000 train_time:483482ms step_avg:74.83ms +[2025-09-02 06:42:21] [Rank 0] step:6481/10000 train_time:485061ms step_avg:74.84ms +[2025-09-02 06:42:21] [Rank 0] step:6481/10000 train_time:485061ms step_avg:74.84ms +[2025-09-02 06:42:23] [Rank 0] step:6501/10000 train_time:486629ms step_avg:74.85ms +[2025-09-02 06:42:23] [Rank 0] step:6501/10000 train_time:486629ms step_avg:74.85ms +[2025-09-02 06:42:24] [Rank 0] step:6521/10000 train_time:488199ms step_avg:74.87ms +[2025-09-02 06:42:24] [Rank 0] step:6521/10000 train_time:488199ms step_avg:74.87ms +[2025-09-02 
06:42:26] [Rank 0] step:6541/10000 train_time:489773ms step_avg:74.88ms +[2025-09-02 06:42:26] [Rank 0] step:6541/10000 train_time:489773ms step_avg:74.88ms +[2025-09-02 06:42:27] [Rank 0] step:6561/10000 train_time:491350ms step_avg:74.89ms +[2025-09-02 06:42:27] [Rank 0] step:6561/10000 train_time:491350ms step_avg:74.89ms +[2025-09-02 06:42:29] [Rank 0] step:6581/10000 train_time:492921ms step_avg:74.90ms +[2025-09-02 06:42:29] [Rank 0] step:6581/10000 train_time:492921ms step_avg:74.90ms +[2025-09-02 06:42:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:42:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:42:42] [Rank 0] PRINT: step:6600/10000 val_loss:4.0040 svd_entropy: attn_qk:H=0.7522,top10E=0.26,eRank=154.0,q75/q25=116.64 attn_vo:H=0.8442,top10E=0.13,eRank=294.5,q75/q25=52.15 mlp_w1:H=0.7279,top10E=0.33,eRank=157.6,q75/q25=12.90 mlp_w2:H=0.8238,top10E=0.15,eRank=259.2,q75/q25=46.90 vo_prod:H=0.7602,top10E=0.22,eRank=162.7,q75/q25=2124.34 train_time:494655ms step_avg:74.95ms +[2025-09-02 06:42:42] [Rank 0] PRINT: step:6600/10000 val_loss:4.0040 svd_entropy: attn_qk:H=0.7522,top10E=0.26,eRank=154.0,q75/q25=116.64 attn_vo:H=0.8442,top10E=0.13,eRank=294.5,q75/q25=52.15 mlp_w1:H=0.7279,top10E=0.33,eRank=157.6,q75/q25=12.90 mlp_w2:H=0.8238,top10E=0.15,eRank=259.2,q75/q25=46.90 vo_prod:H=0.7602,top10E=0.22,eRank=162.7,q75/q25=2124.34 train_time:494655ms step_avg:74.95ms +[2025-09-02 06:42:42] [Rank 0] step:6601/10000 train_time:494667ms step_avg:74.94ms +[2025-09-02 06:42:42] [Rank 0] step:6601/10000 train_time:494667ms step_avg:74.94ms +[2025-09-02 06:42:44] [Rank 0] step:6621/10000 train_time:496091ms step_avg:74.93ms +[2025-09-02 06:42:44] [Rank 0] step:6621/10000 train_time:496091ms step_avg:74.93ms +[2025-09-02 06:42:45] [Rank 0] step:6641/10000 train_time:497666ms 
step_avg:74.94ms +[2025-09-02 06:42:45] [Rank 0] step:6641/10000 train_time:497666ms step_avg:74.94ms +[2025-09-02 06:42:47] [Rank 0] step:6661/10000 train_time:499242ms step_avg:74.95ms +[2025-09-02 06:42:47] [Rank 0] step:6661/10000 train_time:499242ms step_avg:74.95ms +[2025-09-02 06:42:49] [Rank 0] step:6681/10000 train_time:500831ms step_avg:74.96ms +[2025-09-02 06:42:49] [Rank 0] step:6681/10000 train_time:500831ms step_avg:74.96ms +[2025-09-02 06:42:50] [Rank 0] step:6701/10000 train_time:502442ms step_avg:74.98ms +[2025-09-02 06:42:50] [Rank 0] step:6701/10000 train_time:502442ms step_avg:74.98ms +[2025-09-02 06:42:52] [Rank 0] step:6721/10000 train_time:504042ms step_avg:75.00ms +[2025-09-02 06:42:52] [Rank 0] step:6721/10000 train_time:504042ms step_avg:75.00ms +[2025-09-02 06:42:53] [Rank 0] step:6741/10000 train_time:505639ms step_avg:75.01ms +[2025-09-02 06:42:53] [Rank 0] step:6741/10000 train_time:505639ms step_avg:75.01ms +[2025-09-02 06:42:55] [Rank 0] step:6761/10000 train_time:507243ms step_avg:75.02ms +[2025-09-02 06:42:55] [Rank 0] step:6761/10000 train_time:507243ms step_avg:75.02ms +[2025-09-02 06:42:57] [Rank 0] step:6781/10000 train_time:508848ms step_avg:75.04ms +[2025-09-02 06:42:57] [Rank 0] step:6781/10000 train_time:508848ms step_avg:75.04ms +[2025-09-02 06:42:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:42:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:43:10] [Rank 0] PRINT: step:6800/10000 val_loss:3.9878 svd_entropy: attn_qk:H=0.7537,top10E=0.26,eRank=155.4,q75/q25=116.73 attn_vo:H=0.8455,top10E=0.13,eRank=296.6,q75/q25=50.92 mlp_w1:H=0.7298,top10E=0.33,eRank=159.4,q75/q25=13.21 mlp_w2:H=0.8244,top10E=0.15,eRank=260.4,q75/q25=47.69 vo_prod:H=0.7619,top10E=0.21,eRank=164.5,q75/q25=1973.40 train_time:510616ms step_avg:75.09ms +[2025-09-02 06:43:10] [Rank 0] PRINT: step:6800/10000 val_loss:3.9878 svd_entropy: attn_qk:H=0.7537,top10E=0.26,eRank=155.4,q75/q25=116.73 attn_vo:H=0.8455,top10E=0.13,eRank=296.6,q75/q25=50.92 mlp_w1:H=0.7298,top10E=0.33,eRank=159.4,q75/q25=13.21 mlp_w2:H=0.8244,top10E=0.15,eRank=260.4,q75/q25=47.69 vo_prod:H=0.7619,top10E=0.21,eRank=164.5,q75/q25=1973.40 train_time:510616ms step_avg:75.09ms +[2025-09-02 06:43:10] [Rank 0] step:6801/10000 train_time:510628ms step_avg:75.08ms +[2025-09-02 06:43:10] [Rank 0] step:6801/10000 train_time:510628ms step_avg:75.08ms +[2025-09-02 06:43:12] [Rank 0] step:6821/10000 train_time:512082ms step_avg:75.07ms +[2025-09-02 06:43:12] [Rank 0] step:6821/10000 train_time:512082ms step_avg:75.07ms +[2025-09-02 06:43:13] [Rank 0] step:6841/10000 train_time:513678ms step_avg:75.09ms +[2025-09-02 06:43:13] [Rank 0] step:6841/10000 train_time:513678ms step_avg:75.09ms +[2025-09-02 06:43:15] [Rank 0] step:6861/10000 train_time:515281ms step_avg:75.10ms +[2025-09-02 06:43:15] [Rank 0] step:6861/10000 train_time:515281ms step_avg:75.10ms +[2025-09-02 06:43:16] [Rank 0] step:6881/10000 train_time:516881ms step_avg:75.12ms +[2025-09-02 06:43:16] [Rank 0] step:6881/10000 train_time:516881ms step_avg:75.12ms +[2025-09-02 06:43:18] [Rank 0] step:6901/10000 train_time:518488ms step_avg:75.13ms +[2025-09-02 06:43:18] [Rank 0] step:6901/10000 train_time:518488ms step_avg:75.13ms +[2025-09-02 06:43:20] [Rank 0] step:6921/10000 train_time:520085ms step_avg:75.15ms +[2025-09-02 06:43:20] [Rank 0] step:6921/10000 train_time:520085ms step_avg:75.15ms +[2025-09-02 
06:43:21] [Rank 0] step:6941/10000 train_time:521695ms step_avg:75.16ms +[2025-09-02 06:43:21] [Rank 0] step:6941/10000 train_time:521695ms step_avg:75.16ms +[2025-09-02 06:43:23] [Rank 0] step:6961/10000 train_time:523315ms step_avg:75.18ms +[2025-09-02 06:43:23] [Rank 0] step:6961/10000 train_time:523315ms step_avg:75.18ms +[2025-09-02 06:43:24] [Rank 0] step:6981/10000 train_time:524926ms step_avg:75.19ms +[2025-09-02 06:43:24] [Rank 0] step:6981/10000 train_time:524926ms step_avg:75.19ms +[2025-09-02 06:43:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:43:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:43:38] [Rank 0] PRINT: step:7000/10000 val_loss:3.9723 svd_entropy: attn_qk:H=0.7551,top10E=0.25,eRank=156.8,q75/q25=116.81 attn_vo:H=0.8466,top10E=0.13,eRank=298.5,q75/q25=49.62 mlp_w1:H=0.7317,top10E=0.32,eRank=161.0,q75/q25=13.32 mlp_w2:H=0.8250,top10E=0.15,eRank=261.7,q75/q25=47.77 vo_prod:H=0.7635,top10E=0.21,eRank=166.3,q75/q25=1835.13 train_time:526695ms step_avg:75.24ms +[2025-09-02 06:43:38] [Rank 0] PRINT: step:7000/10000 val_loss:3.9723 svd_entropy: attn_qk:H=0.7551,top10E=0.25,eRank=156.8,q75/q25=116.81 attn_vo:H=0.8466,top10E=0.13,eRank=298.5,q75/q25=49.62 mlp_w1:H=0.7317,top10E=0.32,eRank=161.0,q75/q25=13.32 mlp_w2:H=0.8250,top10E=0.15,eRank=261.7,q75/q25=47.77 vo_prod:H=0.7635,top10E=0.21,eRank=166.3,q75/q25=1835.13 train_time:526695ms step_avg:75.24ms +[2025-09-02 06:43:38] [Rank 0] step:7001/10000 train_time:526708ms step_avg:75.23ms +[2025-09-02 06:43:38] [Rank 0] step:7001/10000 train_time:526708ms step_avg:75.23ms +[2025-09-02 06:43:39] [Rank 0] step:7021/10000 train_time:528171ms step_avg:75.23ms +[2025-09-02 06:43:39] [Rank 0] step:7021/10000 train_time:528171ms step_avg:75.23ms +[2025-09-02 06:43:41] [Rank 0] step:7041/10000 train_time:529773ms 
step_avg:75.24ms +[2025-09-02 06:43:41] [Rank 0] step:7041/10000 train_time:529773ms step_avg:75.24ms +[2025-09-02 06:43:43] [Rank 0] step:7061/10000 train_time:531374ms step_avg:75.25ms +[2025-09-02 06:43:43] [Rank 0] step:7061/10000 train_time:531374ms step_avg:75.25ms +[2025-09-02 06:43:44] [Rank 0] step:7081/10000 train_time:532977ms step_avg:75.27ms +[2025-09-02 06:43:44] [Rank 0] step:7081/10000 train_time:532977ms step_avg:75.27ms +[2025-09-02 06:43:46] [Rank 0] step:7101/10000 train_time:534581ms step_avg:75.28ms +[2025-09-02 06:43:46] [Rank 0] step:7101/10000 train_time:534581ms step_avg:75.28ms +[2025-09-02 06:43:47] [Rank 0] step:7121/10000 train_time:536181ms step_avg:75.30ms +[2025-09-02 06:43:47] [Rank 0] step:7121/10000 train_time:536181ms step_avg:75.30ms +[2025-09-02 06:43:49] [Rank 0] step:7141/10000 train_time:537785ms step_avg:75.31ms +[2025-09-02 06:43:49] [Rank 0] step:7141/10000 train_time:537785ms step_avg:75.31ms +[2025-09-02 06:43:51] [Rank 0] step:7161/10000 train_time:539388ms step_avg:75.32ms +[2025-09-02 06:43:51] [Rank 0] step:7161/10000 train_time:539388ms step_avg:75.32ms +[2025-09-02 06:43:52] [Rank 0] step:7181/10000 train_time:540989ms step_avg:75.34ms +[2025-09-02 06:43:52] [Rank 0] step:7181/10000 train_time:540989ms step_avg:75.34ms +[2025-09-02 06:43:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:43:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:44:05] [Rank 0] PRINT: step:7200/10000 val_loss:3.9631 svd_entropy: attn_qk:H=0.7565,top10E=0.25,eRank=158.1,q75/q25=116.62 attn_vo:H=0.8477,top10E=0.13,eRank=300.3,q75/q25=48.46 mlp_w1:H=0.7333,top10E=0.32,eRank=162.5,q75/q25=13.56 mlp_w2:H=0.8256,top10E=0.15,eRank=262.9,q75/q25=48.08 vo_prod:H=0.7650,top10E=0.21,eRank=167.9,q75/q25=1718.28 train_time:542755ms step_avg:75.38ms +[2025-09-02 06:44:05] [Rank 0] PRINT: step:7200/10000 val_loss:3.9631 svd_entropy: attn_qk:H=0.7565,top10E=0.25,eRank=158.1,q75/q25=116.62 attn_vo:H=0.8477,top10E=0.13,eRank=300.3,q75/q25=48.46 mlp_w1:H=0.7333,top10E=0.32,eRank=162.5,q75/q25=13.56 mlp_w2:H=0.8256,top10E=0.15,eRank=262.9,q75/q25=48.08 vo_prod:H=0.7650,top10E=0.21,eRank=167.9,q75/q25=1718.28 train_time:542755ms step_avg:75.38ms +[2025-09-02 06:44:05] [Rank 0] step:7201/10000 train_time:542768ms step_avg:75.37ms +[2025-09-02 06:44:05] [Rank 0] step:7201/10000 train_time:542768ms step_avg:75.37ms +[2025-09-02 06:44:07] [Rank 0] step:7221/10000 train_time:544219ms step_avg:75.37ms +[2025-09-02 06:44:07] [Rank 0] step:7221/10000 train_time:544219ms step_avg:75.37ms +[2025-09-02 06:44:09] [Rank 0] step:7241/10000 train_time:545815ms step_avg:75.38ms +[2025-09-02 06:44:09] [Rank 0] step:7241/10000 train_time:545815ms step_avg:75.38ms +[2025-09-02 06:44:10] [Rank 0] step:7261/10000 train_time:547412ms step_avg:75.39ms +[2025-09-02 06:44:10] [Rank 0] step:7261/10000 train_time:547412ms step_avg:75.39ms +[2025-09-02 06:44:12] [Rank 0] step:7281/10000 train_time:549019ms step_avg:75.40ms +[2025-09-02 06:44:12] [Rank 0] step:7281/10000 train_time:549019ms step_avg:75.40ms +[2025-09-02 06:44:14] [Rank 0] step:7301/10000 train_time:550621ms step_avg:75.42ms +[2025-09-02 06:44:14] [Rank 0] step:7301/10000 train_time:550621ms step_avg:75.42ms +[2025-09-02 06:44:15] [Rank 0] step:7321/10000 train_time:552233ms step_avg:75.43ms +[2025-09-02 06:44:15] [Rank 0] step:7321/10000 train_time:552233ms step_avg:75.43ms +[2025-09-02 
06:44:17] [Rank 0] step:7341/10000 train_time:553836ms step_avg:75.44ms +[2025-09-02 06:44:17] [Rank 0] step:7341/10000 train_time:553836ms step_avg:75.44ms +[2025-09-02 06:44:18] [Rank 0] step:7361/10000 train_time:555441ms step_avg:75.46ms +[2025-09-02 06:44:18] [Rank 0] step:7361/10000 train_time:555441ms step_avg:75.46ms +[2025-09-02 06:44:20] [Rank 0] step:7381/10000 train_time:557051ms step_avg:75.47ms +[2025-09-02 06:44:20] [Rank 0] step:7381/10000 train_time:557051ms step_avg:75.47ms +[2025-09-02 06:44:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:44:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:44:33] [Rank 0] PRINT: step:7400/10000 val_loss:3.9433 svd_entropy: attn_qk:H=0.7577,top10E=0.25,eRank=159.3,q75/q25=116.22 attn_vo:H=0.8486,top10E=0.13,eRank=301.9,q75/q25=47.15 mlp_w1:H=0.7347,top10E=0.32,eRank=163.8,q75/q25=13.77 mlp_w2:H=0.8261,top10E=0.15,eRank=263.9,q75/q25=48.29 vo_prod:H=0.7664,top10E=0.21,eRank=169.4,q75/q25=1605.37 train_time:558803ms step_avg:75.51ms +[2025-09-02 06:44:33] [Rank 0] PRINT: step:7400/10000 val_loss:3.9433 svd_entropy: attn_qk:H=0.7577,top10E=0.25,eRank=159.3,q75/q25=116.22 attn_vo:H=0.8486,top10E=0.13,eRank=301.9,q75/q25=47.15 mlp_w1:H=0.7347,top10E=0.32,eRank=163.8,q75/q25=13.77 mlp_w2:H=0.8261,top10E=0.15,eRank=263.9,q75/q25=48.29 vo_prod:H=0.7664,top10E=0.21,eRank=169.4,q75/q25=1605.37 train_time:558803ms step_avg:75.51ms +[2025-09-02 06:44:33] [Rank 0] step:7401/10000 train_time:558816ms step_avg:75.51ms +[2025-09-02 06:44:33] [Rank 0] step:7401/10000 train_time:558816ms step_avg:75.51ms +[2025-09-02 06:44:35] [Rank 0] step:7421/10000 train_time:560280ms step_avg:75.50ms +[2025-09-02 06:44:35] [Rank 0] step:7421/10000 train_time:560280ms step_avg:75.50ms +[2025-09-02 06:44:36] [Rank 0] step:7441/10000 train_time:561881ms 
step_avg:75.51ms +[2025-09-02 06:44:36] [Rank 0] step:7441/10000 train_time:561881ms step_avg:75.51ms +[2025-09-02 06:44:38] [Rank 0] step:7461/10000 train_time:563487ms step_avg:75.52ms +[2025-09-02 06:44:38] [Rank 0] step:7461/10000 train_time:563487ms step_avg:75.52ms +[2025-09-02 06:44:40] [Rank 0] step:7481/10000 train_time:565093ms step_avg:75.54ms +[2025-09-02 06:44:40] [Rank 0] step:7481/10000 train_time:565093ms step_avg:75.54ms +[2025-09-02 06:44:41] [Rank 0] step:7501/10000 train_time:566702ms step_avg:75.55ms +[2025-09-02 06:44:41] [Rank 0] step:7501/10000 train_time:566702ms step_avg:75.55ms +[2025-09-02 06:44:43] [Rank 0] step:7521/10000 train_time:568312ms step_avg:75.56ms +[2025-09-02 06:44:43] [Rank 0] step:7521/10000 train_time:568312ms step_avg:75.56ms +[2025-09-02 06:44:45] [Rank 0] step:7541/10000 train_time:569936ms step_avg:75.58ms +[2025-09-02 06:44:45] [Rank 0] step:7541/10000 train_time:569936ms step_avg:75.58ms +[2025-09-02 06:44:46] [Rank 0] step:7561/10000 train_time:571531ms step_avg:75.59ms +[2025-09-02 06:44:46] [Rank 0] step:7561/10000 train_time:571531ms step_avg:75.59ms +[2025-09-02 06:44:48] [Rank 0] step:7581/10000 train_time:573149ms step_avg:75.60ms +[2025-09-02 06:44:48] [Rank 0] step:7581/10000 train_time:573149ms step_avg:75.60ms +[2025-09-02 06:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:45:01] [Rank 0] PRINT: step:7600/10000 val_loss:3.9405 svd_entropy: attn_qk:H=0.7589,top10E=0.25,eRank=160.4,q75/q25=115.25 attn_vo:H=0.8494,top10E=0.13,eRank=303.2,q75/q25=46.14 mlp_w1:H=0.7360,top10E=0.32,eRank=165.1,q75/q25=13.91 mlp_w2:H=0.8264,top10E=0.14,eRank=264.7,q75/q25=48.66 vo_prod:H=0.7674,top10E=0.21,eRank=170.5,q75/q25=1525.52 train_time:574924ms step_avg:75.65ms +[2025-09-02 06:45:01] [Rank 0] PRINT: step:7600/10000 val_loss:3.9405 svd_entropy: attn_qk:H=0.7589,top10E=0.25,eRank=160.4,q75/q25=115.25 attn_vo:H=0.8494,top10E=0.13,eRank=303.2,q75/q25=46.14 mlp_w1:H=0.7360,top10E=0.32,eRank=165.1,q75/q25=13.91 mlp_w2:H=0.8264,top10E=0.14,eRank=264.7,q75/q25=48.66 vo_prod:H=0.7674,top10E=0.21,eRank=170.5,q75/q25=1525.52 train_time:574924ms step_avg:75.65ms +[2025-09-02 06:45:01] [Rank 0] step:7601/10000 train_time:574937ms step_avg:75.64ms +[2025-09-02 06:45:01] [Rank 0] step:7601/10000 train_time:574937ms step_avg:75.64ms +[2025-09-02 06:45:03] [Rank 0] step:7621/10000 train_time:576400ms step_avg:75.63ms +[2025-09-02 06:45:03] [Rank 0] step:7621/10000 train_time:576400ms step_avg:75.63ms +[2025-09-02 06:45:04] [Rank 0] step:7641/10000 train_time:578004ms step_avg:75.65ms +[2025-09-02 06:45:04] [Rank 0] step:7641/10000 train_time:578004ms step_avg:75.65ms +[2025-09-02 06:45:06] [Rank 0] step:7661/10000 train_time:579609ms step_avg:75.66ms +[2025-09-02 06:45:06] [Rank 0] step:7661/10000 train_time:579609ms step_avg:75.66ms +[2025-09-02 06:45:08] [Rank 0] step:7681/10000 train_time:581212ms step_avg:75.67ms +[2025-09-02 06:45:08] [Rank 0] step:7681/10000 train_time:581212ms step_avg:75.67ms +[2025-09-02 06:45:09] [Rank 0] step:7701/10000 train_time:582815ms step_avg:75.68ms +[2025-09-02 06:45:09] [Rank 0] step:7701/10000 train_time:582815ms step_avg:75.68ms +[2025-09-02 06:45:11] [Rank 0] step:7721/10000 train_time:584440ms step_avg:75.69ms +[2025-09-02 06:45:11] [Rank 0] step:7721/10000 train_time:584440ms step_avg:75.69ms +[2025-09-02 
06:45:12] [Rank 0] step:7741/10000 train_time:586045ms step_avg:75.71ms +[2025-09-02 06:45:12] [Rank 0] step:7741/10000 train_time:586045ms step_avg:75.71ms +[2025-09-02 06:45:14] [Rank 0] step:7761/10000 train_time:587657ms step_avg:75.72ms +[2025-09-02 06:45:14] [Rank 0] step:7761/10000 train_time:587657ms step_avg:75.72ms +[2025-09-02 06:45:16] [Rank 0] step:7781/10000 train_time:589273ms step_avg:75.73ms +[2025-09-02 06:45:16] [Rank 0] step:7781/10000 train_time:589273ms step_avg:75.73ms +[2025-09-02 06:45:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:45:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:45:29] [Rank 0] PRINT: step:7800/10000 val_loss:3.9255 svd_entropy: attn_qk:H=0.7599,top10E=0.25,eRank=161.4,q75/q25=115.05 attn_vo:H=0.8502,top10E=0.13,eRank=304.6,q75/q25=45.30 mlp_w1:H=0.7373,top10E=0.31,eRank=166.3,q75/q25=14.02 mlp_w2:H=0.8269,top10E=0.14,eRank=265.7,q75/q25=48.79 vo_prod:H=0.7686,top10E=0.21,eRank=171.9,q75/q25=1462.14 train_time:591054ms step_avg:75.78ms +[2025-09-02 06:45:29] [Rank 0] PRINT: step:7800/10000 val_loss:3.9255 svd_entropy: attn_qk:H=0.7599,top10E=0.25,eRank=161.4,q75/q25=115.05 attn_vo:H=0.8502,top10E=0.13,eRank=304.6,q75/q25=45.30 mlp_w1:H=0.7373,top10E=0.31,eRank=166.3,q75/q25=14.02 mlp_w2:H=0.8269,top10E=0.14,eRank=265.7,q75/q25=48.79 vo_prod:H=0.7686,top10E=0.21,eRank=171.9,q75/q25=1462.14 train_time:591054ms step_avg:75.78ms +[2025-09-02 06:45:29] [Rank 0] step:7801/10000 train_time:591067ms step_avg:75.77ms +[2025-09-02 06:45:29] [Rank 0] step:7801/10000 train_time:591067ms step_avg:75.77ms +[2025-09-02 06:45:31] [Rank 0] step:7821/10000 train_time:592529ms step_avg:75.76ms +[2025-09-02 06:45:31] [Rank 0] step:7821/10000 train_time:592529ms step_avg:75.76ms +[2025-09-02 06:45:32] [Rank 0] step:7841/10000 train_time:594133ms 
step_avg:75.77ms +[2025-09-02 06:45:32] [Rank 0] step:7841/10000 train_time:594133ms step_avg:75.77ms +[2025-09-02 06:45:34] [Rank 0] step:7861/10000 train_time:595743ms step_avg:75.78ms +[2025-09-02 06:45:34] [Rank 0] step:7861/10000 train_time:595743ms step_avg:75.78ms +[2025-09-02 06:45:35] [Rank 0] step:7881/10000 train_time:597358ms step_avg:75.80ms +[2025-09-02 06:45:35] [Rank 0] step:7881/10000 train_time:597358ms step_avg:75.80ms +[2025-09-02 06:45:37] [Rank 0] step:7901/10000 train_time:598965ms step_avg:75.81ms +[2025-09-02 06:45:37] [Rank 0] step:7901/10000 train_time:598965ms step_avg:75.81ms +[2025-09-02 06:45:39] [Rank 0] step:7921/10000 train_time:600573ms step_avg:75.82ms +[2025-09-02 06:45:39] [Rank 0] step:7921/10000 train_time:600573ms step_avg:75.82ms +[2025-09-02 06:45:40] [Rank 0] step:7941/10000 train_time:602188ms step_avg:75.83ms +[2025-09-02 06:45:40] [Rank 0] step:7941/10000 train_time:602188ms step_avg:75.83ms +[2025-09-02 06:45:42] [Rank 0] step:7961/10000 train_time:603800ms step_avg:75.84ms +[2025-09-02 06:45:42] [Rank 0] step:7961/10000 train_time:603800ms step_avg:75.84ms +[2025-09-02 06:45:43] [Rank 0] step:7981/10000 train_time:605408ms step_avg:75.86ms +[2025-09-02 06:45:43] [Rank 0] step:7981/10000 train_time:605408ms step_avg:75.86ms +[2025-09-02 06:45:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:45:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:45:57] [Rank 0] PRINT: step:8000/10000 val_loss:3.9110 svd_entropy: attn_qk:H=0.7608,top10E=0.25,eRank=162.3,q75/q25=114.77 attn_vo:H=0.8509,top10E=0.13,eRank=305.8,q75/q25=44.55 mlp_w1:H=0.7383,top10E=0.31,eRank=167.3,q75/q25=14.20 mlp_w2:H=0.8273,top10E=0.14,eRank=266.5,q75/q25=49.18 vo_prod:H=0.7697,top10E=0.21,eRank=173.2,q75/q25=1366.78 train_time:607182ms step_avg:75.90ms +[2025-09-02 06:45:57] [Rank 0] PRINT: step:8000/10000 val_loss:3.9110 svd_entropy: attn_qk:H=0.7608,top10E=0.25,eRank=162.3,q75/q25=114.77 attn_vo:H=0.8509,top10E=0.13,eRank=305.8,q75/q25=44.55 mlp_w1:H=0.7383,top10E=0.31,eRank=167.3,q75/q25=14.20 mlp_w2:H=0.8273,top10E=0.14,eRank=266.5,q75/q25=49.18 vo_prod:H=0.7697,top10E=0.21,eRank=173.2,q75/q25=1366.78 train_time:607182ms step_avg:75.90ms +[2025-09-02 06:45:57] [Rank 0] step:8001/10000 train_time:607194ms step_avg:75.89ms +[2025-09-02 06:45:57] [Rank 0] step:8001/10000 train_time:607194ms step_avg:75.89ms +[2025-09-02 06:45:58] [Rank 0] step:8021/10000 train_time:608653ms step_avg:75.88ms +[2025-09-02 06:45:58] [Rank 0] step:8021/10000 train_time:608653ms step_avg:75.88ms +[2025-09-02 06:46:00] [Rank 0] step:8041/10000 train_time:610269ms step_avg:75.89ms +[2025-09-02 06:46:00] [Rank 0] step:8041/10000 train_time:610269ms step_avg:75.89ms +[2025-09-02 06:46:02] [Rank 0] step:8061/10000 train_time:611875ms step_avg:75.91ms +[2025-09-02 06:46:02] [Rank 0] step:8061/10000 train_time:611875ms step_avg:75.91ms +[2025-09-02 06:46:03] [Rank 0] step:8081/10000 train_time:613477ms step_avg:75.92ms +[2025-09-02 06:46:03] [Rank 0] step:8081/10000 train_time:613477ms step_avg:75.92ms +[2025-09-02 06:46:05] [Rank 0] step:8101/10000 train_time:615087ms step_avg:75.93ms +[2025-09-02 06:46:05] [Rank 0] step:8101/10000 train_time:615087ms step_avg:75.93ms +[2025-09-02 06:46:06] [Rank 0] step:8121/10000 train_time:616695ms step_avg:75.94ms +[2025-09-02 06:46:06] [Rank 0] step:8121/10000 train_time:616695ms step_avg:75.94ms +[2025-09-02 
06:46:08] [Rank 0] step:8141/10000 train_time:618400ms step_avg:75.96ms +[2025-09-02 06:46:08] [Rank 0] step:8141/10000 train_time:618400ms step_avg:75.96ms +[2025-09-02 06:46:10] [Rank 0] step:8161/10000 train_time:620017ms step_avg:75.97ms +[2025-09-02 06:46:10] [Rank 0] step:8161/10000 train_time:620017ms step_avg:75.97ms +[2025-09-02 06:46:11] [Rank 0] step:8181/10000 train_time:621657ms step_avg:75.99ms +[2025-09-02 06:46:11] [Rank 0] step:8181/10000 train_time:621657ms step_avg:75.99ms +[2025-09-02 06:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:46:25] [Rank 0] PRINT: step:8200/10000 val_loss:3.9015 svd_entropy: attn_qk:H=0.7617,top10E=0.25,eRank=163.2,q75/q25=114.63 attn_vo:H=0.8516,top10E=0.13,eRank=306.9,q75/q25=43.69 mlp_w1:H=0.7394,top10E=0.31,eRank=168.3,q75/q25=14.29 mlp_w2:H=0.8277,top10E=0.14,eRank=267.3,q75/q25=49.10 vo_prod:H=0.7707,top10E=0.20,eRank=174.3,q75/q25=1309.21 train_time:623482ms step_avg:76.03ms +[2025-09-02 06:46:25] [Rank 0] PRINT: step:8200/10000 val_loss:3.9015 svd_entropy: attn_qk:H=0.7617,top10E=0.25,eRank=163.2,q75/q25=114.63 attn_vo:H=0.8516,top10E=0.13,eRank=306.9,q75/q25=43.69 mlp_w1:H=0.7394,top10E=0.31,eRank=168.3,q75/q25=14.29 mlp_w2:H=0.8277,top10E=0.14,eRank=267.3,q75/q25=49.10 vo_prod:H=0.7707,top10E=0.20,eRank=174.3,q75/q25=1309.21 train_time:623482ms step_avg:76.03ms +[2025-09-02 06:46:25] [Rank 0] step:8201/10000 train_time:623494ms step_avg:76.03ms +[2025-09-02 06:46:25] [Rank 0] step:8201/10000 train_time:623494ms step_avg:76.03ms +[2025-09-02 06:46:27] [Rank 0] step:8221/10000 train_time:624995ms step_avg:76.02ms +[2025-09-02 06:46:27] [Rank 0] step:8221/10000 train_time:624995ms step_avg:76.02ms +[2025-09-02 06:46:28] [Rank 0] step:8241/10000 train_time:626638ms 
step_avg:76.04ms +[2025-09-02 06:46:28] [Rank 0] step:8241/10000 train_time:626638ms step_avg:76.04ms +[2025-09-02 06:46:30] [Rank 0] step:8261/10000 train_time:628267ms step_avg:76.05ms +[2025-09-02 06:46:30] [Rank 0] step:8261/10000 train_time:628267ms step_avg:76.05ms +[2025-09-02 06:46:31] [Rank 0] step:8281/10000 train_time:629917ms step_avg:76.07ms +[2025-09-02 06:46:31] [Rank 0] step:8281/10000 train_time:629917ms step_avg:76.07ms +[2025-09-02 06:46:33] [Rank 0] step:8301/10000 train_time:631552ms step_avg:76.08ms +[2025-09-02 06:46:33] [Rank 0] step:8301/10000 train_time:631552ms step_avg:76.08ms +[2025-09-02 06:46:35] [Rank 0] step:8321/10000 train_time:633178ms step_avg:76.09ms +[2025-09-02 06:46:35] [Rank 0] step:8321/10000 train_time:633178ms step_avg:76.09ms +[2025-09-02 06:46:36] [Rank 0] step:8341/10000 train_time:634815ms step_avg:76.11ms +[2025-09-02 06:46:36] [Rank 0] step:8341/10000 train_time:634815ms step_avg:76.11ms +[2025-09-02 06:46:38] [Rank 0] step:8361/10000 train_time:636450ms step_avg:76.12ms +[2025-09-02 06:46:38] [Rank 0] step:8361/10000 train_time:636450ms step_avg:76.12ms +[2025-09-02 06:46:40] [Rank 0] step:8381/10000 train_time:638084ms step_avg:76.13ms +[2025-09-02 06:46:40] [Rank 0] step:8381/10000 train_time:638084ms step_avg:76.13ms +[2025-09-02 06:46:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:46:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:46:53] [Rank 0] PRINT: step:8400/10000 val_loss:3.8905 svd_entropy: attn_qk:H=0.7624,top10E=0.25,eRank=163.9,q75/q25=114.69 attn_vo:H=0.8522,top10E=0.13,eRank=307.9,q75/q25=43.08 mlp_w1:H=0.7403,top10E=0.31,eRank=169.2,q75/q25=14.44 mlp_w2:H=0.8280,top10E=0.14,eRank=268.0,q75/q25=49.42 vo_prod:H=0.7716,top10E=0.20,eRank=175.3,q75/q25=1236.31 train_time:639880ms step_avg:76.18ms +[2025-09-02 06:46:53] [Rank 0] PRINT: step:8400/10000 val_loss:3.8905 svd_entropy: attn_qk:H=0.7624,top10E=0.25,eRank=163.9,q75/q25=114.69 attn_vo:H=0.8522,top10E=0.13,eRank=307.9,q75/q25=43.08 mlp_w1:H=0.7403,top10E=0.31,eRank=169.2,q75/q25=14.44 mlp_w2:H=0.8280,top10E=0.14,eRank=268.0,q75/q25=49.42 vo_prod:H=0.7716,top10E=0.20,eRank=175.3,q75/q25=1236.31 train_time:639880ms step_avg:76.18ms +[2025-09-02 06:46:53] [Rank 0] step:8401/10000 train_time:639893ms step_avg:76.17ms +[2025-09-02 06:46:53] [Rank 0] step:8401/10000 train_time:639893ms step_avg:76.17ms +[2025-09-02 06:46:55] [Rank 0] step:8421/10000 train_time:641371ms step_avg:76.16ms +[2025-09-02 06:46:55] [Rank 0] step:8421/10000 train_time:641371ms step_avg:76.16ms +[2025-09-02 06:46:56] [Rank 0] step:8441/10000 train_time:643004ms step_avg:76.18ms +[2025-09-02 06:46:56] [Rank 0] step:8441/10000 train_time:643004ms step_avg:76.18ms +[2025-09-02 06:46:58] [Rank 0] step:8461/10000 train_time:644633ms step_avg:76.19ms +[2025-09-02 06:46:58] [Rank 0] step:8461/10000 train_time:644633ms step_avg:76.19ms +[2025-09-02 06:47:00] [Rank 0] step:8481/10000 train_time:646270ms step_avg:76.20ms +[2025-09-02 06:47:00] [Rank 0] step:8481/10000 train_time:646270ms step_avg:76.20ms +[2025-09-02 06:47:01] [Rank 0] step:8501/10000 train_time:647925ms step_avg:76.22ms +[2025-09-02 06:47:01] [Rank 0] step:8501/10000 train_time:647925ms step_avg:76.22ms +[2025-09-02 06:47:03] [Rank 0] step:8521/10000 train_time:649566ms step_avg:76.23ms +[2025-09-02 06:47:03] [Rank 0] step:8521/10000 train_time:649566ms step_avg:76.23ms +[2025-09-02 
06:47:04] [Rank 0] step:8541/10000 train_time:651214ms step_avg:76.25ms +[2025-09-02 06:47:04] [Rank 0] step:8541/10000 train_time:651214ms step_avg:76.25ms +[2025-09-02 06:47:06] [Rank 0] step:8561/10000 train_time:652853ms step_avg:76.26ms +[2025-09-02 06:47:06] [Rank 0] step:8561/10000 train_time:652853ms step_avg:76.26ms +[2025-09-02 06:47:08] [Rank 0] step:8581/10000 train_time:654491ms step_avg:76.27ms +[2025-09-02 06:47:08] [Rank 0] step:8581/10000 train_time:654491ms step_avg:76.27ms +[2025-09-02 06:47:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:47:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:47:21] [Rank 0] PRINT: step:8600/10000 val_loss:3.8817 svd_entropy: attn_qk:H=0.7631,top10E=0.24,eRank=164.6,q75/q25=114.11 attn_vo:H=0.8526,top10E=0.13,eRank=308.7,q75/q25=42.58 mlp_w1:H=0.7411,top10E=0.31,eRank=170.0,q75/q25=14.47 mlp_w2:H=0.8283,top10E=0.14,eRank=268.6,q75/q25=49.54 vo_prod:H=0.7724,top10E=0.20,eRank=176.2,q75/q25=1196.42 train_time:656276ms step_avg:76.31ms +[2025-09-02 06:47:21] [Rank 0] PRINT: step:8600/10000 val_loss:3.8817 svd_entropy: attn_qk:H=0.7631,top10E=0.24,eRank=164.6,q75/q25=114.11 attn_vo:H=0.8526,top10E=0.13,eRank=308.7,q75/q25=42.58 mlp_w1:H=0.7411,top10E=0.31,eRank=170.0,q75/q25=14.47 mlp_w2:H=0.8283,top10E=0.14,eRank=268.6,q75/q25=49.54 vo_prod:H=0.7724,top10E=0.20,eRank=176.2,q75/q25=1196.42 train_time:656276ms step_avg:76.31ms +[2025-09-02 06:47:21] [Rank 0] step:8601/10000 train_time:656288ms step_avg:76.30ms +[2025-09-02 06:47:21] [Rank 0] step:8601/10000 train_time:656288ms step_avg:76.30ms +[2025-09-02 06:47:23] [Rank 0] step:8621/10000 train_time:657763ms step_avg:76.30ms +[2025-09-02 06:47:23] [Rank 0] step:8621/10000 train_time:657763ms step_avg:76.30ms +[2025-09-02 06:47:24] [Rank 0] step:8641/10000 train_time:659397ms 
step_avg:76.31ms +[2025-09-02 06:47:24] [Rank 0] step:8641/10000 train_time:659397ms step_avg:76.31ms +[2025-09-02 06:47:26] [Rank 0] step:8661/10000 train_time:661027ms step_avg:76.32ms +[2025-09-02 06:47:26] [Rank 0] step:8661/10000 train_time:661027ms step_avg:76.32ms +[2025-09-02 06:47:28] [Rank 0] step:8681/10000 train_time:662658ms step_avg:76.33ms +[2025-09-02 06:47:28] [Rank 0] step:8681/10000 train_time:662658ms step_avg:76.33ms +[2025-09-02 06:47:29] [Rank 0] step:8701/10000 train_time:664288ms step_avg:76.35ms +[2025-09-02 06:47:29] [Rank 0] step:8701/10000 train_time:664288ms step_avg:76.35ms +[2025-09-02 06:47:31] [Rank 0] step:8721/10000 train_time:665924ms step_avg:76.36ms +[2025-09-02 06:47:31] [Rank 0] step:8721/10000 train_time:665924ms step_avg:76.36ms +[2025-09-02 06:47:33] [Rank 0] step:8741/10000 train_time:667552ms step_avg:76.37ms +[2025-09-02 06:47:33] [Rank 0] step:8741/10000 train_time:667552ms step_avg:76.37ms +[2025-09-02 06:47:34] [Rank 0] step:8761/10000 train_time:669182ms step_avg:76.38ms +[2025-09-02 06:47:34] [Rank 0] step:8761/10000 train_time:669182ms step_avg:76.38ms +[2025-09-02 06:47:36] [Rank 0] step:8781/10000 train_time:670826ms step_avg:76.40ms +[2025-09-02 06:47:36] [Rank 0] step:8781/10000 train_time:670826ms step_avg:76.40ms +[2025-09-02 06:47:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:47:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:47:49] [Rank 0] PRINT: step:8800/10000 val_loss:3.8727 svd_entropy: attn_qk:H=0.7637,top10E=0.24,eRank=165.2,q75/q25=113.49 attn_vo:H=0.8531,top10E=0.13,eRank=309.6,q75/q25=41.96 mlp_w1:H=0.7419,top10E=0.31,eRank=170.8,q75/q25=14.57 mlp_w2:H=0.8286,top10E=0.14,eRank=269.2,q75/q25=49.60 vo_prod:H=0.7731,top10E=0.20,eRank=177.1,q75/q25=1163.62 train_time:672627ms step_avg:76.43ms +[2025-09-02 06:47:49] [Rank 0] PRINT: step:8800/10000 val_loss:3.8727 svd_entropy: attn_qk:H=0.7637,top10E=0.24,eRank=165.2,q75/q25=113.49 attn_vo:H=0.8531,top10E=0.13,eRank=309.6,q75/q25=41.96 mlp_w1:H=0.7419,top10E=0.31,eRank=170.8,q75/q25=14.57 mlp_w2:H=0.8286,top10E=0.14,eRank=269.2,q75/q25=49.60 vo_prod:H=0.7731,top10E=0.20,eRank=177.1,q75/q25=1163.62 train_time:672627ms step_avg:76.43ms +[2025-09-02 06:47:49] [Rank 0] step:8801/10000 train_time:672639ms step_avg:76.43ms +[2025-09-02 06:47:49] [Rank 0] step:8801/10000 train_time:672639ms step_avg:76.43ms +[2025-09-02 06:47:51] [Rank 0] step:8821/10000 train_time:674115ms step_avg:76.42ms +[2025-09-02 06:47:51] [Rank 0] step:8821/10000 train_time:674115ms step_avg:76.42ms +[2025-09-02 06:47:53] [Rank 0] step:8841/10000 train_time:675772ms step_avg:76.44ms +[2025-09-02 06:47:53] [Rank 0] step:8841/10000 train_time:675772ms step_avg:76.44ms +[2025-09-02 06:47:54] [Rank 0] step:8861/10000 train_time:677405ms step_avg:76.45ms +[2025-09-02 06:47:54] [Rank 0] step:8861/10000 train_time:677405ms step_avg:76.45ms +[2025-09-02 06:47:56] [Rank 0] step:8881/10000 train_time:679041ms step_avg:76.46ms +[2025-09-02 06:47:56] [Rank 0] step:8881/10000 train_time:679041ms step_avg:76.46ms +[2025-09-02 06:47:57] [Rank 0] step:8901/10000 train_time:680681ms step_avg:76.47ms +[2025-09-02 06:47:57] [Rank 0] step:8901/10000 train_time:680681ms step_avg:76.47ms +[2025-09-02 06:47:59] [Rank 0] step:8921/10000 train_time:682326ms step_avg:76.49ms +[2025-09-02 06:47:59] [Rank 0] step:8921/10000 train_time:682326ms step_avg:76.49ms +[2025-09-02 
06:48:01] [Rank 0] step:8941/10000 train_time:683973ms step_avg:76.50ms +[2025-09-02 06:48:01] [Rank 0] step:8941/10000 train_time:683973ms step_avg:76.50ms +[2025-09-02 06:48:02] [Rank 0] step:8961/10000 train_time:685607ms step_avg:76.51ms +[2025-09-02 06:48:02] [Rank 0] step:8961/10000 train_time:685607ms step_avg:76.51ms +[2025-09-02 06:48:04] [Rank 0] step:8981/10000 train_time:687238ms step_avg:76.52ms +[2025-09-02 06:48:04] [Rank 0] step:8981/10000 train_time:687238ms step_avg:76.52ms +[2025-09-02 06:48:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:48:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:48:17] [Rank 0] PRINT: step:9000/10000 val_loss:3.8633 svd_entropy: attn_qk:H=0.7643,top10E=0.24,eRank=165.8,q75/q25=113.28 attn_vo:H=0.8535,top10E=0.12,eRank=310.3,q75/q25=41.50 mlp_w1:H=0.7424,top10E=0.31,eRank=171.4,q75/q25=14.66 mlp_w2:H=0.8289,top10E=0.14,eRank=269.7,q75/q25=49.60 vo_prod:H=0.7738,top10E=0.20,eRank=177.8,q75/q25=1147.19 train_time:689038ms step_avg:76.56ms +[2025-09-02 06:48:17] [Rank 0] PRINT: step:9000/10000 val_loss:3.8633 svd_entropy: attn_qk:H=0.7643,top10E=0.24,eRank=165.8,q75/q25=113.28 attn_vo:H=0.8535,top10E=0.12,eRank=310.3,q75/q25=41.50 mlp_w1:H=0.7424,top10E=0.31,eRank=171.4,q75/q25=14.66 mlp_w2:H=0.8289,top10E=0.14,eRank=269.7,q75/q25=49.60 vo_prod:H=0.7738,top10E=0.20,eRank=177.8,q75/q25=1147.19 train_time:689038ms step_avg:76.56ms +[2025-09-02 06:48:17] [Rank 0] step:9001/10000 train_time:689050ms step_avg:76.55ms +[2025-09-02 06:48:17] [Rank 0] step:9001/10000 train_time:689050ms step_avg:76.55ms +[2025-09-02 06:48:19] [Rank 0] step:9021/10000 train_time:690531ms step_avg:76.55ms +[2025-09-02 06:48:19] [Rank 0] step:9021/10000 train_time:690531ms step_avg:76.55ms +[2025-09-02 06:48:21] [Rank 0] step:9041/10000 train_time:692166ms 
step_avg:76.56ms +[2025-09-02 06:48:21] [Rank 0] step:9041/10000 train_time:692166ms step_avg:76.56ms +[2025-09-02 06:48:22] [Rank 0] step:9061/10000 train_time:693814ms step_avg:76.57ms +[2025-09-02 06:48:22] [Rank 0] step:9061/10000 train_time:693814ms step_avg:76.57ms +[2025-09-02 06:48:24] [Rank 0] step:9081/10000 train_time:695459ms step_avg:76.58ms +[2025-09-02 06:48:24] [Rank 0] step:9081/10000 train_time:695459ms step_avg:76.58ms +[2025-09-02 06:48:26] [Rank 0] step:9101/10000 train_time:697119ms step_avg:76.60ms +[2025-09-02 06:48:26] [Rank 0] step:9101/10000 train_time:697119ms step_avg:76.60ms +[2025-09-02 06:48:27] [Rank 0] step:9121/10000 train_time:698761ms step_avg:76.61ms +[2025-09-02 06:48:27] [Rank 0] step:9121/10000 train_time:698761ms step_avg:76.61ms +[2025-09-02 06:48:29] [Rank 0] step:9141/10000 train_time:700383ms step_avg:76.62ms +[2025-09-02 06:48:29] [Rank 0] step:9141/10000 train_time:700383ms step_avg:76.62ms +[2025-09-02 06:48:30] [Rank 0] step:9161/10000 train_time:702009ms step_avg:76.63ms +[2025-09-02 06:48:30] [Rank 0] step:9161/10000 train_time:702009ms step_avg:76.63ms +[2025-09-02 06:48:32] [Rank 0] step:9181/10000 train_time:703676ms step_avg:76.64ms +[2025-09-02 06:48:32] [Rank 0] step:9181/10000 train_time:703676ms step_avg:76.64ms +[2025-09-02 06:48:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:48:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:48:45] [Rank 0] PRINT: step:9200/10000 val_loss:3.8563 svd_entropy: attn_qk:H=0.7647,top10E=0.24,eRank=166.2,q75/q25=113.08 attn_vo:H=0.8539,top10E=0.12,eRank=310.9,q75/q25=41.14 mlp_w1:H=0.7429,top10E=0.31,eRank=171.9,q75/q25=14.69 mlp_w2:H=0.8291,top10E=0.14,eRank=270.1,q75/q25=49.44 vo_prod:H=0.7743,top10E=0.20,eRank=178.5,q75/q25=1095.65 train_time:705477ms step_avg:76.68ms +[2025-09-02 06:48:45] [Rank 0] PRINT: step:9200/10000 val_loss:3.8563 svd_entropy: attn_qk:H=0.7647,top10E=0.24,eRank=166.2,q75/q25=113.08 attn_vo:H=0.8539,top10E=0.12,eRank=310.9,q75/q25=41.14 mlp_w1:H=0.7429,top10E=0.31,eRank=171.9,q75/q25=14.69 mlp_w2:H=0.8291,top10E=0.14,eRank=270.1,q75/q25=49.44 vo_prod:H=0.7743,top10E=0.20,eRank=178.5,q75/q25=1095.65 train_time:705477ms step_avg:76.68ms +[2025-09-02 06:48:46] [Rank 0] step:9201/10000 train_time:705489ms step_avg:76.68ms +[2025-09-02 06:48:46] [Rank 0] step:9201/10000 train_time:705489ms step_avg:76.68ms +[2025-09-02 06:48:47] [Rank 0] step:9221/10000 train_time:706988ms step_avg:76.67ms +[2025-09-02 06:48:47] [Rank 0] step:9221/10000 train_time:706988ms step_avg:76.67ms +[2025-09-02 06:48:49] [Rank 0] step:9241/10000 train_time:708633ms step_avg:76.68ms +[2025-09-02 06:48:49] [Rank 0] step:9241/10000 train_time:708633ms step_avg:76.68ms +[2025-09-02 06:48:51] [Rank 0] step:9261/10000 train_time:710279ms step_avg:76.70ms +[2025-09-02 06:48:51] [Rank 0] step:9261/10000 train_time:710279ms step_avg:76.70ms +[2025-09-02 06:48:52] [Rank 0] step:9281/10000 train_time:711905ms step_avg:76.71ms +[2025-09-02 06:48:52] [Rank 0] step:9281/10000 train_time:711905ms step_avg:76.71ms +[2025-09-02 06:48:54] [Rank 0] step:9301/10000 train_time:713542ms step_avg:76.72ms +[2025-09-02 06:48:54] [Rank 0] step:9301/10000 train_time:713542ms step_avg:76.72ms +[2025-09-02 06:48:55] [Rank 0] step:9321/10000 train_time:715182ms step_avg:76.73ms +[2025-09-02 06:48:55] [Rank 0] step:9321/10000 train_time:715182ms step_avg:76.73ms +[2025-09-02 
06:48:57] [Rank 0] step:9341/10000 train_time:716824ms step_avg:76.74ms +[2025-09-02 06:48:57] [Rank 0] step:9341/10000 train_time:716824ms step_avg:76.74ms +[2025-09-02 06:48:59] [Rank 0] step:9361/10000 train_time:718469ms step_avg:76.75ms +[2025-09-02 06:48:59] [Rank 0] step:9361/10000 train_time:718469ms step_avg:76.75ms +[2025-09-02 06:49:00] [Rank 0] step:9381/10000 train_time:720122ms step_avg:76.76ms +[2025-09-02 06:49:00] [Rank 0] step:9381/10000 train_time:720122ms step_avg:76.76ms +[2025-09-02 06:49:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:49:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:49:14] [Rank 0] PRINT: step:9400/10000 val_loss:3.8487 svd_entropy: attn_qk:H=0.7651,top10E=0.24,eRank=166.6,q75/q25=113.11 attn_vo:H=0.8542,top10E=0.12,eRank=311.4,q75/q25=40.76 mlp_w1:H=0.7435,top10E=0.31,eRank=172.4,q75/q25=14.72 mlp_w2:H=0.8293,top10E=0.14,eRank=270.5,q75/q25=49.47 vo_prod:H=0.7749,top10E=0.20,eRank=179.1,q75/q25=1056.30 train_time:721928ms step_avg:76.80ms +[2025-09-02 06:49:14] [Rank 0] PRINT: step:9400/10000 val_loss:3.8487 svd_entropy: attn_qk:H=0.7651,top10E=0.24,eRank=166.6,q75/q25=113.11 attn_vo:H=0.8542,top10E=0.12,eRank=311.4,q75/q25=40.76 mlp_w1:H=0.7435,top10E=0.31,eRank=172.4,q75/q25=14.72 mlp_w2:H=0.8293,top10E=0.14,eRank=270.5,q75/q25=49.47 vo_prod:H=0.7749,top10E=0.20,eRank=179.1,q75/q25=1056.30 train_time:721928ms step_avg:76.80ms +[2025-09-02 06:49:14] [Rank 0] step:9401/10000 train_time:721940ms step_avg:76.79ms +[2025-09-02 06:49:14] [Rank 0] step:9401/10000 train_time:721940ms step_avg:76.79ms +[2025-09-02 06:49:15] [Rank 0] step:9421/10000 train_time:723411ms step_avg:76.79ms +[2025-09-02 06:49:15] [Rank 0] step:9421/10000 train_time:723411ms step_avg:76.79ms +[2025-09-02 06:49:17] [Rank 0] step:9441/10000 train_time:725049ms 
step_avg:76.80ms +[2025-09-02 06:49:17] [Rank 0] step:9441/10000 train_time:725049ms step_avg:76.80ms +[2025-09-02 06:49:19] [Rank 0] step:9461/10000 train_time:726692ms step_avg:76.81ms +[2025-09-02 06:49:19] [Rank 0] step:9461/10000 train_time:726692ms step_avg:76.81ms +[2025-09-02 06:49:20] [Rank 0] step:9481/10000 train_time:728334ms step_avg:76.82ms +[2025-09-02 06:49:20] [Rank 0] step:9481/10000 train_time:728334ms step_avg:76.82ms +[2025-09-02 06:49:22] [Rank 0] step:9501/10000 train_time:729986ms step_avg:76.83ms +[2025-09-02 06:49:22] [Rank 0] step:9501/10000 train_time:729986ms step_avg:76.83ms +[2025-09-02 06:49:24] [Rank 0] step:9521/10000 train_time:731621ms step_avg:76.84ms +[2025-09-02 06:49:24] [Rank 0] step:9521/10000 train_time:731621ms step_avg:76.84ms +[2025-09-02 06:49:25] [Rank 0] step:9541/10000 train_time:733260ms step_avg:76.85ms +[2025-09-02 06:49:25] [Rank 0] step:9541/10000 train_time:733260ms step_avg:76.85ms +[2025-09-02 06:49:27] [Rank 0] step:9561/10000 train_time:734896ms step_avg:76.86ms +[2025-09-02 06:49:27] [Rank 0] step:9561/10000 train_time:734896ms step_avg:76.86ms +[2025-09-02 06:49:29] [Rank 0] step:9581/10000 train_time:736539ms step_avg:76.87ms +[2025-09-02 06:49:29] [Rank 0] step:9581/10000 train_time:736539ms step_avg:76.87ms +[2025-09-02 06:49:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:49:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:49:42] [Rank 0] PRINT: step:9600/10000 val_loss:3.8426 svd_entropy: attn_qk:H=0.7654,top10E=0.24,eRank=167.0,q75/q25=113.26 attn_vo:H=0.8544,top10E=0.12,eRank=311.9,q75/q25=40.60 mlp_w1:H=0.7438,top10E=0.30,eRank=172.8,q75/q25=14.76 mlp_w2:H=0.8294,top10E=0.14,eRank=270.9,q75/q25=49.49 vo_prod:H=0.7753,top10E=0.20,eRank=179.6,q75/q25=1045.03 train_time:738352ms step_avg:76.91ms +[2025-09-02 06:49:42] [Rank 0] PRINT: step:9600/10000 val_loss:3.8426 svd_entropy: attn_qk:H=0.7654,top10E=0.24,eRank=167.0,q75/q25=113.26 attn_vo:H=0.8544,top10E=0.12,eRank=311.9,q75/q25=40.60 mlp_w1:H=0.7438,top10E=0.30,eRank=172.8,q75/q25=14.76 mlp_w2:H=0.8294,top10E=0.14,eRank=270.9,q75/q25=49.49 vo_prod:H=0.7753,top10E=0.20,eRank=179.6,q75/q25=1045.03 train_time:738352ms step_avg:76.91ms +[2025-09-02 06:49:42] [Rank 0] step:9601/10000 train_time:738365ms step_avg:76.90ms +[2025-09-02 06:49:42] [Rank 0] step:9601/10000 train_time:738365ms step_avg:76.90ms +[2025-09-02 06:49:44] [Rank 0] step:9621/10000 train_time:739854ms step_avg:76.90ms +[2025-09-02 06:49:44] [Rank 0] step:9621/10000 train_time:739854ms step_avg:76.90ms +[2025-09-02 06:49:45] [Rank 0] step:9641/10000 train_time:741497ms step_avg:76.91ms +[2025-09-02 06:49:45] [Rank 0] step:9641/10000 train_time:741497ms step_avg:76.91ms +[2025-09-02 06:49:47] [Rank 0] step:9661/10000 train_time:743164ms step_avg:76.92ms +[2025-09-02 06:49:47] [Rank 0] step:9661/10000 train_time:743164ms step_avg:76.92ms +[2025-09-02 06:49:49] [Rank 0] step:9681/10000 train_time:744823ms step_avg:76.94ms +[2025-09-02 06:49:49] [Rank 0] step:9681/10000 train_time:744823ms step_avg:76.94ms +[2025-09-02 06:49:50] [Rank 0] step:9701/10000 train_time:746499ms step_avg:76.95ms +[2025-09-02 06:49:50] [Rank 0] step:9701/10000 train_time:746499ms step_avg:76.95ms +[2025-09-02 06:49:52] [Rank 0] step:9721/10000 train_time:748155ms step_avg:76.96ms +[2025-09-02 06:49:52] [Rank 0] step:9721/10000 train_time:748155ms step_avg:76.96ms +[2025-09-02 
06:49:54] [Rank 0] step:9741/10000 train_time:749838ms step_avg:76.98ms +[2025-09-02 06:49:54] [Rank 0] step:9741/10000 train_time:749838ms step_avg:76.98ms +[2025-09-02 06:49:55] [Rank 0] step:9761/10000 train_time:751502ms step_avg:76.99ms +[2025-09-02 06:49:55] [Rank 0] step:9761/10000 train_time:751502ms step_avg:76.99ms +[2025-09-02 06:49:57] [Rank 0] step:9781/10000 train_time:753183ms step_avg:77.00ms +[2025-09-02 06:49:57] [Rank 0] step:9781/10000 train_time:753183ms step_avg:77.00ms +[2025-09-02 06:49:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:49:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:50:10] [Rank 0] PRINT: step:9800/10000 val_loss:3.8366 svd_entropy: attn_qk:H=0.7656,top10E=0.24,eRank=167.2,q75/q25=113.19 attn_vo:H=0.8546,top10E=0.12,eRank=312.2,q75/q25=40.39 mlp_w1:H=0.7442,top10E=0.30,eRank=173.2,q75/q25=14.81 mlp_w2:H=0.8295,top10E=0.14,eRank=271.1,q75/q25=49.56 vo_prod:H=0.7756,top10E=0.20,eRank=180.0,q75/q25=1027.01 train_time:755030ms step_avg:77.04ms +[2025-09-02 06:50:10] [Rank 0] PRINT: step:9800/10000 val_loss:3.8366 svd_entropy: attn_qk:H=0.7656,top10E=0.24,eRank=167.2,q75/q25=113.19 attn_vo:H=0.8546,top10E=0.12,eRank=312.2,q75/q25=40.39 mlp_w1:H=0.7442,top10E=0.30,eRank=173.2,q75/q25=14.81 mlp_w2:H=0.8295,top10E=0.14,eRank=271.1,q75/q25=49.56 vo_prod:H=0.7756,top10E=0.20,eRank=180.0,q75/q25=1027.01 train_time:755030ms step_avg:77.04ms +[2025-09-02 06:50:10] [Rank 0] step:9801/10000 train_time:755042ms step_avg:77.04ms +[2025-09-02 06:50:10] [Rank 0] step:9801/10000 train_time:755042ms step_avg:77.04ms +[2025-09-02 06:50:12] [Rank 0] step:9821/10000 train_time:756546ms step_avg:77.03ms +[2025-09-02 06:50:12] [Rank 0] step:9821/10000 train_time:756546ms step_avg:77.03ms +[2025-09-02 06:50:14] [Rank 0] step:9841/10000 train_time:758225ms 
step_avg:77.05ms +[2025-09-02 06:50:14] [Rank 0] step:9841/10000 train_time:758225ms step_avg:77.05ms +[2025-09-02 06:50:15] [Rank 0] step:9861/10000 train_time:759882ms step_avg:77.06ms +[2025-09-02 06:50:15] [Rank 0] step:9861/10000 train_time:759882ms step_avg:77.06ms +[2025-09-02 06:50:17] [Rank 0] step:9881/10000 train_time:761535ms step_avg:77.07ms +[2025-09-02 06:50:17] [Rank 0] step:9881/10000 train_time:761535ms step_avg:77.07ms +[2025-09-02 06:50:19] [Rank 0] step:9901/10000 train_time:763206ms step_avg:77.08ms +[2025-09-02 06:50:19] [Rank 0] step:9901/10000 train_time:763206ms step_avg:77.08ms +[2025-09-02 06:50:20] [Rank 0] step:9921/10000 train_time:764868ms step_avg:77.10ms +[2025-09-02 06:50:20] [Rank 0] step:9921/10000 train_time:764868ms step_avg:77.10ms +[2025-09-02 06:50:22] [Rank 0] step:9941/10000 train_time:766537ms step_avg:77.11ms +[2025-09-02 06:50:22] [Rank 0] step:9941/10000 train_time:766537ms step_avg:77.11ms +[2025-09-02 06:50:24] [Rank 0] step:9961/10000 train_time:768202ms step_avg:77.12ms +[2025-09-02 06:50:24] [Rank 0] step:9961/10000 train_time:768202ms step_avg:77.12ms +[2025-09-02 06:50:25] [Rank 0] step:9981/10000 train_time:769866ms step_avg:77.13ms +[2025-09-02 06:50:25] [Rank 0] step:9981/10000 train_time:769866ms step_avg:77.13ms +[2025-09-02 06:50:27] [Rank 0] step:10000/10000 train_time:771457ms step_avg:77.15ms +[2025-09-02 06:50:27] [Rank 0] step:10000/10000 train_time:771457ms step_avg:77.15ms +[2025-09-02 06:50:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 06:50:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 06:50:39] [Rank 0] PRINT: step:10000/10000 val_loss:3.8311 svd_entropy: attn_qk:H=0.7658,top10E=0.24,eRank=167.4,q75/q25=113.38 attn_vo:H=0.8548,top10E=0.12,eRank=312.4,q75/q25=40.23 mlp_w1:H=0.7444,top10E=0.30,eRank=173.4,q75/q25=14.83 mlp_w2:H=0.8296,top10E=0.14,eRank=271.3,q75/q25=49.62 vo_prod:H=0.7758,top10E=0.20,eRank=180.3,q75/q25=1020.10 train_time:771714ms step_avg:77.17ms +[2025-09-02 06:50:39] [Rank 0] PRINT: step:10000/10000 val_loss:3.8311 svd_entropy: attn_qk:H=0.7658,top10E=0.24,eRank=167.4,q75/q25=113.38 attn_vo:H=0.8548,top10E=0.12,eRank=312.4,q75/q25=40.23 mlp_w1:H=0.7444,top10E=0.30,eRank=173.4,q75/q25=14.83 mlp_w2:H=0.8296,top10E=0.14,eRank=271.3,q75/q25=49.62 vo_prod:H=0.7758,top10E=0.20,eRank=180.3,q75/q25=1020.10 train_time:771714ms step_avg:77.17ms +[2025-09-02 06:50:39] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 06:50:39 2025 --- +[2025-09-02 06:50:39] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 06:50:39 2025 --- +[2025-09-02 06:50:39] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 06:50:39] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_43/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b3b4332b21f091853d93b4acf29759756cd4f7a5 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "0ff6edc8-7c7f-43f6-a78e-15d238f61498", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_43/training_log_0ff6edc8-7c7f-43f6-a78e-15d238f61498.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_43/training_log_0ff6edc8-7c7f-43f6-a78e-15d238f61498.txt new file mode 100644 index 0000000000000000000000000000000000000000..e1caebb19e1dd9b8a655310a64fd667414b88c61 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_43/training_log_0ff6edc8-7c7f-43f6-a78e-15d238f61498.txt @@ -0,0 +1,2984 @@ +[2025-09-02 07:40:31] [Rank 0] PRINT: --- Script Start: Tue Sep 2 07:40:31 2025 --- +[2025-09-02 07:40:31] [Rank 0] PRINT: --- Script Start: Tue Sep 2 07:40:31 2025 --- +[2025-09-02 07:40:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 07:40:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 07:40:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 07:40:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 07:40:31] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-02 07:40:31] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-02 07:40:31] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_43 +[2025-09-02 07:40:31] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_43 +[2025-09-02 07:40:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 07:40:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 07:40:31] [Rank 0] PRINT: Constructing model... +[2025-09-02 07:40:31] [Rank 0] PRINT: Constructing model... +[2025-09-02 07:40:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 07:40:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 07:40:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 07:40:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 07:40:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 07:40:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 07:40:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 07:40:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 07:40:33] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 07:40:33] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 07:40:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 07:40:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 07:40:33] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 07:40:33] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 07:40:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 07:40:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 07:40:33] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 07:40:33] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 07:40:33] [Rank 0] PRINT: Starting warmup... +[2025-09-02 07:40:33] [Rank 0] PRINT: Starting warmup... +[2025-09-02 07:41:14] [Rank 0] PRINT: Warmup complete. +[2025-09-02 07:41:14] [Rank 0] PRINT: Warmup complete. +[2025-09-02 07:41:15] [Rank 0] PRINT: Starting training... +[2025-09-02 07:41:15] [Rank 0] PRINT: Starting training... 
+[2025-09-02 07:41:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:41:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:41:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 07:41:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 07:41:32] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.45ms +[2025-09-02 07:41:32] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.45ms +[2025-09-02 07:41:33] [Rank 0] step:41/10000 train_time:2709ms step_avg:66.08ms +[2025-09-02 07:41:33] [Rank 0] step:41/10000 train_time:2709ms step_avg:66.08ms +[2025-09-02 07:41:35] [Rank 0] step:61/10000 train_time:4111ms step_avg:67.39ms +[2025-09-02 07:41:35] [Rank 0] step:61/10000 train_time:4111ms step_avg:67.39ms +[2025-09-02 07:41:36] [Rank 0] step:81/10000 train_time:5515ms step_avg:68.09ms +[2025-09-02 07:41:36] [Rank 0] step:81/10000 train_time:5515ms step_avg:68.09ms +[2025-09-02 07:41:37] [Rank 0] step:101/10000 train_time:6921ms step_avg:68.53ms +[2025-09-02 07:41:37] [Rank 0] step:101/10000 train_time:6921ms step_avg:68.53ms +[2025-09-02 07:41:39] [Rank 0] step:121/10000 train_time:8328ms step_avg:68.83ms +[2025-09-02 07:41:39] [Rank 0] step:121/10000 
train_time:8328ms step_avg:68.83ms +[2025-09-02 07:41:40] [Rank 0] step:141/10000 train_time:9738ms step_avg:69.06ms +[2025-09-02 07:41:40] [Rank 0] step:141/10000 train_time:9738ms step_avg:69.06ms +[2025-09-02 07:41:42] [Rank 0] step:161/10000 train_time:11148ms step_avg:69.24ms +[2025-09-02 07:41:42] [Rank 0] step:161/10000 train_time:11148ms step_avg:69.24ms +[2025-09-02 07:41:43] [Rank 0] step:181/10000 train_time:12557ms step_avg:69.38ms +[2025-09-02 07:41:43] [Rank 0] step:181/10000 train_time:12557ms step_avg:69.38ms +[2025-09-02 07:41:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:41:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:41:56] [Rank 0] PRINT: step:200/10000 val_loss:6.4989 svd_entropy: attn_qk:H=0.4360,top10E=0.81,eRank=36.2,q75/q25=12.14 attn_vo:H=0.5556,top10E=0.63,eRank=115.8,q75/q25=97.21 mlp_w1:H=0.4144,top10E=0.76,eRank=22.4,q75/q25=2.72 mlp_w2:H=0.1659,top10E=0.95,eRank=5.0,q75/q25=334.73 vo_prod:H=0.2563,top10E=0.96,eRank=6.8,q75/q25=629.20 train_time:14108ms step_avg:70.54ms +[2025-09-02 07:41:56] [Rank 0] PRINT: step:200/10000 val_loss:6.4989 svd_entropy: attn_qk:H=0.4360,top10E=0.81,eRank=36.2,q75/q25=12.14 attn_vo:H=0.5556,top10E=0.63,eRank=115.8,q75/q25=97.21 mlp_w1:H=0.4144,top10E=0.76,eRank=22.4,q75/q25=2.72 mlp_w2:H=0.1659,top10E=0.95,eRank=5.0,q75/q25=334.73 vo_prod:H=0.2563,top10E=0.96,eRank=6.8,q75/q25=629.20 train_time:14108ms step_avg:70.54ms +[2025-09-02 07:41:56] [Rank 0] step:201/10000 train_time:14121ms step_avg:70.25ms +[2025-09-02 07:41:56] [Rank 0] step:201/10000 train_time:14121ms step_avg:70.25ms +[2025-09-02 07:41:58] [Rank 0] step:221/10000 train_time:15399ms step_avg:69.68ms +[2025-09-02 07:41:58] [Rank 0] step:221/10000 train_time:15399ms step_avg:69.68ms +[2025-09-02 07:41:59] [Rank 0] step:241/10000 
train_time:16808ms step_avg:69.74ms +[2025-09-02 07:41:59] [Rank 0] step:241/10000 train_time:16808ms step_avg:69.74ms +[2025-09-02 07:42:01] [Rank 0] step:261/10000 train_time:18217ms step_avg:69.80ms +[2025-09-02 07:42:01] [Rank 0] step:261/10000 train_time:18217ms step_avg:69.80ms +[2025-09-02 07:42:02] [Rank 0] step:281/10000 train_time:19626ms step_avg:69.84ms +[2025-09-02 07:42:02] [Rank 0] step:281/10000 train_time:19626ms step_avg:69.84ms +[2025-09-02 07:42:04] [Rank 0] step:301/10000 train_time:21036ms step_avg:69.89ms +[2025-09-02 07:42:04] [Rank 0] step:301/10000 train_time:21036ms step_avg:69.89ms +[2025-09-02 07:42:05] [Rank 0] step:321/10000 train_time:22447ms step_avg:69.93ms +[2025-09-02 07:42:05] [Rank 0] step:321/10000 train_time:22447ms step_avg:69.93ms +[2025-09-02 07:42:06] [Rank 0] step:341/10000 train_time:23859ms step_avg:69.97ms +[2025-09-02 07:42:06] [Rank 0] step:341/10000 train_time:23859ms step_avg:69.97ms +[2025-09-02 07:42:08] [Rank 0] step:361/10000 train_time:25269ms step_avg:70.00ms +[2025-09-02 07:42:08] [Rank 0] step:361/10000 train_time:25269ms step_avg:70.00ms +[2025-09-02 07:42:09] [Rank 0] step:381/10000 train_time:26680ms step_avg:70.03ms +[2025-09-02 07:42:09] [Rank 0] step:381/10000 train_time:26680ms step_avg:70.03ms +[2025-09-02 07:42:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:42:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:42:22] [Rank 0] PRINT: step:400/10000 val_loss:5.9675 svd_entropy: attn_qk:H=0.5006,top10E=0.70,eRank=44.8,q75/q25=13.67 attn_vo:H=0.5895,top10E=0.52,eRank=92.7,q75/q25=36.64 mlp_w1:H=0.4465,top10E=0.70,eRank=36.9,q75/q25=3.14 mlp_w2:H=0.5287,top10E=0.61,eRank=35.7,q75/q25=15.05 vo_prod:H=0.4178,top10E=0.81,eRank=17.3,q75/q25=265.25 train_time:28233ms step_avg:70.58ms +[2025-09-02 07:42:22] [Rank 0] PRINT: step:400/10000 val_loss:5.9675 svd_entropy: attn_qk:H=0.5006,top10E=0.70,eRank=44.8,q75/q25=13.67 attn_vo:H=0.5895,top10E=0.52,eRank=92.7,q75/q25=36.64 mlp_w1:H=0.4465,top10E=0.70,eRank=36.9,q75/q25=3.14 mlp_w2:H=0.5287,top10E=0.61,eRank=35.7,q75/q25=15.05 vo_prod:H=0.4178,top10E=0.81,eRank=17.3,q75/q25=265.25 train_time:28233ms step_avg:70.58ms +[2025-09-02 07:42:23] [Rank 0] step:401/10000 train_time:28245ms step_avg:70.44ms +[2025-09-02 07:42:23] [Rank 0] step:401/10000 train_time:28245ms step_avg:70.44ms +[2025-09-02 07:42:24] [Rank 0] step:421/10000 train_time:29533ms step_avg:70.15ms +[2025-09-02 07:42:24] [Rank 0] step:421/10000 train_time:29533ms step_avg:70.15ms +[2025-09-02 07:42:25] [Rank 0] step:441/10000 train_time:30946ms step_avg:70.17ms +[2025-09-02 07:42:25] [Rank 0] step:441/10000 train_time:30946ms step_avg:70.17ms +[2025-09-02 07:42:27] [Rank 0] step:461/10000 train_time:32358ms step_avg:70.19ms +[2025-09-02 07:42:27] [Rank 0] step:461/10000 train_time:32358ms step_avg:70.19ms +[2025-09-02 07:42:28] [Rank 0] step:481/10000 train_time:33769ms step_avg:70.21ms +[2025-09-02 07:42:28] [Rank 0] step:481/10000 train_time:33769ms step_avg:70.21ms +[2025-09-02 07:42:30] [Rank 0] step:501/10000 train_time:35182ms step_avg:70.22ms +[2025-09-02 07:42:30] [Rank 0] step:501/10000 train_time:35182ms step_avg:70.22ms +[2025-09-02 07:42:31] [Rank 0] step:521/10000 train_time:36594ms step_avg:70.24ms +[2025-09-02 07:42:31] [Rank 0] step:521/10000 train_time:36594ms step_avg:70.24ms +[2025-09-02 07:42:32] [Rank 0] step:541/10000 
train_time:38007ms step_avg:70.25ms +[2025-09-02 07:42:32] [Rank 0] step:541/10000 train_time:38007ms step_avg:70.25ms +[2025-09-02 07:42:34] [Rank 0] step:561/10000 train_time:39420ms step_avg:70.27ms +[2025-09-02 07:42:34] [Rank 0] step:561/10000 train_time:39420ms step_avg:70.27ms +[2025-09-02 07:42:35] [Rank 0] step:581/10000 train_time:40833ms step_avg:70.28ms +[2025-09-02 07:42:35] [Rank 0] step:581/10000 train_time:40833ms step_avg:70.28ms +[2025-09-02 07:42:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:42:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:42:48] [Rank 0] PRINT: step:600/10000 val_loss:5.6565 svd_entropy: attn_qk:H=0.5409,top10E=0.62,eRank=52.5,q75/q25=15.45 attn_vo:H=0.6250,top10E=0.44,eRank=101.1,q75/q25=28.45 mlp_w1:H=0.4828,top10E=0.65,eRank=47.2,q75/q25=3.46 mlp_w2:H=0.6224,top10E=0.47,eRank=64.7,q75/q25=11.58 vo_prod:H=0.4884,top10E=0.67,eRank=27.3,q75/q25=254.66 train_time:42387ms step_avg:70.65ms +[2025-09-02 07:42:48] [Rank 0] PRINT: step:600/10000 val_loss:5.6565 svd_entropy: attn_qk:H=0.5409,top10E=0.62,eRank=52.5,q75/q25=15.45 attn_vo:H=0.6250,top10E=0.44,eRank=101.1,q75/q25=28.45 mlp_w1:H=0.4828,top10E=0.65,eRank=47.2,q75/q25=3.46 mlp_w2:H=0.6224,top10E=0.47,eRank=64.7,q75/q25=11.58 vo_prod:H=0.4884,top10E=0.67,eRank=27.3,q75/q25=254.66 train_time:42387ms step_avg:70.65ms +[2025-09-02 07:42:48] [Rank 0] step:601/10000 train_time:42399ms step_avg:70.55ms +[2025-09-02 07:42:48] [Rank 0] step:601/10000 train_time:42399ms step_avg:70.55ms +[2025-09-02 07:42:50] [Rank 0] step:621/10000 train_time:43690ms step_avg:70.35ms +[2025-09-02 07:42:50] [Rank 0] step:621/10000 train_time:43690ms step_avg:70.35ms +[2025-09-02 07:42:51] [Rank 0] step:641/10000 train_time:45100ms step_avg:70.36ms +[2025-09-02 07:42:51] [Rank 0] step:641/10000 
train_time:45100ms step_avg:70.36ms +[2025-09-02 07:42:53] [Rank 0] step:661/10000 train_time:46510ms step_avg:70.36ms +[2025-09-02 07:42:53] [Rank 0] step:661/10000 train_time:46510ms step_avg:70.36ms +[2025-09-02 07:42:54] [Rank 0] step:681/10000 train_time:47920ms step_avg:70.37ms +[2025-09-02 07:42:54] [Rank 0] step:681/10000 train_time:47920ms step_avg:70.37ms +[2025-09-02 07:42:55] [Rank 0] step:701/10000 train_time:49330ms step_avg:70.37ms +[2025-09-02 07:42:55] [Rank 0] step:701/10000 train_time:49330ms step_avg:70.37ms +[2025-09-02 07:42:57] [Rank 0] step:721/10000 train_time:50739ms step_avg:70.37ms +[2025-09-02 07:42:57] [Rank 0] step:721/10000 train_time:50739ms step_avg:70.37ms +[2025-09-02 07:42:58] [Rank 0] step:741/10000 train_time:52150ms step_avg:70.38ms +[2025-09-02 07:42:58] [Rank 0] step:741/10000 train_time:52150ms step_avg:70.38ms +[2025-09-02 07:43:00] [Rank 0] step:761/10000 train_time:53572ms step_avg:70.40ms +[2025-09-02 07:43:00] [Rank 0] step:761/10000 train_time:53572ms step_avg:70.40ms +[2025-09-02 07:43:01] [Rank 0] step:781/10000 train_time:54997ms step_avg:70.42ms +[2025-09-02 07:43:01] [Rank 0] step:781/10000 train_time:54997ms step_avg:70.42ms +[2025-09-02 07:43:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:43:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:43:14] [Rank 0] PRINT: step:800/10000 val_loss:5.4316 svd_entropy: attn_qk:H=0.5696,top10E=0.56,eRank=59.2,q75/q25=17.66 attn_vo:H=0.6493,top10E=0.39,eRank=109.6,q75/q25=29.92 mlp_w1:H=0.5124,top10E=0.62,eRank=54.5,q75/q25=3.77 mlp_w2:H=0.6666,top10E=0.40,eRank=86.3,q75/q25=12.34 vo_prod:H=0.5234,top10E=0.60,eRank=34.4,q75/q25=390.13 train_time:56566ms step_avg:70.71ms +[2025-09-02 07:43:14] [Rank 0] PRINT: step:800/10000 val_loss:5.4316 svd_entropy: attn_qk:H=0.5696,top10E=0.56,eRank=59.2,q75/q25=17.66 attn_vo:H=0.6493,top10E=0.39,eRank=109.6,q75/q25=29.92 mlp_w1:H=0.5124,top10E=0.62,eRank=54.5,q75/q25=3.77 mlp_w2:H=0.6666,top10E=0.40,eRank=86.3,q75/q25=12.34 vo_prod:H=0.5234,top10E=0.60,eRank=34.4,q75/q25=390.13 train_time:56566ms step_avg:70.71ms +[2025-09-02 07:43:14] [Rank 0] step:801/10000 train_time:56578ms step_avg:70.63ms +[2025-09-02 07:43:14] [Rank 0] step:801/10000 train_time:56578ms step_avg:70.63ms +[2025-09-02 07:43:15] [Rank 0] step:821/10000 train_time:57863ms step_avg:70.48ms +[2025-09-02 07:43:15] [Rank 0] step:821/10000 train_time:57863ms step_avg:70.48ms +[2025-09-02 07:43:17] [Rank 0] step:841/10000 train_time:59285ms step_avg:70.49ms +[2025-09-02 07:43:17] [Rank 0] step:841/10000 train_time:59285ms step_avg:70.49ms +[2025-09-02 07:43:18] [Rank 0] step:861/10000 train_time:60706ms step_avg:70.51ms +[2025-09-02 07:43:18] [Rank 0] step:861/10000 train_time:60706ms step_avg:70.51ms +[2025-09-02 07:43:20] [Rank 0] step:881/10000 train_time:62130ms step_avg:70.52ms +[2025-09-02 07:43:20] [Rank 0] step:881/10000 train_time:62130ms step_avg:70.52ms +[2025-09-02 07:43:21] [Rank 0] step:901/10000 train_time:63553ms step_avg:70.54ms +[2025-09-02 07:43:21] [Rank 0] step:901/10000 train_time:63553ms step_avg:70.54ms +[2025-09-02 07:43:23] [Rank 0] step:921/10000 train_time:64976ms step_avg:70.55ms +[2025-09-02 07:43:23] [Rank 0] step:921/10000 train_time:64976ms step_avg:70.55ms +[2025-09-02 07:43:24] [Rank 0] step:941/10000 
train_time:66399ms step_avg:70.56ms +[2025-09-02 07:43:24] [Rank 0] step:941/10000 train_time:66399ms step_avg:70.56ms +[2025-09-02 07:43:25] [Rank 0] step:961/10000 train_time:67822ms step_avg:70.57ms +[2025-09-02 07:43:25] [Rank 0] step:961/10000 train_time:67822ms step_avg:70.57ms +[2025-09-02 07:43:27] [Rank 0] step:981/10000 train_time:69246ms step_avg:70.59ms +[2025-09-02 07:43:27] [Rank 0] step:981/10000 train_time:69246ms step_avg:70.59ms +[2025-09-02 07:43:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:43:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:43:40] [Rank 0] PRINT: step:1000/10000 val_loss:5.2617 svd_entropy: attn_qk:H=0.5918,top10E=0.51,eRank=65.3,q75/q25=20.41 attn_vo:H=0.6731,top10E=0.35,eRank=121.0,q75/q25=37.66 mlp_w1:H=0.5373,top10E=0.59,eRank=61.1,q75/q25=4.09 mlp_w2:H=0.6979,top10E=0.35,eRank=106.4,q75/q25=14.17 vo_prod:H=0.5540,top10E=0.54,eRank=42.0,q75/q25=848.97 train_time:70813ms step_avg:70.81ms +[2025-09-02 07:43:40] [Rank 0] PRINT: step:1000/10000 val_loss:5.2617 svd_entropy: attn_qk:H=0.5918,top10E=0.51,eRank=65.3,q75/q25=20.41 attn_vo:H=0.6731,top10E=0.35,eRank=121.0,q75/q25=37.66 mlp_w1:H=0.5373,top10E=0.59,eRank=61.1,q75/q25=4.09 mlp_w2:H=0.6979,top10E=0.35,eRank=106.4,q75/q25=14.17 vo_prod:H=0.5540,top10E=0.54,eRank=42.0,q75/q25=848.97 train_time:70813ms step_avg:70.81ms +[2025-09-02 07:43:40] [Rank 0] step:1001/10000 train_time:70825ms step_avg:70.75ms +[2025-09-02 07:43:40] [Rank 0] step:1001/10000 train_time:70825ms step_avg:70.75ms +[2025-09-02 07:43:41] [Rank 0] step:1021/10000 train_time:72120ms step_avg:70.64ms +[2025-09-02 07:43:41] [Rank 0] step:1021/10000 train_time:72120ms step_avg:70.64ms +[2025-09-02 07:43:43] [Rank 0] step:1041/10000 train_time:73544ms step_avg:70.65ms +[2025-09-02 07:43:43] [Rank 0] step:1041/10000 
train_time:73544ms step_avg:70.65ms +[2025-09-02 07:43:44] [Rank 0] step:1061/10000 train_time:74970ms step_avg:70.66ms +[2025-09-02 07:43:44] [Rank 0] step:1061/10000 train_time:74970ms step_avg:70.66ms +[2025-09-02 07:43:46] [Rank 0] step:1081/10000 train_time:76394ms step_avg:70.67ms +[2025-09-02 07:43:46] [Rank 0] step:1081/10000 train_time:76394ms step_avg:70.67ms +[2025-09-02 07:43:47] [Rank 0] step:1101/10000 train_time:77818ms step_avg:70.68ms +[2025-09-02 07:43:47] [Rank 0] step:1101/10000 train_time:77818ms step_avg:70.68ms +[2025-09-02 07:43:48] [Rank 0] step:1121/10000 train_time:79242ms step_avg:70.69ms +[2025-09-02 07:43:48] [Rank 0] step:1121/10000 train_time:79242ms step_avg:70.69ms +[2025-09-02 07:43:50] [Rank 0] step:1141/10000 train_time:80667ms step_avg:70.70ms +[2025-09-02 07:43:50] [Rank 0] step:1141/10000 train_time:80667ms step_avg:70.70ms +[2025-09-02 07:43:51] [Rank 0] step:1161/10000 train_time:82091ms step_avg:70.71ms +[2025-09-02 07:43:51] [Rank 0] step:1161/10000 train_time:82091ms step_avg:70.71ms +[2025-09-02 07:43:53] [Rank 0] step:1181/10000 train_time:83520ms step_avg:70.72ms +[2025-09-02 07:43:53] [Rank 0] step:1181/10000 train_time:83520ms step_avg:70.72ms +[2025-09-02 07:43:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:43:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:44:06] [Rank 0] PRINT: step:1200/10000 val_loss:5.1074 svd_entropy: attn_qk:H=0.6097,top10E=0.48,eRank=71.2,q75/q25=23.93 attn_vo:H=0.6935,top10E=0.32,eRank=132.7,q75/q25=49.70 mlp_w1:H=0.5580,top10E=0.56,eRank=67.1,q75/q25=4.45 mlp_w2:H=0.7182,top10E=0.31,eRank=121.7,q75/q25=17.14 vo_prod:H=0.5783,top10E=0.49,eRank=49.3,q75/q25=1744.02 train_time:85087ms step_avg:70.91ms +[2025-09-02 07:44:06] [Rank 0] PRINT: step:1200/10000 val_loss:5.1074 svd_entropy: attn_qk:H=0.6097,top10E=0.48,eRank=71.2,q75/q25=23.93 attn_vo:H=0.6935,top10E=0.32,eRank=132.7,q75/q25=49.70 mlp_w1:H=0.5580,top10E=0.56,eRank=67.1,q75/q25=4.45 mlp_w2:H=0.7182,top10E=0.31,eRank=121.7,q75/q25=17.14 vo_prod:H=0.5783,top10E=0.49,eRank=49.3,q75/q25=1744.02 train_time:85087ms step_avg:70.91ms +[2025-09-02 07:44:06] [Rank 0] step:1201/10000 train_time:85099ms step_avg:70.86ms +[2025-09-02 07:44:06] [Rank 0] step:1201/10000 train_time:85099ms step_avg:70.86ms +[2025-09-02 07:44:07] [Rank 0] step:1221/10000 train_time:86405ms step_avg:70.77ms +[2025-09-02 07:44:07] [Rank 0] step:1221/10000 train_time:86405ms step_avg:70.77ms +[2025-09-02 07:44:09] [Rank 0] step:1241/10000 train_time:87931ms step_avg:70.85ms +[2025-09-02 07:44:09] [Rank 0] step:1241/10000 train_time:87931ms step_avg:70.85ms +[2025-09-02 07:44:10] [Rank 0] step:1261/10000 train_time:89355ms step_avg:70.86ms +[2025-09-02 07:44:10] [Rank 0] step:1261/10000 train_time:89355ms step_avg:70.86ms +[2025-09-02 07:44:12] [Rank 0] step:1281/10000 train_time:90780ms step_avg:70.87ms +[2025-09-02 07:44:12] [Rank 0] step:1281/10000 train_time:90780ms step_avg:70.87ms +[2025-09-02 07:44:13] [Rank 0] step:1301/10000 train_time:92205ms step_avg:70.87ms +[2025-09-02 07:44:13] [Rank 0] step:1301/10000 train_time:92205ms step_avg:70.87ms +[2025-09-02 07:44:14] [Rank 0] step:1321/10000 train_time:93630ms step_avg:70.88ms +[2025-09-02 07:44:14] [Rank 0] step:1321/10000 train_time:93630ms step_avg:70.88ms +[2025-09-02 07:44:16] [Rank 0] 
step:1341/10000 train_time:95056ms step_avg:70.88ms +[2025-09-02 07:44:16] [Rank 0] step:1341/10000 train_time:95056ms step_avg:70.88ms +[2025-09-02 07:44:17] [Rank 0] step:1361/10000 train_time:96483ms step_avg:70.89ms +[2025-09-02 07:44:17] [Rank 0] step:1361/10000 train_time:96483ms step_avg:70.89ms +[2025-09-02 07:44:19] [Rank 0] step:1381/10000 train_time:97910ms step_avg:70.90ms +[2025-09-02 07:44:19] [Rank 0] step:1381/10000 train_time:97910ms step_avg:70.90ms +[2025-09-02 07:44:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:44:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:44:32] [Rank 0] PRINT: step:1400/10000 val_loss:4.9896 svd_entropy: attn_qk:H=0.6246,top10E=0.45,eRank=76.7,q75/q25=28.68 attn_vo:H=0.7103,top10E=0.30,eRank=143.9,q75/q25=62.18 mlp_w1:H=0.5763,top10E=0.54,eRank=73.1,q75/q25=4.86 mlp_w2:H=0.7351,top10E=0.29,eRank=136.5,q75/q25=20.34 vo_prod:H=0.5978,top10E=0.45,eRank=56.1,q75/q25=3099.89 train_time:99480ms step_avg:71.06ms +[2025-09-02 07:44:32] [Rank 0] PRINT: step:1400/10000 val_loss:4.9896 svd_entropy: attn_qk:H=0.6246,top10E=0.45,eRank=76.7,q75/q25=28.68 attn_vo:H=0.7103,top10E=0.30,eRank=143.9,q75/q25=62.18 mlp_w1:H=0.5763,top10E=0.54,eRank=73.1,q75/q25=4.86 mlp_w2:H=0.7351,top10E=0.29,eRank=136.5,q75/q25=20.34 vo_prod:H=0.5978,top10E=0.45,eRank=56.1,q75/q25=3099.89 train_time:99480ms step_avg:71.06ms +[2025-09-02 07:44:32] [Rank 0] step:1401/10000 train_time:99492ms step_avg:71.01ms +[2025-09-02 07:44:32] [Rank 0] step:1401/10000 train_time:99492ms step_avg:71.01ms +[2025-09-02 07:44:33] [Rank 0] step:1421/10000 train_time:100777ms step_avg:70.92ms +[2025-09-02 07:44:33] [Rank 0] step:1421/10000 train_time:100777ms step_avg:70.92ms +[2025-09-02 07:44:35] [Rank 0] step:1441/10000 train_time:102201ms step_avg:70.92ms +[2025-09-02 07:44:35] 
[Rank 0] step:1441/10000 train_time:102201ms step_avg:70.92ms +[2025-09-02 07:44:36] [Rank 0] step:1461/10000 train_time:103627ms step_avg:70.93ms +[2025-09-02 07:44:36] [Rank 0] step:1461/10000 train_time:103627ms step_avg:70.93ms +[2025-09-02 07:44:37] [Rank 0] step:1481/10000 train_time:105053ms step_avg:70.93ms +[2025-09-02 07:44:37] [Rank 0] step:1481/10000 train_time:105053ms step_avg:70.93ms +[2025-09-02 07:44:39] [Rank 0] step:1501/10000 train_time:106488ms step_avg:70.94ms +[2025-09-02 07:44:39] [Rank 0] step:1501/10000 train_time:106488ms step_avg:70.94ms +[2025-09-02 07:44:40] [Rank 0] step:1521/10000 train_time:107925ms step_avg:70.96ms +[2025-09-02 07:44:40] [Rank 0] step:1521/10000 train_time:107925ms step_avg:70.96ms +[2025-09-02 07:44:42] [Rank 0] step:1541/10000 train_time:109361ms step_avg:70.97ms +[2025-09-02 07:44:42] [Rank 0] step:1541/10000 train_time:109361ms step_avg:70.97ms +[2025-09-02 07:44:43] [Rank 0] step:1561/10000 train_time:110797ms step_avg:70.98ms +[2025-09-02 07:44:43] [Rank 0] step:1561/10000 train_time:110797ms step_avg:70.98ms +[2025-09-02 07:44:45] [Rank 0] step:1581/10000 train_time:112232ms step_avg:70.99ms +[2025-09-02 07:44:45] [Rank 0] step:1581/10000 train_time:112232ms step_avg:70.99ms +[2025-09-02 07:44:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:44:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:44:58] [Rank 0] PRINT: step:1600/10000 val_loss:4.8596 svd_entropy: attn_qk:H=0.6371,top10E=0.43,eRank=81.5,q75/q25=34.54 attn_vo:H=0.7244,top10E=0.28,eRank=154.4,q75/q25=72.57 mlp_w1:H=0.5931,top10E=0.52,eRank=78.9,q75/q25=5.29 mlp_w2:H=0.7497,top10E=0.26,eRank=150.6,q75/q25=23.36 vo_prod:H=0.6140,top10E=0.42,eRank=62.5,q75/q25=4736.25 train_time:113812ms step_avg:71.13ms +[2025-09-02 07:44:58] [Rank 0] PRINT: step:1600/10000 val_loss:4.8596 svd_entropy: attn_qk:H=0.6371,top10E=0.43,eRank=81.5,q75/q25=34.54 attn_vo:H=0.7244,top10E=0.28,eRank=154.4,q75/q25=72.57 mlp_w1:H=0.5931,top10E=0.52,eRank=78.9,q75/q25=5.29 mlp_w2:H=0.7497,top10E=0.26,eRank=150.6,q75/q25=23.36 vo_prod:H=0.6140,top10E=0.42,eRank=62.5,q75/q25=4736.25 train_time:113812ms step_avg:71.13ms +[2025-09-02 07:44:58] [Rank 0] step:1601/10000 train_time:113824ms step_avg:71.10ms +[2025-09-02 07:44:58] [Rank 0] step:1601/10000 train_time:113824ms step_avg:71.10ms +[2025-09-02 07:44:59] [Rank 0] step:1621/10000 train_time:115135ms step_avg:71.03ms +[2025-09-02 07:44:59] [Rank 0] step:1621/10000 train_time:115135ms step_avg:71.03ms +[2025-09-02 07:45:01] [Rank 0] step:1641/10000 train_time:116569ms step_avg:71.04ms +[2025-09-02 07:45:01] [Rank 0] step:1641/10000 train_time:116569ms step_avg:71.04ms +[2025-09-02 07:45:02] [Rank 0] step:1661/10000 train_time:118003ms step_avg:71.04ms +[2025-09-02 07:45:02] [Rank 0] step:1661/10000 train_time:118003ms step_avg:71.04ms +[2025-09-02 07:45:03] [Rank 0] step:1681/10000 train_time:119440ms step_avg:71.05ms +[2025-09-02 07:45:03] [Rank 0] step:1681/10000 train_time:119440ms step_avg:71.05ms +[2025-09-02 07:45:05] [Rank 0] step:1701/10000 train_time:120876ms step_avg:71.06ms +[2025-09-02 07:45:05] [Rank 0] step:1701/10000 train_time:120876ms step_avg:71.06ms +[2025-09-02 07:45:06] [Rank 0] step:1721/10000 train_time:122311ms step_avg:71.07ms +[2025-09-02 07:45:06] [Rank 0] step:1721/10000 train_time:122311ms step_avg:71.07ms +[2025-09-02 07:45:08] 
[Rank 0] step:1741/10000 train_time:123746ms step_avg:71.08ms +[2025-09-02 07:45:08] [Rank 0] step:1741/10000 train_time:123746ms step_avg:71.08ms +[2025-09-02 07:45:09] [Rank 0] step:1761/10000 train_time:125182ms step_avg:71.09ms +[2025-09-02 07:45:09] [Rank 0] step:1761/10000 train_time:125182ms step_avg:71.09ms +[2025-09-02 07:45:11] [Rank 0] step:1781/10000 train_time:126618ms step_avg:71.09ms +[2025-09-02 07:45:11] [Rank 0] step:1781/10000 train_time:126618ms step_avg:71.09ms +[2025-09-02 07:45:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:45:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:45:24] [Rank 0] PRINT: step:1800/10000 val_loss:4.7615 svd_entropy: attn_qk:H=0.6481,top10E=0.41,eRank=86.1,q75/q25=40.63 attn_vo:H=0.7361,top10E=0.26,eRank=164.1,q75/q25=81.08 mlp_w1:H=0.6071,top10E=0.50,eRank=84.2,q75/q25=5.78 mlp_w2:H=0.7601,top10E=0.25,eRank=161.9,q75/q25=25.63 vo_prod:H=0.6276,top10E=0.40,eRank=68.4,q75/q25=6507.67 train_time:128197ms step_avg:71.22ms +[2025-09-02 07:45:24] [Rank 0] PRINT: step:1800/10000 val_loss:4.7615 svd_entropy: attn_qk:H=0.6481,top10E=0.41,eRank=86.1,q75/q25=40.63 attn_vo:H=0.7361,top10E=0.26,eRank=164.1,q75/q25=81.08 mlp_w1:H=0.6071,top10E=0.50,eRank=84.2,q75/q25=5.78 mlp_w2:H=0.7601,top10E=0.25,eRank=161.9,q75/q25=25.63 vo_prod:H=0.6276,top10E=0.40,eRank=68.4,q75/q25=6507.67 train_time:128197ms step_avg:71.22ms +[2025-09-02 07:45:24] [Rank 0] step:1801/10000 train_time:128209ms step_avg:71.19ms +[2025-09-02 07:45:24] [Rank 0] step:1801/10000 train_time:128209ms step_avg:71.19ms +[2025-09-02 07:45:25] [Rank 0] step:1821/10000 train_time:129521ms step_avg:71.13ms +[2025-09-02 07:45:25] [Rank 0] step:1821/10000 train_time:129521ms step_avg:71.13ms +[2025-09-02 07:45:27] [Rank 0] step:1841/10000 train_time:130957ms step_avg:71.13ms 
+[2025-09-02 07:45:27] [Rank 0] step:1841/10000 train_time:130957ms step_avg:71.13ms +[2025-09-02 07:45:28] [Rank 0] step:1861/10000 train_time:132391ms step_avg:71.14ms +[2025-09-02 07:45:28] [Rank 0] step:1861/10000 train_time:132391ms step_avg:71.14ms +[2025-09-02 07:45:30] [Rank 0] step:1881/10000 train_time:133829ms step_avg:71.15ms +[2025-09-02 07:45:30] [Rank 0] step:1881/10000 train_time:133829ms step_avg:71.15ms +[2025-09-02 07:45:31] [Rank 0] step:1901/10000 train_time:135268ms step_avg:71.16ms +[2025-09-02 07:45:31] [Rank 0] step:1901/10000 train_time:135268ms step_avg:71.16ms +[2025-09-02 07:45:33] [Rank 0] step:1921/10000 train_time:136704ms step_avg:71.16ms +[2025-09-02 07:45:33] [Rank 0] step:1921/10000 train_time:136704ms step_avg:71.16ms +[2025-09-02 07:45:34] [Rank 0] step:1941/10000 train_time:138140ms step_avg:71.17ms +[2025-09-02 07:45:34] [Rank 0] step:1941/10000 train_time:138140ms step_avg:71.17ms +[2025-09-02 07:45:35] [Rank 0] step:1961/10000 train_time:139576ms step_avg:71.18ms +[2025-09-02 07:45:35] [Rank 0] step:1961/10000 train_time:139576ms step_avg:71.18ms +[2025-09-02 07:45:37] [Rank 0] step:1981/10000 train_time:141014ms step_avg:71.18ms +[2025-09-02 07:45:37] [Rank 0] step:1981/10000 train_time:141014ms step_avg:71.18ms +[2025-09-02 07:45:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:45:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:45:50] [Rank 0] PRINT: step:2000/10000 val_loss:4.6975 svd_entropy: attn_qk:H=0.6575,top10E=0.39,eRank=90.4,q75/q25=47.45 attn_vo:H=0.7461,top10E=0.25,eRank=172.8,q75/q25=88.23 mlp_w1:H=0.6195,top10E=0.48,eRank=89.1,q75/q25=6.23 mlp_w2:H=0.7684,top10E=0.24,eRank=171.5,q75/q25=28.36 vo_prod:H=0.6394,top10E=0.38,eRank=74.0,q75/q25=8165.25 train_time:142596ms step_avg:71.30ms +[2025-09-02 07:45:50] [Rank 0] PRINT: step:2000/10000 val_loss:4.6975 svd_entropy: attn_qk:H=0.6575,top10E=0.39,eRank=90.4,q75/q25=47.45 attn_vo:H=0.7461,top10E=0.25,eRank=172.8,q75/q25=88.23 mlp_w1:H=0.6195,top10E=0.48,eRank=89.1,q75/q25=6.23 mlp_w2:H=0.7684,top10E=0.24,eRank=171.5,q75/q25=28.36 vo_prod:H=0.6394,top10E=0.38,eRank=74.0,q75/q25=8165.25 train_time:142596ms step_avg:71.30ms +[2025-09-02 07:45:50] [Rank 0] step:2001/10000 train_time:142608ms step_avg:71.27ms +[2025-09-02 07:45:50] [Rank 0] step:2001/10000 train_time:142608ms step_avg:71.27ms +[2025-09-02 07:45:52] [Rank 0] step:2021/10000 train_time:143916ms step_avg:71.21ms +[2025-09-02 07:45:52] [Rank 0] step:2021/10000 train_time:143916ms step_avg:71.21ms +[2025-09-02 07:45:53] [Rank 0] step:2041/10000 train_time:145468ms step_avg:71.27ms +[2025-09-02 07:45:53] [Rank 0] step:2041/10000 train_time:145468ms step_avg:71.27ms +[2025-09-02 07:45:55] [Rank 0] step:2061/10000 train_time:146903ms step_avg:71.28ms +[2025-09-02 07:45:55] [Rank 0] step:2061/10000 train_time:146903ms step_avg:71.28ms +[2025-09-02 07:45:56] [Rank 0] step:2081/10000 train_time:148339ms step_avg:71.28ms +[2025-09-02 07:45:56] [Rank 0] step:2081/10000 train_time:148339ms step_avg:71.28ms +[2025-09-02 07:45:58] [Rank 0] step:2101/10000 train_time:149826ms step_avg:71.31ms +[2025-09-02 07:45:58] [Rank 0] step:2101/10000 train_time:149826ms step_avg:71.31ms +[2025-09-02 07:45:59] [Rank 0] step:2121/10000 train_time:151262ms step_avg:71.32ms +[2025-09-02 07:45:59] [Rank 0] step:2121/10000 train_time:151262ms step_avg:71.32ms +[2025-09-02 07:46:00] 
[Rank 0] step:2141/10000 train_time:152699ms step_avg:71.32ms +[2025-09-02 07:46:00] [Rank 0] step:2141/10000 train_time:152699ms step_avg:71.32ms +[2025-09-02 07:46:02] [Rank 0] step:2161/10000 train_time:154136ms step_avg:71.33ms +[2025-09-02 07:46:02] [Rank 0] step:2161/10000 train_time:154136ms step_avg:71.33ms +[2025-09-02 07:46:03] [Rank 0] step:2181/10000 train_time:155573ms step_avg:71.33ms +[2025-09-02 07:46:03] [Rank 0] step:2181/10000 train_time:155573ms step_avg:71.33ms +[2025-09-02 07:46:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:46:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:46:17] [Rank 0] PRINT: step:2200/10000 val_loss:4.6234 svd_entropy: attn_qk:H=0.6654,top10E=0.38,eRank=94.3,q75/q25=53.46 attn_vo:H=0.7543,top10E=0.24,eRank=180.7,q75/q25=92.19 mlp_w1:H=0.6304,top10E=0.47,eRank=93.7,q75/q25=6.73 mlp_w2:H=0.7753,top10E=0.22,eRank=179.9,q75/q25=30.85 vo_prod:H=0.6490,top10E=0.36,eRank=78.9,q75/q25=9604.19 train_time:157156ms step_avg:71.43ms +[2025-09-02 07:46:17] [Rank 0] PRINT: step:2200/10000 val_loss:4.6234 svd_entropy: attn_qk:H=0.6654,top10E=0.38,eRank=94.3,q75/q25=53.46 attn_vo:H=0.7543,top10E=0.24,eRank=180.7,q75/q25=92.19 mlp_w1:H=0.6304,top10E=0.47,eRank=93.7,q75/q25=6.73 mlp_w2:H=0.7753,top10E=0.22,eRank=179.9,q75/q25=30.85 vo_prod:H=0.6490,top10E=0.36,eRank=78.9,q75/q25=9604.19 train_time:157156ms step_avg:71.43ms +[2025-09-02 07:46:17] [Rank 0] step:2201/10000 train_time:157168ms step_avg:71.41ms +[2025-09-02 07:46:17] [Rank 0] step:2201/10000 train_time:157168ms step_avg:71.41ms +[2025-09-02 07:46:18] [Rank 0] step:2221/10000 train_time:158477ms step_avg:71.35ms +[2025-09-02 07:46:18] [Rank 0] step:2221/10000 train_time:158477ms step_avg:71.35ms +[2025-09-02 07:46:20] [Rank 0] step:2241/10000 train_time:159943ms step_avg:71.37ms 
+[2025-09-02 07:46:20] [Rank 0] step:2241/10000 train_time:159943ms step_avg:71.37ms +[2025-09-02 07:46:21] [Rank 0] step:2261/10000 train_time:161422ms step_avg:71.39ms +[2025-09-02 07:46:21] [Rank 0] step:2261/10000 train_time:161422ms step_avg:71.39ms +[2025-09-02 07:46:23] [Rank 0] step:2281/10000 train_time:162901ms step_avg:71.42ms +[2025-09-02 07:46:23] [Rank 0] step:2281/10000 train_time:162901ms step_avg:71.42ms +[2025-09-02 07:46:24] [Rank 0] step:2301/10000 train_time:164379ms step_avg:71.44ms +[2025-09-02 07:46:24] [Rank 0] step:2301/10000 train_time:164379ms step_avg:71.44ms +[2025-09-02 07:46:25] [Rank 0] step:2321/10000 train_time:165859ms step_avg:71.46ms +[2025-09-02 07:46:25] [Rank 0] step:2321/10000 train_time:165859ms step_avg:71.46ms +[2025-09-02 07:46:27] [Rank 0] step:2341/10000 train_time:167338ms step_avg:71.48ms +[2025-09-02 07:46:27] [Rank 0] step:2341/10000 train_time:167338ms step_avg:71.48ms +[2025-09-02 07:46:28] [Rank 0] step:2361/10000 train_time:168817ms step_avg:71.50ms +[2025-09-02 07:46:28] [Rank 0] step:2361/10000 train_time:168817ms step_avg:71.50ms +[2025-09-02 07:46:30] [Rank 0] step:2381/10000 train_time:170298ms step_avg:71.52ms +[2025-09-02 07:46:30] [Rank 0] step:2381/10000 train_time:170298ms step_avg:71.52ms +[2025-09-02 07:46:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:46:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:46:43] [Rank 0] PRINT: step:2400/10000 val_loss:4.5508 svd_entropy: attn_qk:H=0.6722,top10E=0.37,eRank=97.8,q75/q25=60.19 attn_vo:H=0.7618,top10E=0.23,eRank=188.1,q75/q25=95.86 mlp_w1:H=0.6402,top10E=0.46,eRank=98.1,q75/q25=7.21 mlp_w2:H=0.7810,top10E=0.21,eRank=187.4,q75/q25=33.90 vo_prod:H=0.6579,top10E=0.35,eRank=83.8,q75/q25=10865.29 train_time:171926ms step_avg:71.64ms +[2025-09-02 07:46:43] [Rank 0] PRINT: step:2400/10000 val_loss:4.5508 svd_entropy: attn_qk:H=0.6722,top10E=0.37,eRank=97.8,q75/q25=60.19 attn_vo:H=0.7618,top10E=0.23,eRank=188.1,q75/q25=95.86 mlp_w1:H=0.6402,top10E=0.46,eRank=98.1,q75/q25=7.21 mlp_w2:H=0.7810,top10E=0.21,eRank=187.4,q75/q25=33.90 vo_prod:H=0.6579,top10E=0.35,eRank=83.8,q75/q25=10865.29 train_time:171926ms step_avg:71.64ms +[2025-09-02 07:46:43] [Rank 0] step:2401/10000 train_time:171938ms step_avg:71.61ms +[2025-09-02 07:46:43] [Rank 0] step:2401/10000 train_time:171938ms step_avg:71.61ms +[2025-09-02 07:46:45] [Rank 0] step:2421/10000 train_time:173288ms step_avg:71.58ms +[2025-09-02 07:46:45] [Rank 0] step:2421/10000 train_time:173288ms step_avg:71.58ms +[2025-09-02 07:46:46] [Rank 0] step:2441/10000 train_time:174765ms step_avg:71.60ms +[2025-09-02 07:46:46] [Rank 0] step:2441/10000 train_time:174765ms step_avg:71.60ms +[2025-09-02 07:46:48] [Rank 0] step:2461/10000 train_time:176243ms step_avg:71.61ms +[2025-09-02 07:46:48] [Rank 0] step:2461/10000 train_time:176243ms step_avg:71.61ms +[2025-09-02 07:46:49] [Rank 0] step:2481/10000 train_time:177721ms step_avg:71.63ms +[2025-09-02 07:46:49] [Rank 0] step:2481/10000 train_time:177721ms step_avg:71.63ms +[2025-09-02 07:46:51] [Rank 0] step:2501/10000 train_time:179200ms step_avg:71.65ms +[2025-09-02 07:46:51] [Rank 0] step:2501/10000 train_time:179200ms step_avg:71.65ms +[2025-09-02 07:46:52] [Rank 0] step:2521/10000 train_time:180680ms step_avg:71.67ms +[2025-09-02 07:46:52] [Rank 0] step:2521/10000 train_time:180680ms step_avg:71.67ms +[2025-09-02 07:46:54] 
[Rank 0] step:2541/10000 train_time:182159ms step_avg:71.69ms +[2025-09-02 07:46:54] [Rank 0] step:2541/10000 train_time:182159ms step_avg:71.69ms +[2025-09-02 07:46:55] [Rank 0] step:2561/10000 train_time:183638ms step_avg:71.71ms +[2025-09-02 07:46:55] [Rank 0] step:2561/10000 train_time:183638ms step_avg:71.71ms +[2025-09-02 07:46:57] [Rank 0] step:2581/10000 train_time:185119ms step_avg:71.72ms +[2025-09-02 07:46:57] [Rank 0] step:2581/10000 train_time:185119ms step_avg:71.72ms +[2025-09-02 07:46:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:46:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:47:10] [Rank 0] PRINT: step:2600/10000 val_loss:4.4958 svd_entropy: attn_qk:H=0.6788,top10E=0.36,eRank=101.3,q75/q25=66.32 attn_vo:H=0.7684,top10E=0.22,eRank=195.0,q75/q25=97.77 mlp_w1:H=0.6487,top10E=0.44,eRank=102.2,q75/q25=7.72 mlp_w2:H=0.7859,top10E=0.21,eRank=194.0,q75/q25=36.30 vo_prod:H=0.6659,top10E=0.34,eRank=88.4,q75/q25=11768.65 train_time:186748ms step_avg:71.83ms +[2025-09-02 07:47:10] [Rank 0] PRINT: step:2600/10000 val_loss:4.4958 svd_entropy: attn_qk:H=0.6788,top10E=0.36,eRank=101.3,q75/q25=66.32 attn_vo:H=0.7684,top10E=0.22,eRank=195.0,q75/q25=97.77 mlp_w1:H=0.6487,top10E=0.44,eRank=102.2,q75/q25=7.72 mlp_w2:H=0.7859,top10E=0.21,eRank=194.0,q75/q25=36.30 vo_prod:H=0.6659,top10E=0.34,eRank=88.4,q75/q25=11768.65 train_time:186748ms step_avg:71.83ms +[2025-09-02 07:47:10] [Rank 0] step:2601/10000 train_time:186759ms step_avg:71.80ms +[2025-09-02 07:47:10] [Rank 0] step:2601/10000 train_time:186759ms step_avg:71.80ms +[2025-09-02 07:47:11] [Rank 0] step:2621/10000 train_time:188104ms step_avg:71.77ms +[2025-09-02 07:47:11] [Rank 0] step:2621/10000 train_time:188104ms step_avg:71.77ms +[2025-09-02 07:47:13] [Rank 0] step:2641/10000 train_time:189582ms 
step_avg:71.78ms +[2025-09-02 07:47:13] [Rank 0] step:2641/10000 train_time:189582ms step_avg:71.78ms +[2025-09-02 07:47:14] [Rank 0] step:2661/10000 train_time:191061ms step_avg:71.80ms +[2025-09-02 07:47:14] [Rank 0] step:2661/10000 train_time:191061ms step_avg:71.80ms +[2025-09-02 07:47:16] [Rank 0] step:2681/10000 train_time:192540ms step_avg:71.82ms +[2025-09-02 07:47:16] [Rank 0] step:2681/10000 train_time:192540ms step_avg:71.82ms +[2025-09-02 07:47:17] [Rank 0] step:2701/10000 train_time:194020ms step_avg:71.83ms +[2025-09-02 07:47:17] [Rank 0] step:2701/10000 train_time:194020ms step_avg:71.83ms +[2025-09-02 07:47:19] [Rank 0] step:2721/10000 train_time:195500ms step_avg:71.85ms +[2025-09-02 07:47:19] [Rank 0] step:2721/10000 train_time:195500ms step_avg:71.85ms +[2025-09-02 07:47:20] [Rank 0] step:2741/10000 train_time:196979ms step_avg:71.86ms +[2025-09-02 07:47:20] [Rank 0] step:2741/10000 train_time:196979ms step_avg:71.86ms +[2025-09-02 07:47:22] [Rank 0] step:2761/10000 train_time:198461ms step_avg:71.88ms +[2025-09-02 07:47:22] [Rank 0] step:2761/10000 train_time:198461ms step_avg:71.88ms +[2025-09-02 07:47:23] [Rank 0] step:2781/10000 train_time:199940ms step_avg:71.90ms +[2025-09-02 07:47:23] [Rank 0] step:2781/10000 train_time:199940ms step_avg:71.90ms +[2025-09-02 07:47:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:47:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:47:36] [Rank 0] PRINT: step:2800/10000 val_loss:4.4588 svd_entropy: attn_qk:H=0.6849,top10E=0.35,eRank=104.7,q75/q25=72.25 attn_vo:H=0.7744,top10E=0.21,eRank=201.6,q75/q25=99.09 mlp_w1:H=0.6569,top10E=0.43,eRank=106.2,q75/q25=8.19 mlp_w2:H=0.7905,top10E=0.20,eRank=200.4,q75/q25=38.31 vo_prod:H=0.6731,top10E=0.33,eRank=92.7,q75/q25=12014.56 train_time:201569ms step_avg:71.99ms +[2025-09-02 07:47:36] [Rank 0] PRINT: step:2800/10000 val_loss:4.4588 svd_entropy: attn_qk:H=0.6849,top10E=0.35,eRank=104.7,q75/q25=72.25 attn_vo:H=0.7744,top10E=0.21,eRank=201.6,q75/q25=99.09 mlp_w1:H=0.6569,top10E=0.43,eRank=106.2,q75/q25=8.19 mlp_w2:H=0.7905,top10E=0.20,eRank=200.4,q75/q25=38.31 vo_prod:H=0.6731,top10E=0.33,eRank=92.7,q75/q25=12014.56 train_time:201569ms step_avg:71.99ms +[2025-09-02 07:47:36] [Rank 0] step:2801/10000 train_time:201580ms step_avg:71.97ms +[2025-09-02 07:47:36] [Rank 0] step:2801/10000 train_time:201580ms step_avg:71.97ms +[2025-09-02 07:47:38] [Rank 0] step:2821/10000 train_time:202929ms step_avg:71.94ms +[2025-09-02 07:47:38] [Rank 0] step:2821/10000 train_time:202929ms step_avg:71.94ms +[2025-09-02 07:47:39] [Rank 0] step:2841/10000 train_time:204408ms step_avg:71.95ms +[2025-09-02 07:47:39] [Rank 0] step:2841/10000 train_time:204408ms step_avg:71.95ms +[2025-09-02 07:47:41] [Rank 0] step:2861/10000 train_time:205888ms step_avg:71.96ms +[2025-09-02 07:47:41] [Rank 0] step:2861/10000 train_time:205888ms step_avg:71.96ms +[2025-09-02 07:47:42] [Rank 0] step:2881/10000 train_time:207368ms step_avg:71.98ms +[2025-09-02 07:47:42] [Rank 0] step:2881/10000 train_time:207368ms step_avg:71.98ms +[2025-09-02 07:47:44] [Rank 0] step:2901/10000 train_time:208847ms step_avg:71.99ms +[2025-09-02 07:47:44] [Rank 0] step:2901/10000 train_time:208847ms step_avg:71.99ms +[2025-09-02 07:47:45] [Rank 0] step:2921/10000 train_time:210328ms step_avg:72.01ms +[2025-09-02 07:47:45] [Rank 0] step:2921/10000 train_time:210328ms step_avg:72.01ms +[2025-09-02 
07:47:47] [Rank 0] step:2941/10000 train_time:211810ms step_avg:72.02ms +[2025-09-02 07:47:47] [Rank 0] step:2941/10000 train_time:211810ms step_avg:72.02ms +[2025-09-02 07:47:48] [Rank 0] step:2961/10000 train_time:213290ms step_avg:72.03ms +[2025-09-02 07:47:48] [Rank 0] step:2961/10000 train_time:213290ms step_avg:72.03ms +[2025-09-02 07:47:50] [Rank 0] step:2981/10000 train_time:214777ms step_avg:72.05ms +[2025-09-02 07:47:50] [Rank 0] step:2981/10000 train_time:214777ms step_avg:72.05ms +[2025-09-02 07:47:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:47:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:48:03] [Rank 0] PRINT: step:3000/10000 val_loss:4.4159 svd_entropy: attn_qk:H=0.6903,top10E=0.34,eRank=107.9,q75/q25=77.64 attn_vo:H=0.7799,top10E=0.20,eRank=207.7,q75/q25=98.90 mlp_w1:H=0.6638,top10E=0.42,eRank=109.8,q75/q25=8.69 mlp_w2:H=0.7946,top10E=0.19,eRank=206.3,q75/q25=40.20 vo_prod:H=0.6794,top10E=0.32,eRank=96.7,q75/q25=12096.34 train_time:216414ms step_avg:72.14ms +[2025-09-02 07:48:03] [Rank 0] PRINT: step:3000/10000 val_loss:4.4159 svd_entropy: attn_qk:H=0.6903,top10E=0.34,eRank=107.9,q75/q25=77.64 attn_vo:H=0.7799,top10E=0.20,eRank=207.7,q75/q25=98.90 mlp_w1:H=0.6638,top10E=0.42,eRank=109.8,q75/q25=8.69 mlp_w2:H=0.7946,top10E=0.19,eRank=206.3,q75/q25=40.20 vo_prod:H=0.6794,top10E=0.32,eRank=96.7,q75/q25=12096.34 train_time:216414ms step_avg:72.14ms +[2025-09-02 07:48:03] [Rank 0] step:3001/10000 train_time:216426ms step_avg:72.12ms +[2025-09-02 07:48:03] [Rank 0] step:3001/10000 train_time:216426ms step_avg:72.12ms +[2025-09-02 07:48:04] [Rank 0] step:3021/10000 train_time:217779ms step_avg:72.09ms +[2025-09-02 07:48:04] [Rank 0] step:3021/10000 train_time:217779ms step_avg:72.09ms +[2025-09-02 07:48:06] [Rank 0] step:3041/10000 train_time:219269ms 
step_avg:72.10ms +[2025-09-02 07:48:06] [Rank 0] step:3041/10000 train_time:219269ms step_avg:72.10ms +[2025-09-02 07:48:07] [Rank 0] step:3061/10000 train_time:220756ms step_avg:72.12ms +[2025-09-02 07:48:07] [Rank 0] step:3061/10000 train_time:220756ms step_avg:72.12ms +[2025-09-02 07:48:09] [Rank 0] step:3081/10000 train_time:222245ms step_avg:72.13ms +[2025-09-02 07:48:09] [Rank 0] step:3081/10000 train_time:222245ms step_avg:72.13ms +[2025-09-02 07:48:10] [Rank 0] step:3101/10000 train_time:223735ms step_avg:72.15ms +[2025-09-02 07:48:10] [Rank 0] step:3101/10000 train_time:223735ms step_avg:72.15ms +[2025-09-02 07:48:12] [Rank 0] step:3121/10000 train_time:225224ms step_avg:72.16ms +[2025-09-02 07:48:12] [Rank 0] step:3121/10000 train_time:225224ms step_avg:72.16ms +[2025-09-02 07:48:13] [Rank 0] step:3141/10000 train_time:226714ms step_avg:72.18ms +[2025-09-02 07:48:13] [Rank 0] step:3141/10000 train_time:226714ms step_avg:72.18ms +[2025-09-02 07:48:15] [Rank 0] step:3161/10000 train_time:228203ms step_avg:72.19ms +[2025-09-02 07:48:15] [Rank 0] step:3161/10000 train_time:228203ms step_avg:72.19ms +[2025-09-02 07:48:16] [Rank 0] step:3181/10000 train_time:229693ms step_avg:72.21ms +[2025-09-02 07:48:16] [Rank 0] step:3181/10000 train_time:229693ms step_avg:72.21ms +[2025-09-02 07:48:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:48:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:48:29] [Rank 0] PRINT: step:3200/10000 val_loss:4.3791 svd_entropy: attn_qk:H=0.6952,top10E=0.33,eRank=110.9,q75/q25=82.22 attn_vo:H=0.7847,top10E=0.20,eRank=213.3,q75/q25=98.34 mlp_w1:H=0.6705,top10E=0.41,eRank=113.5,q75/q25=9.22 mlp_w2:H=0.7979,top10E=0.19,eRank=211.4,q75/q25=41.65 vo_prod:H=0.6849,top10E=0.31,eRank=100.3,q75/q25=12058.82 train_time:231334ms step_avg:72.29ms +[2025-09-02 07:48:29] [Rank 0] PRINT: step:3200/10000 val_loss:4.3791 svd_entropy: attn_qk:H=0.6952,top10E=0.33,eRank=110.9,q75/q25=82.22 attn_vo:H=0.7847,top10E=0.20,eRank=213.3,q75/q25=98.34 mlp_w1:H=0.6705,top10E=0.41,eRank=113.5,q75/q25=9.22 mlp_w2:H=0.7979,top10E=0.19,eRank=211.4,q75/q25=41.65 vo_prod:H=0.6849,top10E=0.31,eRank=100.3,q75/q25=12058.82 train_time:231334ms step_avg:72.29ms +[2025-09-02 07:48:29] [Rank 0] step:3201/10000 train_time:231345ms step_avg:72.27ms +[2025-09-02 07:48:29] [Rank 0] step:3201/10000 train_time:231345ms step_avg:72.27ms +[2025-09-02 07:48:31] [Rank 0] step:3221/10000 train_time:232690ms step_avg:72.24ms +[2025-09-02 07:48:31] [Rank 0] step:3221/10000 train_time:232690ms step_avg:72.24ms +[2025-09-02 07:48:32] [Rank 0] step:3241/10000 train_time:234185ms step_avg:72.26ms +[2025-09-02 07:48:32] [Rank 0] step:3241/10000 train_time:234185ms step_avg:72.26ms +[2025-09-02 07:48:34] [Rank 0] step:3261/10000 train_time:235672ms step_avg:72.27ms +[2025-09-02 07:48:34] [Rank 0] step:3261/10000 train_time:235672ms step_avg:72.27ms +[2025-09-02 07:48:35] [Rank 0] step:3281/10000 train_time:237160ms step_avg:72.28ms +[2025-09-02 07:48:35] [Rank 0] step:3281/10000 train_time:237160ms step_avg:72.28ms +[2025-09-02 07:48:37] [Rank 0] step:3301/10000 train_time:238646ms step_avg:72.30ms +[2025-09-02 07:48:37] [Rank 0] step:3301/10000 train_time:238646ms step_avg:72.30ms +[2025-09-02 07:48:38] [Rank 0] step:3321/10000 train_time:240133ms step_avg:72.31ms +[2025-09-02 07:48:38] [Rank 0] step:3321/10000 train_time:240133ms step_avg:72.31ms +[2025-09-02 
07:48:40] [Rank 0] step:3341/10000 train_time:241620ms step_avg:72.32ms +[2025-09-02 07:48:40] [Rank 0] step:3341/10000 train_time:241620ms step_avg:72.32ms +[2025-09-02 07:48:41] [Rank 0] step:3361/10000 train_time:243109ms step_avg:72.33ms +[2025-09-02 07:48:41] [Rank 0] step:3361/10000 train_time:243109ms step_avg:72.33ms +[2025-09-02 07:48:43] [Rank 0] step:3381/10000 train_time:244597ms step_avg:72.34ms +[2025-09-02 07:48:43] [Rank 0] step:3381/10000 train_time:244597ms step_avg:72.34ms +[2025-09-02 07:48:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:48:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:48:56] [Rank 0] PRINT: step:3400/10000 val_loss:4.3396 svd_entropy: attn_qk:H=0.7000,top10E=0.33,eRank=113.9,q75/q25=86.01 attn_vo:H=0.7893,top10E=0.19,eRank=218.8,q75/q25=97.29 mlp_w1:H=0.6767,top10E=0.41,eRank=117.0,q75/q25=9.69 mlp_w2:H=0.8017,top10E=0.18,eRank=217.0,q75/q25=42.61 vo_prod:H=0.6904,top10E=0.30,eRank=104.0,q75/q25=11927.34 train_time:246236ms step_avg:72.42ms +[2025-09-02 07:48:56] [Rank 0] PRINT: step:3400/10000 val_loss:4.3396 svd_entropy: attn_qk:H=0.7000,top10E=0.33,eRank=113.9,q75/q25=86.01 attn_vo:H=0.7893,top10E=0.19,eRank=218.8,q75/q25=97.29 mlp_w1:H=0.6767,top10E=0.41,eRank=117.0,q75/q25=9.69 mlp_w2:H=0.8017,top10E=0.18,eRank=217.0,q75/q25=42.61 vo_prod:H=0.6904,top10E=0.30,eRank=104.0,q75/q25=11927.34 train_time:246236ms step_avg:72.42ms +[2025-09-02 07:48:56] [Rank 0] step:3401/10000 train_time:246247ms step_avg:72.40ms +[2025-09-02 07:48:56] [Rank 0] step:3401/10000 train_time:246247ms step_avg:72.40ms +[2025-09-02 07:48:58] [Rank 0] step:3421/10000 train_time:247609ms step_avg:72.38ms +[2025-09-02 07:48:58] [Rank 0] step:3421/10000 train_time:247609ms step_avg:72.38ms +[2025-09-02 07:48:59] [Rank 0] step:3441/10000 train_time:249094ms 
step_avg:72.39ms +[2025-09-02 07:48:59] [Rank 0] step:3441/10000 train_time:249094ms step_avg:72.39ms +[2025-09-02 07:49:01] [Rank 0] step:3461/10000 train_time:250581ms step_avg:72.40ms +[2025-09-02 07:49:01] [Rank 0] step:3461/10000 train_time:250581ms step_avg:72.40ms +[2025-09-02 07:49:02] [Rank 0] step:3481/10000 train_time:252068ms step_avg:72.41ms +[2025-09-02 07:49:02] [Rank 0] step:3481/10000 train_time:252068ms step_avg:72.41ms +[2025-09-02 07:49:04] [Rank 0] step:3501/10000 train_time:253556ms step_avg:72.42ms +[2025-09-02 07:49:04] [Rank 0] step:3501/10000 train_time:253556ms step_avg:72.42ms +[2025-09-02 07:49:05] [Rank 0] step:3521/10000 train_time:255048ms step_avg:72.44ms +[2025-09-02 07:49:05] [Rank 0] step:3521/10000 train_time:255048ms step_avg:72.44ms +[2025-09-02 07:49:06] [Rank 0] step:3541/10000 train_time:256536ms step_avg:72.45ms +[2025-09-02 07:49:06] [Rank 0] step:3541/10000 train_time:256536ms step_avg:72.45ms +[2025-09-02 07:49:08] [Rank 0] step:3561/10000 train_time:258025ms step_avg:72.46ms +[2025-09-02 07:49:08] [Rank 0] step:3561/10000 train_time:258025ms step_avg:72.46ms +[2025-09-02 07:49:09] [Rank 0] step:3581/10000 train_time:259513ms step_avg:72.47ms +[2025-09-02 07:49:09] [Rank 0] step:3581/10000 train_time:259513ms step_avg:72.47ms +[2025-09-02 07:49:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:49:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:49:23] [Rank 0] PRINT: step:3600/10000 val_loss:4.3333 svd_entropy: attn_qk:H=0.7044,top10E=0.32,eRank=116.8,q75/q25=90.06 attn_vo:H=0.7933,top10E=0.19,eRank=223.7,q75/q25=95.54 mlp_w1:H=0.6824,top10E=0.40,eRank=120.3,q75/q25=10.13 mlp_w2:H=0.8052,top10E=0.18,eRank=222.4,q75/q25=43.23 vo_prod:H=0.6953,top10E=0.30,eRank=107.5,q75/q25=11270.99 train_time:261151ms step_avg:72.54ms +[2025-09-02 07:49:23] [Rank 0] PRINT: step:3600/10000 val_loss:4.3333 svd_entropy: attn_qk:H=0.7044,top10E=0.32,eRank=116.8,q75/q25=90.06 attn_vo:H=0.7933,top10E=0.19,eRank=223.7,q75/q25=95.54 mlp_w1:H=0.6824,top10E=0.40,eRank=120.3,q75/q25=10.13 mlp_w2:H=0.8052,top10E=0.18,eRank=222.4,q75/q25=43.23 vo_prod:H=0.6953,top10E=0.30,eRank=107.5,q75/q25=11270.99 train_time:261151ms step_avg:72.54ms +[2025-09-02 07:49:23] [Rank 0] step:3601/10000 train_time:261163ms step_avg:72.53ms +[2025-09-02 07:49:23] [Rank 0] step:3601/10000 train_time:261163ms step_avg:72.53ms +[2025-09-02 07:49:24] [Rank 0] step:3621/10000 train_time:262505ms step_avg:72.50ms +[2025-09-02 07:49:24] [Rank 0] step:3621/10000 train_time:262505ms step_avg:72.50ms +[2025-09-02 07:49:26] [Rank 0] step:3641/10000 train_time:263991ms step_avg:72.51ms +[2025-09-02 07:49:26] [Rank 0] step:3641/10000 train_time:263991ms step_avg:72.51ms +[2025-09-02 07:49:27] [Rank 0] step:3661/10000 train_time:265478ms step_avg:72.52ms +[2025-09-02 07:49:27] [Rank 0] step:3661/10000 train_time:265478ms step_avg:72.52ms +[2025-09-02 07:49:29] [Rank 0] step:3681/10000 train_time:266968ms step_avg:72.53ms +[2025-09-02 07:49:29] [Rank 0] step:3681/10000 train_time:266968ms step_avg:72.53ms +[2025-09-02 07:49:30] [Rank 0] step:3701/10000 train_time:268456ms step_avg:72.54ms +[2025-09-02 07:49:30] [Rank 0] step:3701/10000 train_time:268456ms step_avg:72.54ms +[2025-09-02 07:49:32] [Rank 0] step:3721/10000 train_time:269970ms step_avg:72.55ms +[2025-09-02 07:49:32] [Rank 0] step:3721/10000 train_time:269970ms step_avg:72.55ms +[2025-09-02 
07:49:33] [Rank 0] step:3741/10000 train_time:271495ms step_avg:72.57ms +[2025-09-02 07:49:33] [Rank 0] step:3741/10000 train_time:271495ms step_avg:72.57ms +[2025-09-02 07:49:35] [Rank 0] step:3761/10000 train_time:273018ms step_avg:72.59ms +[2025-09-02 07:49:35] [Rank 0] step:3761/10000 train_time:273018ms step_avg:72.59ms +[2025-09-02 07:49:36] [Rank 0] step:3781/10000 train_time:274542ms step_avg:72.61ms +[2025-09-02 07:49:36] [Rank 0] step:3781/10000 train_time:274542ms step_avg:72.61ms +[2025-09-02 07:49:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:49:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:49:50] [Rank 0] PRINT: step:3800/10000 val_loss:4.2738 svd_entropy: attn_qk:H=0.7080,top10E=0.32,eRank=119.2,q75/q25=94.14 attn_vo:H=0.7970,top10E=0.19,eRank=228.5,q75/q25=93.73 mlp_w1:H=0.6877,top10E=0.39,eRank=123.6,q75/q25=10.50 mlp_w2:H=0.8085,top10E=0.17,eRank=227.4,q75/q25=43.92 vo_prod:H=0.6996,top10E=0.29,eRank=110.7,q75/q25=10738.88 train_time:276221ms step_avg:72.69ms +[2025-09-02 07:49:50] [Rank 0] PRINT: step:3800/10000 val_loss:4.2738 svd_entropy: attn_qk:H=0.7080,top10E=0.32,eRank=119.2,q75/q25=94.14 attn_vo:H=0.7970,top10E=0.19,eRank=228.5,q75/q25=93.73 mlp_w1:H=0.6877,top10E=0.39,eRank=123.6,q75/q25=10.50 mlp_w2:H=0.8085,top10E=0.17,eRank=227.4,q75/q25=43.92 vo_prod:H=0.6996,top10E=0.29,eRank=110.7,q75/q25=10738.88 train_time:276221ms step_avg:72.69ms +[2025-09-02 07:49:50] [Rank 0] step:3801/10000 train_time:276232ms step_avg:72.67ms +[2025-09-02 07:49:50] [Rank 0] step:3801/10000 train_time:276232ms step_avg:72.67ms +[2025-09-02 07:49:51] [Rank 0] step:3821/10000 train_time:277632ms step_avg:72.66ms +[2025-09-02 07:49:51] [Rank 0] step:3821/10000 train_time:277632ms step_avg:72.66ms +[2025-09-02 07:49:53] [Rank 0] step:3841/10000 train_time:279158ms 
step_avg:72.68ms +[2025-09-02 07:49:53] [Rank 0] step:3841/10000 train_time:279158ms step_avg:72.68ms +[2025-09-02 07:49:54] [Rank 0] step:3861/10000 train_time:280684ms step_avg:72.70ms +[2025-09-02 07:49:54] [Rank 0] step:3861/10000 train_time:280684ms step_avg:72.70ms +[2025-09-02 07:49:56] [Rank 0] step:3881/10000 train_time:282208ms step_avg:72.72ms +[2025-09-02 07:49:56] [Rank 0] step:3881/10000 train_time:282208ms step_avg:72.72ms +[2025-09-02 07:49:57] [Rank 0] step:3901/10000 train_time:283735ms step_avg:72.73ms +[2025-09-02 07:49:57] [Rank 0] step:3901/10000 train_time:283735ms step_avg:72.73ms +[2025-09-02 07:49:59] [Rank 0] step:3921/10000 train_time:285261ms step_avg:72.75ms +[2025-09-02 07:49:59] [Rank 0] step:3921/10000 train_time:285261ms step_avg:72.75ms +[2025-09-02 07:50:00] [Rank 0] step:3941/10000 train_time:286788ms step_avg:72.77ms +[2025-09-02 07:50:00] [Rank 0] step:3941/10000 train_time:286788ms step_avg:72.77ms +[2025-09-02 07:50:02] [Rank 0] step:3961/10000 train_time:288312ms step_avg:72.79ms +[2025-09-02 07:50:02] [Rank 0] step:3961/10000 train_time:288312ms step_avg:72.79ms +[2025-09-02 07:50:04] [Rank 0] step:3981/10000 train_time:289839ms step_avg:72.81ms +[2025-09-02 07:50:04] [Rank 0] step:3981/10000 train_time:289839ms step_avg:72.81ms +[2025-09-02 07:50:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:50:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:50:17] [Rank 0] PRINT: step:4000/10000 val_loss:4.2488 svd_entropy: attn_qk:H=0.7117,top10E=0.31,eRank=121.8,q75/q25=96.93 attn_vo:H=0.8005,top10E=0.18,eRank=233.0,q75/q25=90.47 mlp_w1:H=0.6927,top10E=0.38,eRank=126.9,q75/q25=10.94 mlp_w2:H=0.8114,top10E=0.17,eRank=232.2,q75/q25=44.57 vo_prod:H=0.7038,top10E=0.29,eRank=113.7,q75/q25=10069.75 train_time:291516ms step_avg:72.88ms +[2025-09-02 07:50:17] [Rank 0] PRINT: step:4000/10000 val_loss:4.2488 svd_entropy: attn_qk:H=0.7117,top10E=0.31,eRank=121.8,q75/q25=96.93 attn_vo:H=0.8005,top10E=0.18,eRank=233.0,q75/q25=90.47 mlp_w1:H=0.6927,top10E=0.38,eRank=126.9,q75/q25=10.94 mlp_w2:H=0.8114,top10E=0.17,eRank=232.2,q75/q25=44.57 vo_prod:H=0.7038,top10E=0.29,eRank=113.7,q75/q25=10069.75 train_time:291516ms step_avg:72.88ms +[2025-09-02 07:50:17] [Rank 0] step:4001/10000 train_time:291528ms step_avg:72.86ms +[2025-09-02 07:50:17] [Rank 0] step:4001/10000 train_time:291528ms step_avg:72.86ms +[2025-09-02 07:50:18] [Rank 0] step:4021/10000 train_time:292910ms step_avg:72.85ms +[2025-09-02 07:50:18] [Rank 0] step:4021/10000 train_time:292910ms step_avg:72.85ms +[2025-09-02 07:50:20] [Rank 0] step:4041/10000 train_time:294433ms step_avg:72.86ms +[2025-09-02 07:50:20] [Rank 0] step:4041/10000 train_time:294433ms step_avg:72.86ms +[2025-09-02 07:50:21] [Rank 0] step:4061/10000 train_time:295955ms step_avg:72.88ms +[2025-09-02 07:50:21] [Rank 0] step:4061/10000 train_time:295955ms step_avg:72.88ms +[2025-09-02 07:50:23] [Rank 0] step:4081/10000 train_time:297597ms step_avg:72.92ms +[2025-09-02 07:50:23] [Rank 0] step:4081/10000 train_time:297597ms step_avg:72.92ms +[2025-09-02 07:50:25] [Rank 0] step:4101/10000 train_time:299122ms step_avg:72.94ms +[2025-09-02 07:50:25] [Rank 0] step:4101/10000 train_time:299122ms step_avg:72.94ms +[2025-09-02 07:50:26] [Rank 0] step:4121/10000 train_time:300645ms step_avg:72.95ms +[2025-09-02 07:50:26] [Rank 0] step:4121/10000 train_time:300645ms step_avg:72.95ms +[2025-09-02 
07:50:28] [Rank 0] step:4141/10000 train_time:302169ms step_avg:72.97ms +[2025-09-02 07:50:28] [Rank 0] step:4141/10000 train_time:302169ms step_avg:72.97ms +[2025-09-02 07:50:29] [Rank 0] step:4161/10000 train_time:303692ms step_avg:72.99ms +[2025-09-02 07:50:29] [Rank 0] step:4161/10000 train_time:303692ms step_avg:72.99ms +[2025-09-02 07:50:31] [Rank 0] step:4181/10000 train_time:305217ms step_avg:73.00ms +[2025-09-02 07:50:31] [Rank 0] step:4181/10000 train_time:305217ms step_avg:73.00ms +[2025-09-02 07:50:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:50:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:50:44] [Rank 0] PRINT: step:4200/10000 val_loss:4.2304 svd_entropy: attn_qk:H=0.7153,top10E=0.31,eRank=124.3,q75/q25=99.89 attn_vo:H=0.8037,top10E=0.18,eRank=237.2,q75/q25=88.48 mlp_w1:H=0.6973,top10E=0.38,eRank=129.9,q75/q25=11.38 mlp_w2:H=0.8137,top10E=0.17,eRank=236.1,q75/q25=45.61 vo_prod:H=0.7075,top10E=0.28,eRank=116.6,q75/q25=9270.97 train_time:306892ms step_avg:73.07ms +[2025-09-02 07:50:44] [Rank 0] PRINT: step:4200/10000 val_loss:4.2304 svd_entropy: attn_qk:H=0.7153,top10E=0.31,eRank=124.3,q75/q25=99.89 attn_vo:H=0.8037,top10E=0.18,eRank=237.2,q75/q25=88.48 mlp_w1:H=0.6973,top10E=0.38,eRank=129.9,q75/q25=11.38 mlp_w2:H=0.8137,top10E=0.17,eRank=236.1,q75/q25=45.61 vo_prod:H=0.7075,top10E=0.28,eRank=116.6,q75/q25=9270.97 train_time:306892ms step_avg:73.07ms +[2025-09-02 07:50:44] [Rank 0] step:4201/10000 train_time:306904ms step_avg:73.05ms +[2025-09-02 07:50:44] [Rank 0] step:4201/10000 train_time:306904ms step_avg:73.05ms +[2025-09-02 07:50:45] [Rank 0] step:4221/10000 train_time:308292ms step_avg:73.04ms +[2025-09-02 07:50:45] [Rank 0] step:4221/10000 train_time:308292ms step_avg:73.04ms +[2025-09-02 07:50:47] [Rank 0] step:4241/10000 train_time:309814ms 
step_avg:73.05ms +[2025-09-02 07:50:47] [Rank 0] step:4241/10000 train_time:309814ms step_avg:73.05ms +[2025-09-02 07:50:48] [Rank 0] step:4261/10000 train_time:311336ms step_avg:73.07ms +[2025-09-02 07:50:48] [Rank 0] step:4261/10000 train_time:311336ms step_avg:73.07ms +[2025-09-02 07:50:50] [Rank 0] step:4281/10000 train_time:312858ms step_avg:73.08ms +[2025-09-02 07:50:50] [Rank 0] step:4281/10000 train_time:312858ms step_avg:73.08ms +[2025-09-02 07:50:52] [Rank 0] step:4301/10000 train_time:314382ms step_avg:73.10ms +[2025-09-02 07:50:52] [Rank 0] step:4301/10000 train_time:314382ms step_avg:73.10ms +[2025-09-02 07:50:53] [Rank 0] step:4321/10000 train_time:315907ms step_avg:73.11ms +[2025-09-02 07:50:53] [Rank 0] step:4321/10000 train_time:315907ms step_avg:73.11ms +[2025-09-02 07:50:55] [Rank 0] step:4341/10000 train_time:317427ms step_avg:73.12ms +[2025-09-02 07:50:55] [Rank 0] step:4341/10000 train_time:317427ms step_avg:73.12ms +[2025-09-02 07:50:56] [Rank 0] step:4361/10000 train_time:318952ms step_avg:73.14ms +[2025-09-02 07:50:56] [Rank 0] step:4361/10000 train_time:318952ms step_avg:73.14ms +[2025-09-02 07:50:58] [Rank 0] step:4381/10000 train_time:320473ms step_avg:73.15ms +[2025-09-02 07:50:58] [Rank 0] step:4381/10000 train_time:320473ms step_avg:73.15ms +[2025-09-02 07:50:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:50:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:51:11] [Rank 0] PRINT: step:4400/10000 val_loss:4.2066 svd_entropy: attn_qk:H=0.7186,top10E=0.30,eRank=126.6,q75/q25=102.38 attn_vo:H=0.8067,top10E=0.17,eRank=241.2,q75/q25=85.82 mlp_w1:H=0.7017,top10E=0.37,eRank=132.9,q75/q25=11.89 mlp_w2:H=0.8157,top10E=0.16,eRank=239.5,q75/q25=46.56 vo_prod:H=0.7113,top10E=0.28,eRank=119.6,q75/q25=8669.71 train_time:322149ms step_avg:73.22ms +[2025-09-02 07:51:11] [Rank 0] PRINT: step:4400/10000 val_loss:4.2066 svd_entropy: attn_qk:H=0.7186,top10E=0.30,eRank=126.6,q75/q25=102.38 attn_vo:H=0.8067,top10E=0.17,eRank=241.2,q75/q25=85.82 mlp_w1:H=0.7017,top10E=0.37,eRank=132.9,q75/q25=11.89 mlp_w2:H=0.8157,top10E=0.16,eRank=239.5,q75/q25=46.56 vo_prod:H=0.7113,top10E=0.28,eRank=119.6,q75/q25=8669.71 train_time:322149ms step_avg:73.22ms +[2025-09-02 07:51:11] [Rank 0] step:4401/10000 train_time:322160ms step_avg:73.20ms +[2025-09-02 07:51:11] [Rank 0] step:4401/10000 train_time:322160ms step_avg:73.20ms +[2025-09-02 07:51:13] [Rank 0] step:4421/10000 train_time:323539ms step_avg:73.18ms +[2025-09-02 07:51:13] [Rank 0] step:4421/10000 train_time:323539ms step_avg:73.18ms +[2025-09-02 07:51:14] [Rank 0] step:4441/10000 train_time:325127ms step_avg:73.21ms +[2025-09-02 07:51:14] [Rank 0] step:4441/10000 train_time:325127ms step_avg:73.21ms +[2025-09-02 07:51:16] [Rank 0] step:4461/10000 train_time:326654ms step_avg:73.22ms +[2025-09-02 07:51:16] [Rank 0] step:4461/10000 train_time:326654ms step_avg:73.22ms +[2025-09-02 07:51:17] [Rank 0] step:4481/10000 train_time:328181ms step_avg:73.24ms +[2025-09-02 07:51:17] [Rank 0] step:4481/10000 train_time:328181ms step_avg:73.24ms +[2025-09-02 07:51:19] [Rank 0] step:4501/10000 train_time:329710ms step_avg:73.25ms +[2025-09-02 07:51:19] [Rank 0] step:4501/10000 train_time:329710ms step_avg:73.25ms +[2025-09-02 07:51:20] [Rank 0] step:4521/10000 train_time:331235ms step_avg:73.27ms +[2025-09-02 07:51:20] [Rank 0] step:4521/10000 train_time:331235ms step_avg:73.27ms +[2025-09-02 
07:51:22] [Rank 0] step:4541/10000 train_time:332763ms step_avg:73.28ms +[2025-09-02 07:51:22] [Rank 0] step:4541/10000 train_time:332763ms step_avg:73.28ms +[2025-09-02 07:51:23] [Rank 0] step:4561/10000 train_time:334292ms step_avg:73.29ms +[2025-09-02 07:51:23] [Rank 0] step:4561/10000 train_time:334292ms step_avg:73.29ms +[2025-09-02 07:51:25] [Rank 0] step:4581/10000 train_time:335821ms step_avg:73.31ms +[2025-09-02 07:51:25] [Rank 0] step:4581/10000 train_time:335821ms step_avg:73.31ms +[2025-09-02 07:51:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:51:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:51:38] [Rank 0] PRINT: step:4600/10000 val_loss:4.1780 svd_entropy: attn_qk:H=0.7217,top10E=0.30,eRank=129.0,q75/q25=104.10 attn_vo:H=0.8097,top10E=0.17,eRank=245.3,q75/q25=82.81 mlp_w1:H=0.7058,top10E=0.36,eRank=135.8,q75/q25=12.37 mlp_w2:H=0.8176,top10E=0.16,eRank=242.9,q75/q25=47.64 vo_prod:H=0.7151,top10E=0.27,eRank=122.7,q75/q25=8015.18 train_time:337504ms step_avg:73.37ms +[2025-09-02 07:51:38] [Rank 0] PRINT: step:4600/10000 val_loss:4.1780 svd_entropy: attn_qk:H=0.7217,top10E=0.30,eRank=129.0,q75/q25=104.10 attn_vo:H=0.8097,top10E=0.17,eRank=245.3,q75/q25=82.81 mlp_w1:H=0.7058,top10E=0.36,eRank=135.8,q75/q25=12.37 mlp_w2:H=0.8176,top10E=0.16,eRank=242.9,q75/q25=47.64 vo_prod:H=0.7151,top10E=0.27,eRank=122.7,q75/q25=8015.18 train_time:337504ms step_avg:73.37ms +[2025-09-02 07:51:38] [Rank 0] step:4601/10000 train_time:337516ms step_avg:73.36ms +[2025-09-02 07:51:38] [Rank 0] step:4601/10000 train_time:337516ms step_avg:73.36ms +[2025-09-02 07:51:40] [Rank 0] step:4621/10000 train_time:338904ms step_avg:73.34ms +[2025-09-02 07:51:40] [Rank 0] step:4621/10000 train_time:338904ms step_avg:73.34ms +[2025-09-02 07:51:41] [Rank 0] step:4641/10000 train_time:340430ms 
step_avg:73.35ms +[2025-09-02 07:51:41] [Rank 0] step:4641/10000 train_time:340430ms step_avg:73.35ms +[2025-09-02 07:51:43] [Rank 0] step:4661/10000 train_time:341958ms step_avg:73.37ms +[2025-09-02 07:51:43] [Rank 0] step:4661/10000 train_time:341958ms step_avg:73.37ms +[2025-09-02 07:51:44] [Rank 0] step:4681/10000 train_time:343486ms step_avg:73.38ms +[2025-09-02 07:51:44] [Rank 0] step:4681/10000 train_time:343486ms step_avg:73.38ms +[2025-09-02 07:51:46] [Rank 0] step:4701/10000 train_time:345016ms step_avg:73.39ms +[2025-09-02 07:51:46] [Rank 0] step:4701/10000 train_time:345016ms step_avg:73.39ms +[2025-09-02 07:51:47] [Rank 0] step:4721/10000 train_time:346544ms step_avg:73.40ms +[2025-09-02 07:51:47] [Rank 0] step:4721/10000 train_time:346544ms step_avg:73.40ms +[2025-09-02 07:51:49] [Rank 0] step:4741/10000 train_time:348073ms step_avg:73.42ms +[2025-09-02 07:51:49] [Rank 0] step:4741/10000 train_time:348073ms step_avg:73.42ms +[2025-09-02 07:51:50] [Rank 0] step:4761/10000 train_time:349604ms step_avg:73.43ms +[2025-09-02 07:51:50] [Rank 0] step:4761/10000 train_time:349604ms step_avg:73.43ms +[2025-09-02 07:51:52] [Rank 0] step:4781/10000 train_time:351132ms step_avg:73.44ms +[2025-09-02 07:51:52] [Rank 0] step:4781/10000 train_time:351132ms step_avg:73.44ms +[2025-09-02 07:51:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:51:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:52:05] [Rank 0] PRINT: step:4800/10000 val_loss:4.1652 svd_entropy: attn_qk:H=0.7248,top10E=0.29,eRank=131.3,q75/q25=106.40 attn_vo:H=0.8125,top10E=0.17,eRank=249.2,q75/q25=81.05 mlp_w1:H=0.7097,top10E=0.36,eRank=138.6,q75/q25=12.85 mlp_w2:H=0.8193,top10E=0.16,eRank=246.0,q75/q25=47.74 vo_prod:H=0.7186,top10E=0.27,eRank=125.6,q75/q25=7398.01 train_time:352818ms step_avg:73.50ms +[2025-09-02 07:52:05] [Rank 0] PRINT: step:4800/10000 val_loss:4.1652 svd_entropy: attn_qk:H=0.7248,top10E=0.29,eRank=131.3,q75/q25=106.40 attn_vo:H=0.8125,top10E=0.17,eRank=249.2,q75/q25=81.05 mlp_w1:H=0.7097,top10E=0.36,eRank=138.6,q75/q25=12.85 mlp_w2:H=0.8193,top10E=0.16,eRank=246.0,q75/q25=47.74 vo_prod:H=0.7186,top10E=0.27,eRank=125.6,q75/q25=7398.01 train_time:352818ms step_avg:73.50ms +[2025-09-02 07:52:05] [Rank 0] step:4801/10000 train_time:352829ms step_avg:73.49ms +[2025-09-02 07:52:05] [Rank 0] step:4801/10000 train_time:352829ms step_avg:73.49ms +[2025-09-02 07:52:07] [Rank 0] step:4821/10000 train_time:354232ms step_avg:73.48ms +[2025-09-02 07:52:07] [Rank 0] step:4821/10000 train_time:354232ms step_avg:73.48ms +[2025-09-02 07:52:08] [Rank 0] step:4841/10000 train_time:355760ms step_avg:73.49ms +[2025-09-02 07:52:08] [Rank 0] step:4841/10000 train_time:355760ms step_avg:73.49ms +[2025-09-02 07:52:10] [Rank 0] step:4861/10000 train_time:357292ms step_avg:73.50ms +[2025-09-02 07:52:10] [Rank 0] step:4861/10000 train_time:357292ms step_avg:73.50ms +[2025-09-02 07:52:12] [Rank 0] step:4881/10000 train_time:358823ms step_avg:73.51ms +[2025-09-02 07:52:12] [Rank 0] step:4881/10000 train_time:358823ms step_avg:73.51ms +[2025-09-02 07:52:13] [Rank 0] step:4901/10000 train_time:360352ms step_avg:73.53ms +[2025-09-02 07:52:13] [Rank 0] step:4901/10000 train_time:360352ms step_avg:73.53ms +[2025-09-02 07:52:15] [Rank 0] step:4921/10000 train_time:361885ms step_avg:73.54ms +[2025-09-02 07:52:15] [Rank 0] step:4921/10000 train_time:361885ms step_avg:73.54ms +[2025-09-02 
07:52:16] [Rank 0] step:4941/10000 train_time:363417ms step_avg:73.55ms +[2025-09-02 07:52:16] [Rank 0] step:4941/10000 train_time:363417ms step_avg:73.55ms +[2025-09-02 07:52:18] [Rank 0] step:4961/10000 train_time:364947ms step_avg:73.56ms +[2025-09-02 07:52:18] [Rank 0] step:4961/10000 train_time:364947ms step_avg:73.56ms +[2025-09-02 07:52:19] [Rank 0] step:4981/10000 train_time:366480ms step_avg:73.58ms +[2025-09-02 07:52:19] [Rank 0] step:4981/10000 train_time:366480ms step_avg:73.58ms +[2025-09-02 07:52:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:52:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:52:32] [Rank 0] PRINT: step:5000/10000 val_loss:4.1445 svd_entropy: attn_qk:H=0.7276,top10E=0.29,eRank=133.5,q75/q25=108.09 attn_vo:H=0.8150,top10E=0.17,eRank=252.7,q75/q25=78.39 mlp_w1:H=0.7133,top10E=0.35,eRank=141.3,q75/q25=13.33 mlp_w2:H=0.8210,top10E=0.16,eRank=249.1,q75/q25=48.28 vo_prod:H=0.7218,top10E=0.26,eRank=128.2,q75/q25=6859.14 train_time:368165ms step_avg:73.63ms +[2025-09-02 07:52:32] [Rank 0] PRINT: step:5000/10000 val_loss:4.1445 svd_entropy: attn_qk:H=0.7276,top10E=0.29,eRank=133.5,q75/q25=108.09 attn_vo:H=0.8150,top10E=0.17,eRank=252.7,q75/q25=78.39 mlp_w1:H=0.7133,top10E=0.35,eRank=141.3,q75/q25=13.33 mlp_w2:H=0.8210,top10E=0.16,eRank=249.1,q75/q25=48.28 vo_prod:H=0.7218,top10E=0.26,eRank=128.2,q75/q25=6859.14 train_time:368165ms step_avg:73.63ms +[2025-09-02 07:52:32] [Rank 0] step:5001/10000 train_time:368176ms step_avg:73.62ms +[2025-09-02 07:52:32] [Rank 0] step:5001/10000 train_time:368176ms step_avg:73.62ms +[2025-09-02 07:52:34] [Rank 0] step:5021/10000 train_time:369557ms step_avg:73.60ms +[2025-09-02 07:52:34] [Rank 0] step:5021/10000 train_time:369557ms step_avg:73.60ms +[2025-09-02 07:52:35] [Rank 0] step:5041/10000 train_time:371087ms 
step_avg:73.61ms +[2025-09-02 07:52:35] [Rank 0] step:5041/10000 train_time:371087ms step_avg:73.61ms +[2025-09-02 07:52:37] [Rank 0] step:5061/10000 train_time:372612ms step_avg:73.62ms +[2025-09-02 07:52:37] [Rank 0] step:5061/10000 train_time:372612ms step_avg:73.62ms +[2025-09-02 07:52:39] [Rank 0] step:5081/10000 train_time:374140ms step_avg:73.64ms +[2025-09-02 07:52:39] [Rank 0] step:5081/10000 train_time:374140ms step_avg:73.64ms +[2025-09-02 07:52:40] [Rank 0] step:5101/10000 train_time:375671ms step_avg:73.65ms +[2025-09-02 07:52:40] [Rank 0] step:5101/10000 train_time:375671ms step_avg:73.65ms +[2025-09-02 07:52:42] [Rank 0] step:5121/10000 train_time:377200ms step_avg:73.66ms +[2025-09-02 07:52:42] [Rank 0] step:5121/10000 train_time:377200ms step_avg:73.66ms +[2025-09-02 07:52:43] [Rank 0] step:5141/10000 train_time:378732ms step_avg:73.67ms +[2025-09-02 07:52:43] [Rank 0] step:5141/10000 train_time:378732ms step_avg:73.67ms +[2025-09-02 07:52:45] [Rank 0] step:5161/10000 train_time:380261ms step_avg:73.68ms +[2025-09-02 07:52:45] [Rank 0] step:5161/10000 train_time:380261ms step_avg:73.68ms +[2025-09-02 07:52:46] [Rank 0] step:5181/10000 train_time:381792ms step_avg:73.69ms +[2025-09-02 07:52:46] [Rank 0] step:5181/10000 train_time:381792ms step_avg:73.69ms +[2025-09-02 07:52:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:52:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:52:59] [Rank 0] PRINT: step:5200/10000 val_loss:4.1235 svd_entropy: attn_qk:H=0.7303,top10E=0.28,eRank=135.7,q75/q25=109.16 attn_vo:H=0.8174,top10E=0.16,eRank=256.2,q75/q25=75.89 mlp_w1:H=0.7166,top10E=0.35,eRank=143.8,q75/q25=13.79 mlp_w2:H=0.8225,top10E=0.15,eRank=251.7,q75/q25=49.54 vo_prod:H=0.7247,top10E=0.26,eRank=130.7,q75/q25=6132.64 train_time:383499ms step_avg:73.75ms +[2025-09-02 07:52:59] [Rank 0] PRINT: step:5200/10000 val_loss:4.1235 svd_entropy: attn_qk:H=0.7303,top10E=0.28,eRank=135.7,q75/q25=109.16 attn_vo:H=0.8174,top10E=0.16,eRank=256.2,q75/q25=75.89 mlp_w1:H=0.7166,top10E=0.35,eRank=143.8,q75/q25=13.79 mlp_w2:H=0.8225,top10E=0.15,eRank=251.7,q75/q25=49.54 vo_prod:H=0.7247,top10E=0.26,eRank=130.7,q75/q25=6132.64 train_time:383499ms step_avg:73.75ms +[2025-09-02 07:52:59] [Rank 0] step:5201/10000 train_time:383510ms step_avg:73.74ms +[2025-09-02 07:52:59] [Rank 0] step:5201/10000 train_time:383510ms step_avg:73.74ms +[2025-09-02 07:53:01] [Rank 0] step:5221/10000 train_time:384934ms step_avg:73.73ms +[2025-09-02 07:53:01] [Rank 0] step:5221/10000 train_time:384934ms step_avg:73.73ms +[2025-09-02 07:53:02] [Rank 0] step:5241/10000 train_time:386493ms step_avg:73.74ms +[2025-09-02 07:53:02] [Rank 0] step:5241/10000 train_time:386493ms step_avg:73.74ms +[2025-09-02 07:53:04] [Rank 0] step:5261/10000 train_time:388054ms step_avg:73.76ms +[2025-09-02 07:53:04] [Rank 0] step:5261/10000 train_time:388054ms step_avg:73.76ms +[2025-09-02 07:53:06] [Rank 0] step:5281/10000 train_time:389617ms step_avg:73.78ms +[2025-09-02 07:53:06] [Rank 0] step:5281/10000 train_time:389617ms step_avg:73.78ms +[2025-09-02 07:53:07] [Rank 0] step:5301/10000 train_time:391186ms step_avg:73.79ms +[2025-09-02 07:53:07] [Rank 0] step:5301/10000 train_time:391186ms step_avg:73.79ms +[2025-09-02 07:53:09] [Rank 0] step:5321/10000 train_time:392746ms step_avg:73.81ms +[2025-09-02 07:53:09] [Rank 0] step:5321/10000 train_time:392746ms step_avg:73.81ms +[2025-09-02 
07:53:10] [Rank 0] step:5341/10000 train_time:394306ms step_avg:73.83ms +[2025-09-02 07:53:10] [Rank 0] step:5341/10000 train_time:394306ms step_avg:73.83ms +[2025-09-02 07:53:12] [Rank 0] step:5361/10000 train_time:395872ms step_avg:73.84ms +[2025-09-02 07:53:12] [Rank 0] step:5361/10000 train_time:395872ms step_avg:73.84ms +[2025-09-02 07:53:13] [Rank 0] step:5381/10000 train_time:397436ms step_avg:73.86ms +[2025-09-02 07:53:13] [Rank 0] step:5381/10000 train_time:397436ms step_avg:73.86ms +[2025-09-02 07:53:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:53:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:53:27] [Rank 0] PRINT: step:5400/10000 val_loss:4.1064 svd_entropy: attn_qk:H=0.7327,top10E=0.28,eRank=137.6,q75/q25=110.30 attn_vo:H=0.8197,top10E=0.16,eRank=259.4,q75/q25=73.79 mlp_w1:H=0.7200,top10E=0.34,eRank=146.5,q75/q25=14.23 mlp_w2:H=0.8238,top10E=0.15,eRank=254.3,q75/q25=50.43 vo_prod:H=0.7275,top10E=0.26,eRank=133.1,q75/q25=5706.39 train_time:399153ms step_avg:73.92ms +[2025-09-02 07:53:27] [Rank 0] PRINT: step:5400/10000 val_loss:4.1064 svd_entropy: attn_qk:H=0.7327,top10E=0.28,eRank=137.6,q75/q25=110.30 attn_vo:H=0.8197,top10E=0.16,eRank=259.4,q75/q25=73.79 mlp_w1:H=0.7200,top10E=0.34,eRank=146.5,q75/q25=14.23 mlp_w2:H=0.8238,top10E=0.15,eRank=254.3,q75/q25=50.43 vo_prod:H=0.7275,top10E=0.26,eRank=133.1,q75/q25=5706.39 train_time:399153ms step_avg:73.92ms +[2025-09-02 07:53:27] [Rank 0] step:5401/10000 train_time:399165ms step_avg:73.91ms +[2025-09-02 07:53:27] [Rank 0] step:5401/10000 train_time:399165ms step_avg:73.91ms +[2025-09-02 07:53:28] [Rank 0] step:5421/10000 train_time:400585ms step_avg:73.89ms +[2025-09-02 07:53:28] [Rank 0] step:5421/10000 train_time:400585ms step_avg:73.89ms +[2025-09-02 07:53:30] [Rank 0] step:5441/10000 train_time:402140ms 
step_avg:73.91ms +[2025-09-02 07:53:30] [Rank 0] step:5441/10000 train_time:402140ms step_avg:73.91ms +[2025-09-02 07:53:32] [Rank 0] step:5461/10000 train_time:403702ms step_avg:73.92ms +[2025-09-02 07:53:32] [Rank 0] step:5461/10000 train_time:403702ms step_avg:73.92ms +[2025-09-02 07:53:33] [Rank 0] step:5481/10000 train_time:405265ms step_avg:73.94ms +[2025-09-02 07:53:33] [Rank 0] step:5481/10000 train_time:405265ms step_avg:73.94ms +[2025-09-02 07:53:35] [Rank 0] step:5501/10000 train_time:406831ms step_avg:73.96ms +[2025-09-02 07:53:35] [Rank 0] step:5501/10000 train_time:406831ms step_avg:73.96ms +[2025-09-02 07:53:36] [Rank 0] step:5521/10000 train_time:408399ms step_avg:73.97ms +[2025-09-02 07:53:36] [Rank 0] step:5521/10000 train_time:408399ms step_avg:73.97ms +[2025-09-02 07:53:38] [Rank 0] step:5541/10000 train_time:409961ms step_avg:73.99ms +[2025-09-02 07:53:38] [Rank 0] step:5541/10000 train_time:409961ms step_avg:73.99ms +[2025-09-02 07:53:39] [Rank 0] step:5561/10000 train_time:411522ms step_avg:74.00ms +[2025-09-02 07:53:39] [Rank 0] step:5561/10000 train_time:411522ms step_avg:74.00ms +[2025-09-02 07:53:41] [Rank 0] step:5581/10000 train_time:413086ms step_avg:74.02ms +[2025-09-02 07:53:41] [Rank 0] step:5581/10000 train_time:413086ms step_avg:74.02ms +[2025-09-02 07:53:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:53:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:53:54] [Rank 0] PRINT: step:5600/10000 val_loss:4.0936 svd_entropy: attn_qk:H=0.7351,top10E=0.28,eRank=139.6,q75/q25=110.86 attn_vo:H=0.8218,top10E=0.16,eRank=262.6,q75/q25=71.36 mlp_w1:H=0.7233,top10E=0.34,eRank=149.1,q75/q25=14.74 mlp_w2:H=0.8252,top10E=0.15,eRank=256.9,q75/q25=50.66 vo_prod:H=0.7302,top10E=0.25,eRank=135.5,q75/q25=5254.58 train_time:414805ms step_avg:74.07ms +[2025-09-02 07:53:54] [Rank 0] PRINT: step:5600/10000 val_loss:4.0936 svd_entropy: attn_qk:H=0.7351,top10E=0.28,eRank=139.6,q75/q25=110.86 attn_vo:H=0.8218,top10E=0.16,eRank=262.6,q75/q25=71.36 mlp_w1:H=0.7233,top10E=0.34,eRank=149.1,q75/q25=14.74 mlp_w2:H=0.8252,top10E=0.15,eRank=256.9,q75/q25=50.66 vo_prod:H=0.7302,top10E=0.25,eRank=135.5,q75/q25=5254.58 train_time:414805ms step_avg:74.07ms +[2025-09-02 07:53:54] [Rank 0] step:5601/10000 train_time:414816ms step_avg:74.06ms +[2025-09-02 07:53:54] [Rank 0] step:5601/10000 train_time:414816ms step_avg:74.06ms +[2025-09-02 07:53:56] [Rank 0] step:5621/10000 train_time:416231ms step_avg:74.05ms +[2025-09-02 07:53:56] [Rank 0] step:5621/10000 train_time:416231ms step_avg:74.05ms +[2025-09-02 07:53:57] [Rank 0] step:5641/10000 train_time:417791ms step_avg:74.06ms +[2025-09-02 07:53:57] [Rank 0] step:5641/10000 train_time:417791ms step_avg:74.06ms +[2025-09-02 07:53:59] [Rank 0] step:5661/10000 train_time:419351ms step_avg:74.08ms +[2025-09-02 07:53:59] [Rank 0] step:5661/10000 train_time:419351ms step_avg:74.08ms +[2025-09-02 07:54:01] [Rank 0] step:5681/10000 train_time:420916ms step_avg:74.09ms +[2025-09-02 07:54:01] [Rank 0] step:5681/10000 train_time:420916ms step_avg:74.09ms +[2025-09-02 07:54:02] [Rank 0] step:5701/10000 train_time:422476ms step_avg:74.11ms +[2025-09-02 07:54:02] [Rank 0] step:5701/10000 train_time:422476ms step_avg:74.11ms +[2025-09-02 07:54:04] [Rank 0] step:5721/10000 train_time:424039ms step_avg:74.12ms +[2025-09-02 07:54:04] [Rank 0] step:5721/10000 train_time:424039ms step_avg:74.12ms +[2025-09-02 
07:54:05] [Rank 0] step:5741/10000 train_time:425601ms step_avg:74.13ms +[2025-09-02 07:54:05] [Rank 0] step:5741/10000 train_time:425601ms step_avg:74.13ms +[2025-09-02 07:54:07] [Rank 0] step:5761/10000 train_time:427164ms step_avg:74.15ms +[2025-09-02 07:54:07] [Rank 0] step:5761/10000 train_time:427164ms step_avg:74.15ms +[2025-09-02 07:54:08] [Rank 0] step:5781/10000 train_time:428728ms step_avg:74.16ms +[2025-09-02 07:54:08] [Rank 0] step:5781/10000 train_time:428728ms step_avg:74.16ms +[2025-09-02 07:54:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:54:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:54:22] [Rank 0] PRINT: step:5800/10000 val_loss:4.0841 svd_entropy: attn_qk:H=0.7374,top10E=0.28,eRank=141.5,q75/q25=111.31 attn_vo:H=0.8238,top10E=0.16,eRank=265.5,q75/q25=69.12 mlp_w1:H=0.7262,top10E=0.33,eRank=151.4,q75/q25=15.27 mlp_w2:H=0.8264,top10E=0.15,eRank=259.2,q75/q25=51.75 vo_prod:H=0.7328,top10E=0.25,eRank=137.8,q75/q25=4775.77 train_time:430450ms step_avg:74.22ms +[2025-09-02 07:54:22] [Rank 0] PRINT: step:5800/10000 val_loss:4.0841 svd_entropy: attn_qk:H=0.7374,top10E=0.28,eRank=141.5,q75/q25=111.31 attn_vo:H=0.8238,top10E=0.16,eRank=265.5,q75/q25=69.12 mlp_w1:H=0.7262,top10E=0.33,eRank=151.4,q75/q25=15.27 mlp_w2:H=0.8264,top10E=0.15,eRank=259.2,q75/q25=51.75 vo_prod:H=0.7328,top10E=0.25,eRank=137.8,q75/q25=4775.77 train_time:430450ms step_avg:74.22ms +[2025-09-02 07:54:22] [Rank 0] step:5801/10000 train_time:430461ms step_avg:74.20ms +[2025-09-02 07:54:22] [Rank 0] step:5801/10000 train_time:430461ms step_avg:74.20ms +[2025-09-02 07:54:23] [Rank 0] step:5821/10000 train_time:431870ms step_avg:74.19ms +[2025-09-02 07:54:23] [Rank 0] step:5821/10000 train_time:431870ms step_avg:74.19ms +[2025-09-02 07:54:25] [Rank 0] step:5841/10000 train_time:433433ms 
step_avg:74.21ms +[2025-09-02 07:54:25] [Rank 0] step:5841/10000 train_time:433433ms step_avg:74.21ms +[2025-09-02 07:54:27] [Rank 0] step:5861/10000 train_time:434998ms step_avg:74.22ms +[2025-09-02 07:54:27] [Rank 0] step:5861/10000 train_time:434998ms step_avg:74.22ms +[2025-09-02 07:54:28] [Rank 0] step:5881/10000 train_time:436563ms step_avg:74.23ms +[2025-09-02 07:54:28] [Rank 0] step:5881/10000 train_time:436563ms step_avg:74.23ms +[2025-09-02 07:54:30] [Rank 0] step:5901/10000 train_time:438127ms step_avg:74.25ms +[2025-09-02 07:54:30] [Rank 0] step:5901/10000 train_time:438127ms step_avg:74.25ms +[2025-09-02 07:54:31] [Rank 0] step:5921/10000 train_time:439693ms step_avg:74.26ms +[2025-09-02 07:54:31] [Rank 0] step:5921/10000 train_time:439693ms step_avg:74.26ms +[2025-09-02 07:54:33] [Rank 0] step:5941/10000 train_time:441260ms step_avg:74.27ms +[2025-09-02 07:54:33] [Rank 0] step:5941/10000 train_time:441260ms step_avg:74.27ms +[2025-09-02 07:54:34] [Rank 0] step:5961/10000 train_time:442829ms step_avg:74.29ms +[2025-09-02 07:54:34] [Rank 0] step:5961/10000 train_time:442829ms step_avg:74.29ms +[2025-09-02 07:54:36] [Rank 0] step:5981/10000 train_time:444396ms step_avg:74.30ms +[2025-09-02 07:54:36] [Rank 0] step:5981/10000 train_time:444396ms step_avg:74.30ms +[2025-09-02 07:54:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:54:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:54:49] [Rank 0] PRINT: step:6000/10000 val_loss:4.0595 svd_entropy: attn_qk:H=0.7396,top10E=0.27,eRank=143.3,q75/q25=111.49 attn_vo:H=0.8258,top10E=0.15,eRank=268.5,q75/q25=67.14 mlp_w1:H=0.7290,top10E=0.33,eRank=153.8,q75/q25=15.81 mlp_w2:H=0.8275,top10E=0.15,eRank=261.3,q75/q25=52.82 vo_prod:H=0.7355,top10E=0.25,eRank=140.3,q75/q25=4407.26 train_time:446117ms step_avg:74.35ms +[2025-09-02 07:54:49] [Rank 0] PRINT: step:6000/10000 val_loss:4.0595 svd_entropy: attn_qk:H=0.7396,top10E=0.27,eRank=143.3,q75/q25=111.49 attn_vo:H=0.8258,top10E=0.15,eRank=268.5,q75/q25=67.14 mlp_w1:H=0.7290,top10E=0.33,eRank=153.8,q75/q25=15.81 mlp_w2:H=0.8275,top10E=0.15,eRank=261.3,q75/q25=52.82 vo_prod:H=0.7355,top10E=0.25,eRank=140.3,q75/q25=4407.26 train_time:446117ms step_avg:74.35ms +[2025-09-02 07:54:49] [Rank 0] step:6001/10000 train_time:446128ms step_avg:74.34ms +[2025-09-02 07:54:49] [Rank 0] step:6001/10000 train_time:446128ms step_avg:74.34ms +[2025-09-02 07:54:51] [Rank 0] step:6021/10000 train_time:447553ms step_avg:74.33ms +[2025-09-02 07:54:51] [Rank 0] step:6021/10000 train_time:447553ms step_avg:74.33ms +[2025-09-02 07:54:53] [Rank 0] step:6041/10000 train_time:449118ms step_avg:74.34ms +[2025-09-02 07:54:53] [Rank 0] step:6041/10000 train_time:449118ms step_avg:74.34ms +[2025-09-02 07:54:54] [Rank 0] step:6061/10000 train_time:450690ms step_avg:74.36ms +[2025-09-02 07:54:54] [Rank 0] step:6061/10000 train_time:450690ms step_avg:74.36ms +[2025-09-02 07:54:56] [Rank 0] step:6081/10000 train_time:452257ms step_avg:74.37ms +[2025-09-02 07:54:56] [Rank 0] step:6081/10000 train_time:452257ms step_avg:74.37ms +[2025-09-02 07:54:57] [Rank 0] step:6101/10000 train_time:453825ms step_avg:74.39ms +[2025-09-02 07:54:57] [Rank 0] step:6101/10000 train_time:453825ms step_avg:74.39ms +[2025-09-02 07:54:59] [Rank 0] step:6121/10000 train_time:455651ms step_avg:74.44ms +[2025-09-02 07:54:59] [Rank 0] step:6121/10000 train_time:455651ms step_avg:74.44ms +[2025-09-02 
07:55:01] [Rank 0] step:6141/10000 train_time:457228ms step_avg:74.45ms +[2025-09-02 07:55:01] [Rank 0] step:6141/10000 train_time:457228ms step_avg:74.45ms +[2025-09-02 07:55:02] [Rank 0] step:6161/10000 train_time:458794ms step_avg:74.47ms +[2025-09-02 07:55:02] [Rank 0] step:6161/10000 train_time:458794ms step_avg:74.47ms +[2025-09-02 07:55:04] [Rank 0] step:6181/10000 train_time:460360ms step_avg:74.48ms +[2025-09-02 07:55:04] [Rank 0] step:6181/10000 train_time:460360ms step_avg:74.48ms +[2025-09-02 07:55:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:55:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:55:17] [Rank 0] PRINT: step:6200/10000 val_loss:4.0454 svd_entropy: attn_qk:H=0.7417,top10E=0.27,eRank=145.1,q75/q25=112.67 attn_vo:H=0.8277,top10E=0.15,eRank=271.3,q75/q25=65.03 mlp_w1:H=0.7315,top10E=0.33,eRank=156.0,q75/q25=16.24 mlp_w2:H=0.8286,top10E=0.15,eRank=263.5,q75/q25=53.47 vo_prod:H=0.7380,top10E=0.24,eRank=142.6,q75/q25=4028.47 train_time:462086ms step_avg:74.53ms +[2025-09-02 07:55:17] [Rank 0] PRINT: step:6200/10000 val_loss:4.0454 svd_entropy: attn_qk:H=0.7417,top10E=0.27,eRank=145.1,q75/q25=112.67 attn_vo:H=0.8277,top10E=0.15,eRank=271.3,q75/q25=65.03 mlp_w1:H=0.7315,top10E=0.33,eRank=156.0,q75/q25=16.24 mlp_w2:H=0.8286,top10E=0.15,eRank=263.5,q75/q25=53.47 vo_prod:H=0.7380,top10E=0.24,eRank=142.6,q75/q25=4028.47 train_time:462086ms step_avg:74.53ms +[2025-09-02 07:55:17] [Rank 0] step:6201/10000 train_time:462097ms step_avg:74.52ms +[2025-09-02 07:55:17] [Rank 0] step:6201/10000 train_time:462097ms step_avg:74.52ms +[2025-09-02 07:55:19] [Rank 0] step:6221/10000 train_time:463521ms step_avg:74.51ms +[2025-09-02 07:55:19] [Rank 0] step:6221/10000 train_time:463521ms step_avg:74.51ms +[2025-09-02 07:55:20] [Rank 0] step:6241/10000 train_time:465082ms 
step_avg:74.52ms +[2025-09-02 07:55:20] [Rank 0] step:6241/10000 train_time:465082ms step_avg:74.52ms +[2025-09-02 07:55:22] [Rank 0] step:6261/10000 train_time:466648ms step_avg:74.53ms +[2025-09-02 07:55:22] [Rank 0] step:6261/10000 train_time:466648ms step_avg:74.53ms +[2025-09-02 07:55:24] [Rank 0] step:6281/10000 train_time:468216ms step_avg:74.54ms +[2025-09-02 07:55:24] [Rank 0] step:6281/10000 train_time:468216ms step_avg:74.54ms +[2025-09-02 07:55:25] [Rank 0] step:6301/10000 train_time:469784ms step_avg:74.56ms +[2025-09-02 07:55:25] [Rank 0] step:6301/10000 train_time:469784ms step_avg:74.56ms +[2025-09-02 07:55:27] [Rank 0] step:6321/10000 train_time:471350ms step_avg:74.57ms +[2025-09-02 07:55:27] [Rank 0] step:6321/10000 train_time:471350ms step_avg:74.57ms +[2025-09-02 07:55:28] [Rank 0] step:6341/10000 train_time:472922ms step_avg:74.58ms +[2025-09-02 07:55:28] [Rank 0] step:6341/10000 train_time:472922ms step_avg:74.58ms +[2025-09-02 07:55:30] [Rank 0] step:6361/10000 train_time:474495ms step_avg:74.59ms +[2025-09-02 07:55:30] [Rank 0] step:6361/10000 train_time:474495ms step_avg:74.59ms +[2025-09-02 07:55:31] [Rank 0] step:6381/10000 train_time:476068ms step_avg:74.61ms +[2025-09-02 07:55:31] [Rank 0] step:6381/10000 train_time:476068ms step_avg:74.61ms +[2025-09-02 07:55:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:55:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:55:45] [Rank 0] PRINT: step:6400/10000 val_loss:4.0299 svd_entropy: attn_qk:H=0.7436,top10E=0.27,eRank=146.8,q75/q25=112.82 attn_vo:H=0.8293,top10E=0.15,eRank=273.8,q75/q25=63.24 mlp_w1:H=0.7339,top10E=0.32,eRank=158.1,q75/q25=16.75 mlp_w2:H=0.8294,top10E=0.14,eRank=265.2,q75/q25=54.03 vo_prod:H=0.7402,top10E=0.24,eRank=144.7,q75/q25=3724.86 train_time:477793ms step_avg:74.66ms +[2025-09-02 07:55:45] [Rank 0] PRINT: step:6400/10000 val_loss:4.0299 svd_entropy: attn_qk:H=0.7436,top10E=0.27,eRank=146.8,q75/q25=112.82 attn_vo:H=0.8293,top10E=0.15,eRank=273.8,q75/q25=63.24 mlp_w1:H=0.7339,top10E=0.32,eRank=158.1,q75/q25=16.75 mlp_w2:H=0.8294,top10E=0.14,eRank=265.2,q75/q25=54.03 vo_prod:H=0.7402,top10E=0.24,eRank=144.7,q75/q25=3724.86 train_time:477793ms step_avg:74.66ms +[2025-09-02 07:55:45] [Rank 0] step:6401/10000 train_time:477804ms step_avg:74.65ms +[2025-09-02 07:55:45] [Rank 0] step:6401/10000 train_time:477804ms step_avg:74.65ms +[2025-09-02 07:55:46] [Rank 0] step:6421/10000 train_time:479239ms step_avg:74.64ms +[2025-09-02 07:55:46] [Rank 0] step:6421/10000 train_time:479239ms step_avg:74.64ms +[2025-09-02 07:55:48] [Rank 0] step:6441/10000 train_time:480806ms step_avg:74.65ms +[2025-09-02 07:55:48] [Rank 0] step:6441/10000 train_time:480806ms step_avg:74.65ms +[2025-09-02 07:55:50] [Rank 0] step:6461/10000 train_time:482376ms step_avg:74.66ms +[2025-09-02 07:55:50] [Rank 0] step:6461/10000 train_time:482376ms step_avg:74.66ms +[2025-09-02 07:55:51] [Rank 0] step:6481/10000 train_time:483952ms step_avg:74.67ms +[2025-09-02 07:55:51] [Rank 0] step:6481/10000 train_time:483952ms step_avg:74.67ms +[2025-09-02 07:55:53] [Rank 0] step:6501/10000 train_time:485514ms step_avg:74.68ms +[2025-09-02 07:55:53] [Rank 0] step:6501/10000 train_time:485514ms step_avg:74.68ms +[2025-09-02 07:55:54] [Rank 0] step:6521/10000 train_time:487077ms step_avg:74.69ms +[2025-09-02 07:55:54] [Rank 0] step:6521/10000 train_time:487077ms step_avg:74.69ms +[2025-09-02 
07:55:56] [Rank 0] step:6541/10000 train_time:488645ms step_avg:74.70ms +[2025-09-02 07:55:56] [Rank 0] step:6541/10000 train_time:488645ms step_avg:74.70ms +[2025-09-02 07:55:57] [Rank 0] step:6561/10000 train_time:490215ms step_avg:74.72ms +[2025-09-02 07:55:57] [Rank 0] step:6561/10000 train_time:490215ms step_avg:74.72ms +[2025-09-02 07:55:59] [Rank 0] step:6581/10000 train_time:491779ms step_avg:74.73ms +[2025-09-02 07:55:59] [Rank 0] step:6581/10000 train_time:491779ms step_avg:74.73ms +[2025-09-02 07:56:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:56:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:56:12] [Rank 0] PRINT: step:6600/10000 val_loss:4.0172 svd_entropy: attn_qk:H=0.7454,top10E=0.26,eRank=148.4,q75/q25=113.20 attn_vo:H=0.8309,top10E=0.15,eRank=276.2,q75/q25=61.63 mlp_w1:H=0.7362,top10E=0.32,eRank=160.2,q75/q25=17.15 mlp_w2:H=0.8303,top10E=0.14,eRank=266.9,q75/q25=55.01 vo_prod:H=0.7423,top10E=0.24,eRank=146.7,q75/q25=3392.31 train_time:493506ms step_avg:74.77ms +[2025-09-02 07:56:12] [Rank 0] PRINT: step:6600/10000 val_loss:4.0172 svd_entropy: attn_qk:H=0.7454,top10E=0.26,eRank=148.4,q75/q25=113.20 attn_vo:H=0.8309,top10E=0.15,eRank=276.2,q75/q25=61.63 mlp_w1:H=0.7362,top10E=0.32,eRank=160.2,q75/q25=17.15 mlp_w2:H=0.8303,top10E=0.14,eRank=266.9,q75/q25=55.01 vo_prod:H=0.7423,top10E=0.24,eRank=146.7,q75/q25=3392.31 train_time:493506ms step_avg:74.77ms +[2025-09-02 07:56:12] [Rank 0] step:6601/10000 train_time:493517ms step_avg:74.76ms +[2025-09-02 07:56:12] [Rank 0] step:6601/10000 train_time:493517ms step_avg:74.76ms +[2025-09-02 07:56:14] [Rank 0] step:6621/10000 train_time:494933ms step_avg:74.75ms +[2025-09-02 07:56:14] [Rank 0] step:6621/10000 train_time:494933ms step_avg:74.75ms +[2025-09-02 07:56:16] [Rank 0] step:6641/10000 train_time:496503ms 
step_avg:74.76ms +[2025-09-02 07:56:16] [Rank 0] step:6641/10000 train_time:496503ms step_avg:74.76ms +[2025-09-02 07:56:17] [Rank 0] step:6661/10000 train_time:498072ms step_avg:74.77ms +[2025-09-02 07:56:17] [Rank 0] step:6661/10000 train_time:498072ms step_avg:74.77ms +[2025-09-02 07:56:19] [Rank 0] step:6681/10000 train_time:499657ms step_avg:74.79ms +[2025-09-02 07:56:19] [Rank 0] step:6681/10000 train_time:499657ms step_avg:74.79ms +[2025-09-02 07:56:20] [Rank 0] step:6701/10000 train_time:501260ms step_avg:74.80ms +[2025-09-02 07:56:20] [Rank 0] step:6701/10000 train_time:501260ms step_avg:74.80ms +[2025-09-02 07:56:22] [Rank 0] step:6721/10000 train_time:502857ms step_avg:74.82ms +[2025-09-02 07:56:22] [Rank 0] step:6721/10000 train_time:502857ms step_avg:74.82ms +[2025-09-02 07:56:24] [Rank 0] step:6741/10000 train_time:504451ms step_avg:74.83ms +[2025-09-02 07:56:24] [Rank 0] step:6741/10000 train_time:504451ms step_avg:74.83ms +[2025-09-02 07:56:25] [Rank 0] step:6761/10000 train_time:506045ms step_avg:74.85ms +[2025-09-02 07:56:25] [Rank 0] step:6761/10000 train_time:506045ms step_avg:74.85ms +[2025-09-02 07:56:27] [Rank 0] step:6781/10000 train_time:507647ms step_avg:74.86ms +[2025-09-02 07:56:27] [Rank 0] step:6781/10000 train_time:507647ms step_avg:74.86ms +[2025-09-02 07:56:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:56:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:56:40] [Rank 0] PRINT: step:6800/10000 val_loss:4.0029 svd_entropy: attn_qk:H=0.7470,top10E=0.26,eRank=149.9,q75/q25=113.41 attn_vo:H=0.8322,top10E=0.15,eRank=278.3,q75/q25=59.96 mlp_w1:H=0.7383,top10E=0.32,eRank=162.1,q75/q25=17.49 mlp_w2:H=0.8311,top10E=0.14,eRank=268.6,q75/q25=55.52 vo_prod:H=0.7440,top10E=0.24,eRank=148.4,q75/q25=3182.01 train_time:509408ms step_avg:74.91ms +[2025-09-02 07:56:40] [Rank 0] PRINT: step:6800/10000 val_loss:4.0029 svd_entropy: attn_qk:H=0.7470,top10E=0.26,eRank=149.9,q75/q25=113.41 attn_vo:H=0.8322,top10E=0.15,eRank=278.3,q75/q25=59.96 mlp_w1:H=0.7383,top10E=0.32,eRank=162.1,q75/q25=17.49 mlp_w2:H=0.8311,top10E=0.14,eRank=268.6,q75/q25=55.52 vo_prod:H=0.7440,top10E=0.24,eRank=148.4,q75/q25=3182.01 train_time:509408ms step_avg:74.91ms +[2025-09-02 07:56:40] [Rank 0] step:6801/10000 train_time:509419ms step_avg:74.90ms +[2025-09-02 07:56:40] [Rank 0] step:6801/10000 train_time:509419ms step_avg:74.90ms +[2025-09-02 07:56:42] [Rank 0] step:6821/10000 train_time:510874ms step_avg:74.90ms +[2025-09-02 07:56:42] [Rank 0] step:6821/10000 train_time:510874ms step_avg:74.90ms +[2025-09-02 07:56:43] [Rank 0] step:6841/10000 train_time:512465ms step_avg:74.91ms +[2025-09-02 07:56:43] [Rank 0] step:6841/10000 train_time:512465ms step_avg:74.91ms +[2025-09-02 07:56:45] [Rank 0] step:6861/10000 train_time:514063ms step_avg:74.93ms +[2025-09-02 07:56:45] [Rank 0] step:6861/10000 train_time:514063ms step_avg:74.93ms +[2025-09-02 07:56:47] [Rank 0] step:6881/10000 train_time:515659ms step_avg:74.94ms +[2025-09-02 07:56:47] [Rank 0] step:6881/10000 train_time:515659ms step_avg:74.94ms +[2025-09-02 07:56:48] [Rank 0] step:6901/10000 train_time:517254ms step_avg:74.95ms +[2025-09-02 07:56:48] [Rank 0] step:6901/10000 train_time:517254ms step_avg:74.95ms +[2025-09-02 07:56:50] [Rank 0] step:6921/10000 train_time:518848ms step_avg:74.97ms +[2025-09-02 07:56:50] [Rank 0] step:6921/10000 train_time:518848ms step_avg:74.97ms +[2025-09-02 
07:56:51] [Rank 0] step:6941/10000 train_time:520451ms step_avg:74.98ms +[2025-09-02 07:56:51] [Rank 0] step:6941/10000 train_time:520451ms step_avg:74.98ms +[2025-09-02 07:56:53] [Rank 0] step:6961/10000 train_time:522061ms step_avg:75.00ms +[2025-09-02 07:56:53] [Rank 0] step:6961/10000 train_time:522061ms step_avg:75.00ms +[2025-09-02 07:56:55] [Rank 0] step:6981/10000 train_time:523663ms step_avg:75.01ms +[2025-09-02 07:56:55] [Rank 0] step:6981/10000 train_time:523663ms step_avg:75.01ms +[2025-09-02 07:56:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:56:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:57:08] [Rank 0] PRINT: step:7000/10000 val_loss:3.9855 svd_entropy: attn_qk:H=0.7485,top10E=0.26,eRank=151.2,q75/q25=113.64 attn_vo:H=0.8335,top10E=0.15,eRank=280.3,q75/q25=58.83 mlp_w1:H=0.7401,top10E=0.31,eRank=163.7,q75/q25=17.91 mlp_w2:H=0.8318,top10E=0.14,eRank=270.1,q75/q25=55.47 vo_prod:H=0.7459,top10E=0.24,eRank=150.2,q75/q25=2930.19 train_time:525427ms step_avg:75.06ms +[2025-09-02 07:57:08] [Rank 0] PRINT: step:7000/10000 val_loss:3.9855 svd_entropy: attn_qk:H=0.7485,top10E=0.26,eRank=151.2,q75/q25=113.64 attn_vo:H=0.8335,top10E=0.15,eRank=280.3,q75/q25=58.83 mlp_w1:H=0.7401,top10E=0.31,eRank=163.7,q75/q25=17.91 mlp_w2:H=0.8318,top10E=0.14,eRank=270.1,q75/q25=55.47 vo_prod:H=0.7459,top10E=0.24,eRank=150.2,q75/q25=2930.19 train_time:525427ms step_avg:75.06ms +[2025-09-02 07:57:08] [Rank 0] step:7001/10000 train_time:525438ms step_avg:75.05ms +[2025-09-02 07:57:08] [Rank 0] step:7001/10000 train_time:525438ms step_avg:75.05ms +[2025-09-02 07:57:10] [Rank 0] step:7021/10000 train_time:526899ms step_avg:75.05ms +[2025-09-02 07:57:10] [Rank 0] step:7021/10000 train_time:526899ms step_avg:75.05ms +[2025-09-02 07:57:11] [Rank 0] step:7041/10000 train_time:528496ms 
step_avg:75.06ms +[2025-09-02 07:57:11] [Rank 0] step:7041/10000 train_time:528496ms step_avg:75.06ms +[2025-09-02 07:57:13] [Rank 0] step:7061/10000 train_time:530090ms step_avg:75.07ms +[2025-09-02 07:57:13] [Rank 0] step:7061/10000 train_time:530090ms step_avg:75.07ms +[2025-09-02 07:57:14] [Rank 0] step:7081/10000 train_time:531686ms step_avg:75.09ms +[2025-09-02 07:57:14] [Rank 0] step:7081/10000 train_time:531686ms step_avg:75.09ms +[2025-09-02 07:57:16] [Rank 0] step:7101/10000 train_time:533283ms step_avg:75.10ms +[2025-09-02 07:57:16] [Rank 0] step:7101/10000 train_time:533283ms step_avg:75.10ms +[2025-09-02 07:57:18] [Rank 0] step:7121/10000 train_time:534879ms step_avg:75.11ms +[2025-09-02 07:57:18] [Rank 0] step:7121/10000 train_time:534879ms step_avg:75.11ms +[2025-09-02 07:57:19] [Rank 0] step:7141/10000 train_time:536475ms step_avg:75.13ms +[2025-09-02 07:57:19] [Rank 0] step:7141/10000 train_time:536475ms step_avg:75.13ms +[2025-09-02 07:57:21] [Rank 0] step:7161/10000 train_time:538075ms step_avg:75.14ms +[2025-09-02 07:57:21] [Rank 0] step:7161/10000 train_time:538075ms step_avg:75.14ms +[2025-09-02 07:57:22] [Rank 0] step:7181/10000 train_time:539674ms step_avg:75.15ms +[2025-09-02 07:57:22] [Rank 0] step:7181/10000 train_time:539674ms step_avg:75.15ms +[2025-09-02 07:57:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:57:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:57:36] [Rank 0] PRINT: step:7200/10000 val_loss:3.9748 svd_entropy: attn_qk:H=0.7499,top10E=0.26,eRank=152.5,q75/q25=113.79 attn_vo:H=0.8347,top10E=0.15,eRank=282.1,q75/q25=57.50 mlp_w1:H=0.7418,top10E=0.31,eRank=165.4,q75/q25=18.19 mlp_w2:H=0.8325,top10E=0.14,eRank=271.5,q75/q25=56.21 vo_prod:H=0.7476,top10E=0.23,eRank=151.8,q75/q25=2819.29 train_time:541437ms step_avg:75.20ms +[2025-09-02 07:57:36] [Rank 0] PRINT: step:7200/10000 val_loss:3.9748 svd_entropy: attn_qk:H=0.7499,top10E=0.26,eRank=152.5,q75/q25=113.79 attn_vo:H=0.8347,top10E=0.15,eRank=282.1,q75/q25=57.50 mlp_w1:H=0.7418,top10E=0.31,eRank=165.4,q75/q25=18.19 mlp_w2:H=0.8325,top10E=0.14,eRank=271.5,q75/q25=56.21 vo_prod:H=0.7476,top10E=0.23,eRank=151.8,q75/q25=2819.29 train_time:541437ms step_avg:75.20ms +[2025-09-02 07:57:36] [Rank 0] step:7201/10000 train_time:541448ms step_avg:75.19ms +[2025-09-02 07:57:36] [Rank 0] step:7201/10000 train_time:541448ms step_avg:75.19ms +[2025-09-02 07:57:38] [Rank 0] step:7221/10000 train_time:542911ms step_avg:75.18ms +[2025-09-02 07:57:38] [Rank 0] step:7221/10000 train_time:542911ms step_avg:75.18ms +[2025-09-02 07:57:39] [Rank 0] step:7241/10000 train_time:544500ms step_avg:75.20ms +[2025-09-02 07:57:39] [Rank 0] step:7241/10000 train_time:544500ms step_avg:75.20ms +[2025-09-02 07:57:41] [Rank 0] step:7261/10000 train_time:546093ms step_avg:75.21ms +[2025-09-02 07:57:41] [Rank 0] step:7261/10000 train_time:546093ms step_avg:75.21ms +[2025-09-02 07:57:42] [Rank 0] step:7281/10000 train_time:547698ms step_avg:75.22ms +[2025-09-02 07:57:42] [Rank 0] step:7281/10000 train_time:547698ms step_avg:75.22ms +[2025-09-02 07:57:44] [Rank 0] step:7301/10000 train_time:549294ms step_avg:75.24ms +[2025-09-02 07:57:44] [Rank 0] step:7301/10000 train_time:549294ms step_avg:75.24ms +[2025-09-02 07:57:46] [Rank 0] step:7321/10000 train_time:550898ms step_avg:75.25ms +[2025-09-02 07:57:46] [Rank 0] step:7321/10000 train_time:550898ms step_avg:75.25ms +[2025-09-02 
07:57:47] [Rank 0] step:7341/10000 train_time:552497ms step_avg:75.26ms +[2025-09-02 07:57:47] [Rank 0] step:7341/10000 train_time:552497ms step_avg:75.26ms +[2025-09-02 07:57:49] [Rank 0] step:7361/10000 train_time:554099ms step_avg:75.27ms +[2025-09-02 07:57:49] [Rank 0] step:7361/10000 train_time:554099ms step_avg:75.27ms +[2025-09-02 07:57:50] [Rank 0] step:7381/10000 train_time:555707ms step_avg:75.29ms +[2025-09-02 07:57:50] [Rank 0] step:7381/10000 train_time:555707ms step_avg:75.29ms +[2025-09-02 07:57:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:57:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:58:04] [Rank 0] PRINT: step:7400/10000 val_loss:3.9555 svd_entropy: attn_qk:H=0.7511,top10E=0.26,eRank=153.7,q75/q25=114.18 attn_vo:H=0.8357,top10E=0.15,eRank=283.7,q75/q25=56.18 mlp_w1:H=0.7434,top10E=0.31,eRank=166.9,q75/q25=18.43 mlp_w2:H=0.8331,top10E=0.14,eRank=272.7,q75/q25=56.72 vo_prod:H=0.7491,top10E=0.23,eRank=153.3,q75/q25=2658.58 train_time:557450ms step_avg:75.33ms +[2025-09-02 07:58:04] [Rank 0] PRINT: step:7400/10000 val_loss:3.9555 svd_entropy: attn_qk:H=0.7511,top10E=0.26,eRank=153.7,q75/q25=114.18 attn_vo:H=0.8357,top10E=0.15,eRank=283.7,q75/q25=56.18 mlp_w1:H=0.7434,top10E=0.31,eRank=166.9,q75/q25=18.43 mlp_w2:H=0.8331,top10E=0.14,eRank=272.7,q75/q25=56.72 vo_prod:H=0.7491,top10E=0.23,eRank=153.3,q75/q25=2658.58 train_time:557450ms step_avg:75.33ms +[2025-09-02 07:58:04] [Rank 0] step:7401/10000 train_time:557461ms step_avg:75.32ms +[2025-09-02 07:58:04] [Rank 0] step:7401/10000 train_time:557461ms step_avg:75.32ms +[2025-09-02 07:58:05] [Rank 0] step:7421/10000 train_time:558914ms step_avg:75.32ms +[2025-09-02 07:58:05] [Rank 0] step:7421/10000 train_time:558914ms step_avg:75.32ms +[2025-09-02 07:58:07] [Rank 0] step:7441/10000 train_time:560509ms 
step_avg:75.33ms +[2025-09-02 07:58:07] [Rank 0] step:7441/10000 train_time:560509ms step_avg:75.33ms +[2025-09-02 07:58:09] [Rank 0] step:7461/10000 train_time:562106ms step_avg:75.34ms +[2025-09-02 07:58:09] [Rank 0] step:7461/10000 train_time:562106ms step_avg:75.34ms +[2025-09-02 07:58:10] [Rank 0] step:7481/10000 train_time:563707ms step_avg:75.35ms +[2025-09-02 07:58:10] [Rank 0] step:7481/10000 train_time:563707ms step_avg:75.35ms +[2025-09-02 07:58:12] [Rank 0] step:7501/10000 train_time:565310ms step_avg:75.36ms +[2025-09-02 07:58:12] [Rank 0] step:7501/10000 train_time:565310ms step_avg:75.36ms +[2025-09-02 07:58:13] [Rank 0] step:7521/10000 train_time:566913ms step_avg:75.38ms +[2025-09-02 07:58:13] [Rank 0] step:7521/10000 train_time:566913ms step_avg:75.38ms +[2025-09-02 07:58:15] [Rank 0] step:7541/10000 train_time:568528ms step_avg:75.39ms +[2025-09-02 07:58:15] [Rank 0] step:7541/10000 train_time:568528ms step_avg:75.39ms +[2025-09-02 07:58:17] [Rank 0] step:7561/10000 train_time:570118ms step_avg:75.40ms +[2025-09-02 07:58:17] [Rank 0] step:7561/10000 train_time:570118ms step_avg:75.40ms +[2025-09-02 07:58:18] [Rank 0] step:7581/10000 train_time:571727ms step_avg:75.42ms +[2025-09-02 07:58:18] [Rank 0] step:7581/10000 train_time:571727ms step_avg:75.42ms +[2025-09-02 07:58:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:58:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:58:31] [Rank 0] PRINT: step:7600/10000 val_loss:3.9522 svd_entropy: attn_qk:H=0.7523,top10E=0.26,eRank=154.7,q75/q25=113.75 attn_vo:H=0.8367,top10E=0.14,eRank=285.2,q75/q25=55.06 mlp_w1:H=0.7448,top10E=0.31,eRank=168.3,q75/q25=18.74 mlp_w2:H=0.8336,top10E=0.14,eRank=273.8,q75/q25=57.23 vo_prod:H=0.7505,top10E=0.23,eRank=154.8,q75/q25=2507.26 train_time:573496ms step_avg:75.46ms +[2025-09-02 07:58:31] [Rank 0] PRINT: step:7600/10000 val_loss:3.9522 svd_entropy: attn_qk:H=0.7523,top10E=0.26,eRank=154.7,q75/q25=113.75 attn_vo:H=0.8367,top10E=0.14,eRank=285.2,q75/q25=55.06 mlp_w1:H=0.7448,top10E=0.31,eRank=168.3,q75/q25=18.74 mlp_w2:H=0.8336,top10E=0.14,eRank=273.8,q75/q25=57.23 vo_prod:H=0.7505,top10E=0.23,eRank=154.8,q75/q25=2507.26 train_time:573496ms step_avg:75.46ms +[2025-09-02 07:58:32] [Rank 0] step:7601/10000 train_time:573508ms step_avg:75.45ms +[2025-09-02 07:58:32] [Rank 0] step:7601/10000 train_time:573508ms step_avg:75.45ms +[2025-09-02 07:58:33] [Rank 0] step:7621/10000 train_time:574955ms step_avg:75.44ms +[2025-09-02 07:58:33] [Rank 0] step:7621/10000 train_time:574955ms step_avg:75.44ms +[2025-09-02 07:58:35] [Rank 0] step:7641/10000 train_time:576554ms step_avg:75.46ms +[2025-09-02 07:58:35] [Rank 0] step:7641/10000 train_time:576554ms step_avg:75.46ms +[2025-09-02 07:58:36] [Rank 0] step:7661/10000 train_time:578157ms step_avg:75.47ms +[2025-09-02 07:58:36] [Rank 0] step:7661/10000 train_time:578157ms step_avg:75.47ms +[2025-09-02 07:58:38] [Rank 0] step:7681/10000 train_time:579753ms step_avg:75.48ms +[2025-09-02 07:58:38] [Rank 0] step:7681/10000 train_time:579753ms step_avg:75.48ms +[2025-09-02 07:58:40] [Rank 0] step:7701/10000 train_time:581350ms step_avg:75.49ms +[2025-09-02 07:58:40] [Rank 0] step:7701/10000 train_time:581350ms step_avg:75.49ms +[2025-09-02 07:58:41] [Rank 0] step:7721/10000 train_time:582962ms step_avg:75.50ms +[2025-09-02 07:58:41] [Rank 0] step:7721/10000 train_time:582962ms step_avg:75.50ms +[2025-09-02 
07:58:43] [Rank 0] step:7741/10000 train_time:584565ms step_avg:75.52ms +[2025-09-02 07:58:43] [Rank 0] step:7741/10000 train_time:584565ms step_avg:75.52ms +[2025-09-02 07:58:44] [Rank 0] step:7761/10000 train_time:586171ms step_avg:75.53ms +[2025-09-02 07:58:44] [Rank 0] step:7761/10000 train_time:586171ms step_avg:75.53ms +[2025-09-02 07:58:46] [Rank 0] step:7781/10000 train_time:587779ms step_avg:75.54ms +[2025-09-02 07:58:46] [Rank 0] step:7781/10000 train_time:587779ms step_avg:75.54ms +[2025-09-02 07:58:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:58:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:58:59] [Rank 0] PRINT: step:7800/10000 val_loss:3.9373 svd_entropy: attn_qk:H=0.7533,top10E=0.25,eRank=155.7,q75/q25=113.36 attn_vo:H=0.8376,top10E=0.14,eRank=286.6,q75/q25=54.04 mlp_w1:H=0.7461,top10E=0.31,eRank=169.6,q75/q25=18.99 mlp_w2:H=0.8341,top10E=0.14,eRank=274.9,q75/q25=57.72 vo_prod:H=0.7517,top10E=0.23,eRank=156.0,q75/q25=2357.56 train_time:589556ms step_avg:75.58ms +[2025-09-02 07:58:59] [Rank 0] PRINT: step:7800/10000 val_loss:3.9373 svd_entropy: attn_qk:H=0.7533,top10E=0.25,eRank=155.7,q75/q25=113.36 attn_vo:H=0.8376,top10E=0.14,eRank=286.6,q75/q25=54.04 mlp_w1:H=0.7461,top10E=0.31,eRank=169.6,q75/q25=18.99 mlp_w2:H=0.8341,top10E=0.14,eRank=274.9,q75/q25=57.72 vo_prod:H=0.7517,top10E=0.23,eRank=156.0,q75/q25=2357.56 train_time:589556ms step_avg:75.58ms +[2025-09-02 07:58:59] [Rank 0] step:7801/10000 train_time:589567ms step_avg:75.58ms +[2025-09-02 07:58:59] [Rank 0] step:7801/10000 train_time:589567ms step_avg:75.58ms +[2025-09-02 07:59:01] [Rank 0] step:7821/10000 train_time:591033ms step_avg:75.57ms +[2025-09-02 07:59:01] [Rank 0] step:7821/10000 train_time:591033ms step_avg:75.57ms +[2025-09-02 07:59:03] [Rank 0] step:7841/10000 train_time:592632ms 
step_avg:75.58ms +[2025-09-02 07:59:03] [Rank 0] step:7841/10000 train_time:592632ms step_avg:75.58ms +[2025-09-02 07:59:04] [Rank 0] step:7861/10000 train_time:594240ms step_avg:75.59ms +[2025-09-02 07:59:04] [Rank 0] step:7861/10000 train_time:594240ms step_avg:75.59ms +[2025-09-02 07:59:06] [Rank 0] step:7881/10000 train_time:595850ms step_avg:75.61ms +[2025-09-02 07:59:06] [Rank 0] step:7881/10000 train_time:595850ms step_avg:75.61ms +[2025-09-02 07:59:07] [Rank 0] step:7901/10000 train_time:597446ms step_avg:75.62ms +[2025-09-02 07:59:07] [Rank 0] step:7901/10000 train_time:597446ms step_avg:75.62ms +[2025-09-02 07:59:09] [Rank 0] step:7921/10000 train_time:599052ms step_avg:75.63ms +[2025-09-02 07:59:09] [Rank 0] step:7921/10000 train_time:599052ms step_avg:75.63ms +[2025-09-02 07:59:11] [Rank 0] step:7941/10000 train_time:600659ms step_avg:75.64ms +[2025-09-02 07:59:11] [Rank 0] step:7941/10000 train_time:600659ms step_avg:75.64ms +[2025-09-02 07:59:12] [Rank 0] step:7961/10000 train_time:602271ms step_avg:75.65ms +[2025-09-02 07:59:12] [Rank 0] step:7961/10000 train_time:602271ms step_avg:75.65ms +[2025-09-02 07:59:14] [Rank 0] step:7981/10000 train_time:603870ms step_avg:75.66ms +[2025-09-02 07:59:14] [Rank 0] step:7981/10000 train_time:603870ms step_avg:75.66ms +[2025-09-02 07:59:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:59:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 07:59:27] [Rank 0] PRINT: step:8000/10000 val_loss:3.9216 svd_entropy: attn_qk:H=0.7543,top10E=0.25,eRank=156.7,q75/q25=113.45 attn_vo:H=0.8383,top10E=0.14,eRank=287.9,q75/q25=53.05 mlp_w1:H=0.7472,top10E=0.30,eRank=170.7,q75/q25=19.17 mlp_w2:H=0.8346,top10E=0.14,eRank=275.9,q75/q25=57.97 vo_prod:H=0.7530,top10E=0.23,eRank=157.3,q75/q25=2242.57 train_time:605634ms step_avg:75.70ms +[2025-09-02 07:59:27] [Rank 0] PRINT: step:8000/10000 val_loss:3.9216 svd_entropy: attn_qk:H=0.7543,top10E=0.25,eRank=156.7,q75/q25=113.45 attn_vo:H=0.8383,top10E=0.14,eRank=287.9,q75/q25=53.05 mlp_w1:H=0.7472,top10E=0.30,eRank=170.7,q75/q25=19.17 mlp_w2:H=0.8346,top10E=0.14,eRank=275.9,q75/q25=57.97 vo_prod:H=0.7530,top10E=0.23,eRank=157.3,q75/q25=2242.57 train_time:605634ms step_avg:75.70ms +[2025-09-02 07:59:27] [Rank 0] step:8001/10000 train_time:605646ms step_avg:75.70ms +[2025-09-02 07:59:27] [Rank 0] step:8001/10000 train_time:605646ms step_avg:75.70ms +[2025-09-02 07:59:29] [Rank 0] step:8021/10000 train_time:607096ms step_avg:75.69ms +[2025-09-02 07:59:29] [Rank 0] step:8021/10000 train_time:607096ms step_avg:75.69ms +[2025-09-02 07:59:31] [Rank 0] step:8041/10000 train_time:608709ms step_avg:75.70ms +[2025-09-02 07:59:31] [Rank 0] step:8041/10000 train_time:608709ms step_avg:75.70ms +[2025-09-02 07:59:32] [Rank 0] step:8061/10000 train_time:610310ms step_avg:75.71ms +[2025-09-02 07:59:32] [Rank 0] step:8061/10000 train_time:610310ms step_avg:75.71ms +[2025-09-02 07:59:34] [Rank 0] step:8081/10000 train_time:611907ms step_avg:75.72ms +[2025-09-02 07:59:34] [Rank 0] step:8081/10000 train_time:611907ms step_avg:75.72ms +[2025-09-02 07:59:35] [Rank 0] step:8101/10000 train_time:613517ms step_avg:75.73ms +[2025-09-02 07:59:35] [Rank 0] step:8101/10000 train_time:613517ms step_avg:75.73ms +[2025-09-02 07:59:37] [Rank 0] step:8121/10000 train_time:615120ms step_avg:75.74ms +[2025-09-02 07:59:37] [Rank 0] step:8121/10000 train_time:615120ms step_avg:75.74ms +[2025-09-02 
07:59:39] [Rank 0] step:8141/10000 train_time:616820ms step_avg:75.77ms +[2025-09-02 07:59:39] [Rank 0] step:8141/10000 train_time:616820ms step_avg:75.77ms +[2025-09-02 07:59:40] [Rank 0] step:8161/10000 train_time:618435ms step_avg:75.78ms +[2025-09-02 07:59:40] [Rank 0] step:8161/10000 train_time:618435ms step_avg:75.78ms +[2025-09-02 07:59:42] [Rank 0] step:8181/10000 train_time:620071ms step_avg:75.79ms +[2025-09-02 07:59:42] [Rank 0] step:8181/10000 train_time:620071ms step_avg:75.79ms +[2025-09-02 07:59:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:59:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 07:59:55] [Rank 0] PRINT: step:8200/10000 val_loss:3.9118 svd_entropy: attn_qk:H=0.7552,top10E=0.25,eRank=157.5,q75/q25=113.47 attn_vo:H=0.8391,top10E=0.14,eRank=289.0,q75/q25=52.50 mlp_w1:H=0.7483,top10E=0.30,eRank=171.8,q75/q25=19.36 mlp_w2:H=0.8350,top10E=0.14,eRank=276.9,q75/q25=58.19 vo_prod:H=0.7541,top10E=0.23,eRank=158.4,q75/q25=2141.81 train_time:621890ms step_avg:75.84ms +[2025-09-02 07:59:55] [Rank 0] PRINT: step:8200/10000 val_loss:3.9118 svd_entropy: attn_qk:H=0.7552,top10E=0.25,eRank=157.5,q75/q25=113.47 attn_vo:H=0.8391,top10E=0.14,eRank=289.0,q75/q25=52.50 mlp_w1:H=0.7483,top10E=0.30,eRank=171.8,q75/q25=19.36 mlp_w2:H=0.8350,top10E=0.14,eRank=276.9,q75/q25=58.19 vo_prod:H=0.7541,top10E=0.23,eRank=158.4,q75/q25=2141.81 train_time:621890ms step_avg:75.84ms +[2025-09-02 07:59:56] [Rank 0] step:8201/10000 train_time:621902ms step_avg:75.83ms +[2025-09-02 07:59:56] [Rank 0] step:8201/10000 train_time:621902ms step_avg:75.83ms +[2025-09-02 07:59:57] [Rank 0] step:8221/10000 train_time:623397ms step_avg:75.83ms +[2025-09-02 07:59:57] [Rank 0] step:8221/10000 train_time:623397ms step_avg:75.83ms +[2025-09-02 07:59:59] [Rank 0] step:8241/10000 train_time:625036ms 
step_avg:75.84ms +[2025-09-02 07:59:59] [Rank 0] step:8241/10000 train_time:625036ms step_avg:75.84ms +[2025-09-02 08:00:01] [Rank 0] step:8261/10000 train_time:626666ms step_avg:75.86ms +[2025-09-02 08:00:01] [Rank 0] step:8261/10000 train_time:626666ms step_avg:75.86ms +[2025-09-02 08:00:02] [Rank 0] step:8281/10000 train_time:628298ms step_avg:75.87ms +[2025-09-02 08:00:02] [Rank 0] step:8281/10000 train_time:628298ms step_avg:75.87ms +[2025-09-02 08:00:04] [Rank 0] step:8301/10000 train_time:629930ms step_avg:75.89ms +[2025-09-02 08:00:04] [Rank 0] step:8301/10000 train_time:629930ms step_avg:75.89ms +[2025-09-02 08:00:05] [Rank 0] step:8321/10000 train_time:631550ms step_avg:75.90ms +[2025-09-02 08:00:05] [Rank 0] step:8321/10000 train_time:631550ms step_avg:75.90ms +[2025-09-02 08:00:07] [Rank 0] step:8341/10000 train_time:633179ms step_avg:75.91ms +[2025-09-02 08:00:07] [Rank 0] step:8341/10000 train_time:633179ms step_avg:75.91ms +[2025-09-02 08:00:09] [Rank 0] step:8361/10000 train_time:634810ms step_avg:75.93ms +[2025-09-02 08:00:09] [Rank 0] step:8361/10000 train_time:634810ms step_avg:75.93ms +[2025-09-02 08:00:10] [Rank 0] step:8381/10000 train_time:636442ms step_avg:75.94ms +[2025-09-02 08:00:10] [Rank 0] step:8381/10000 train_time:636442ms step_avg:75.94ms +[2025-09-02 08:00:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:00:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:00:24] [Rank 0] PRINT: step:8400/10000 val_loss:3.9017 svd_entropy: attn_qk:H=0.7560,top10E=0.25,eRank=158.3,q75/q25=113.52 attn_vo:H=0.8397,top10E=0.14,eRank=290.1,q75/q25=51.81 mlp_w1:H=0.7493,top10E=0.30,eRank=172.9,q75/q25=19.50 mlp_w2:H=0.8354,top10E=0.14,eRank=277.7,q75/q25=58.19 vo_prod:H=0.7551,top10E=0.22,eRank=159.4,q75/q25=2087.77 train_time:638229ms step_avg:75.98ms +[2025-09-02 08:00:24] [Rank 0] PRINT: step:8400/10000 val_loss:3.9017 svd_entropy: attn_qk:H=0.7560,top10E=0.25,eRank=158.3,q75/q25=113.52 attn_vo:H=0.8397,top10E=0.14,eRank=290.1,q75/q25=51.81 mlp_w1:H=0.7493,top10E=0.30,eRank=172.9,q75/q25=19.50 mlp_w2:H=0.8354,top10E=0.14,eRank=277.7,q75/q25=58.19 vo_prod:H=0.7551,top10E=0.22,eRank=159.4,q75/q25=2087.77 train_time:638229ms step_avg:75.98ms +[2025-09-02 08:00:24] [Rank 0] step:8401/10000 train_time:638240ms step_avg:75.97ms +[2025-09-02 08:00:24] [Rank 0] step:8401/10000 train_time:638240ms step_avg:75.97ms +[2025-09-02 08:00:25] [Rank 0] step:8421/10000 train_time:639733ms step_avg:75.97ms +[2025-09-02 08:00:25] [Rank 0] step:8421/10000 train_time:639733ms step_avg:75.97ms +[2025-09-02 08:00:27] [Rank 0] step:8441/10000 train_time:641358ms step_avg:75.98ms +[2025-09-02 08:00:27] [Rank 0] step:8441/10000 train_time:641358ms step_avg:75.98ms +[2025-09-02 08:00:29] [Rank 0] step:8461/10000 train_time:642976ms step_avg:75.99ms +[2025-09-02 08:00:29] [Rank 0] step:8461/10000 train_time:642976ms step_avg:75.99ms +[2025-09-02 08:00:30] [Rank 0] step:8481/10000 train_time:644610ms step_avg:76.01ms +[2025-09-02 08:00:30] [Rank 0] step:8481/10000 train_time:644610ms step_avg:76.01ms +[2025-09-02 08:00:32] [Rank 0] step:8501/10000 train_time:646262ms step_avg:76.02ms +[2025-09-02 08:00:32] [Rank 0] step:8501/10000 train_time:646262ms step_avg:76.02ms +[2025-09-02 08:00:34] [Rank 0] step:8521/10000 train_time:647897ms step_avg:76.04ms +[2025-09-02 08:00:34] [Rank 0] step:8521/10000 train_time:647897ms step_avg:76.04ms +[2025-09-02 
08:00:35] [Rank 0] step:8541/10000 train_time:649538ms step_avg:76.05ms +[2025-09-02 08:00:35] [Rank 0] step:8541/10000 train_time:649538ms step_avg:76.05ms +[2025-09-02 08:00:37] [Rank 0] step:8561/10000 train_time:651170ms step_avg:76.06ms +[2025-09-02 08:00:37] [Rank 0] step:8561/10000 train_time:651170ms step_avg:76.06ms +[2025-09-02 08:00:39] [Rank 0] step:8581/10000 train_time:652803ms step_avg:76.08ms +[2025-09-02 08:00:39] [Rank 0] step:8581/10000 train_time:652803ms step_avg:76.08ms +[2025-09-02 08:00:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:00:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:00:52] [Rank 0] PRINT: step:8600/10000 val_loss:3.8933 svd_entropy: attn_qk:H=0.7567,top10E=0.25,eRank=158.9,q75/q25=113.15 attn_vo:H=0.8403,top10E=0.14,eRank=290.9,q75/q25=51.03 mlp_w1:H=0.7501,top10E=0.30,eRank=173.7,q75/q25=19.62 mlp_w2:H=0.8358,top10E=0.14,eRank=278.6,q75/q25=58.37 vo_prod:H=0.7559,top10E=0.22,eRank=160.3,q75/q25=2022.86 train_time:654586ms step_avg:76.11ms +[2025-09-02 08:00:52] [Rank 0] PRINT: step:8600/10000 val_loss:3.8933 svd_entropy: attn_qk:H=0.7567,top10E=0.25,eRank=158.9,q75/q25=113.15 attn_vo:H=0.8403,top10E=0.14,eRank=290.9,q75/q25=51.03 mlp_w1:H=0.7501,top10E=0.30,eRank=173.7,q75/q25=19.62 mlp_w2:H=0.8358,top10E=0.14,eRank=278.6,q75/q25=58.37 vo_prod:H=0.7559,top10E=0.22,eRank=160.3,q75/q25=2022.86 train_time:654586ms step_avg:76.11ms +[2025-09-02 08:00:52] [Rank 0] step:8601/10000 train_time:654598ms step_avg:76.11ms +[2025-09-02 08:00:52] [Rank 0] step:8601/10000 train_time:654598ms step_avg:76.11ms +[2025-09-02 08:00:54] [Rank 0] step:8621/10000 train_time:656086ms step_avg:76.10ms +[2025-09-02 08:00:54] [Rank 0] step:8621/10000 train_time:656086ms step_avg:76.10ms +[2025-09-02 08:00:55] [Rank 0] step:8641/10000 train_time:657715ms 
step_avg:76.12ms +[2025-09-02 08:00:55] [Rank 0] step:8641/10000 train_time:657715ms step_avg:76.12ms +[2025-09-02 08:00:57] [Rank 0] step:8661/10000 train_time:659343ms step_avg:76.13ms +[2025-09-02 08:00:57] [Rank 0] step:8661/10000 train_time:659343ms step_avg:76.13ms +[2025-09-02 08:00:58] [Rank 0] step:8681/10000 train_time:660972ms step_avg:76.14ms +[2025-09-02 08:00:58] [Rank 0] step:8681/10000 train_time:660972ms step_avg:76.14ms +[2025-09-02 08:01:00] [Rank 0] step:8701/10000 train_time:662598ms step_avg:76.15ms +[2025-09-02 08:01:00] [Rank 0] step:8701/10000 train_time:662598ms step_avg:76.15ms +[2025-09-02 08:01:02] [Rank 0] step:8721/10000 train_time:664228ms step_avg:76.16ms +[2025-09-02 08:01:02] [Rank 0] step:8721/10000 train_time:664228ms step_avg:76.16ms +[2025-09-02 08:01:03] [Rank 0] step:8741/10000 train_time:665847ms step_avg:76.18ms +[2025-09-02 08:01:03] [Rank 0] step:8741/10000 train_time:665847ms step_avg:76.18ms +[2025-09-02 08:01:05] [Rank 0] step:8761/10000 train_time:667473ms step_avg:76.19ms +[2025-09-02 08:01:05] [Rank 0] step:8761/10000 train_time:667473ms step_avg:76.19ms +[2025-09-02 08:01:07] [Rank 0] step:8781/10000 train_time:669112ms step_avg:76.20ms +[2025-09-02 08:01:07] [Rank 0] step:8781/10000 train_time:669112ms step_avg:76.20ms +[2025-09-02 08:01:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:01:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:01:20] [Rank 0] PRINT: step:8800/10000 val_loss:3.8840 svd_entropy: attn_qk:H=0.7573,top10E=0.25,eRank=159.6,q75/q25=112.95 attn_vo:H=0.8408,top10E=0.14,eRank=291.7,q75/q25=50.43 mlp_w1:H=0.7510,top10E=0.30,eRank=174.6,q75/q25=19.73 mlp_w2:H=0.8362,top10E=0.14,eRank=279.3,q75/q25=58.50 vo_prod:H=0.7567,top10E=0.22,eRank=161.2,q75/q25=1959.94 train_time:670907ms step_avg:76.24ms +[2025-09-02 08:01:20] [Rank 0] PRINT: step:8800/10000 val_loss:3.8840 svd_entropy: attn_qk:H=0.7573,top10E=0.25,eRank=159.6,q75/q25=112.95 attn_vo:H=0.8408,top10E=0.14,eRank=291.7,q75/q25=50.43 mlp_w1:H=0.7510,top10E=0.30,eRank=174.6,q75/q25=19.73 mlp_w2:H=0.8362,top10E=0.14,eRank=279.3,q75/q25=58.50 vo_prod:H=0.7567,top10E=0.22,eRank=161.2,q75/q25=1959.94 train_time:670907ms step_avg:76.24ms +[2025-09-02 08:01:20] [Rank 0] step:8801/10000 train_time:670919ms step_avg:76.23ms +[2025-09-02 08:01:20] [Rank 0] step:8801/10000 train_time:670919ms step_avg:76.23ms +[2025-09-02 08:01:22] [Rank 0] step:8821/10000 train_time:672405ms step_avg:76.23ms +[2025-09-02 08:01:22] [Rank 0] step:8821/10000 train_time:672405ms step_avg:76.23ms +[2025-09-02 08:01:23] [Rank 0] step:8841/10000 train_time:674056ms step_avg:76.24ms +[2025-09-02 08:01:23] [Rank 0] step:8841/10000 train_time:674056ms step_avg:76.24ms +[2025-09-02 08:01:25] [Rank 0] step:8861/10000 train_time:675686ms step_avg:76.25ms +[2025-09-02 08:01:25] [Rank 0] step:8861/10000 train_time:675686ms step_avg:76.25ms +[2025-09-02 08:01:26] [Rank 0] step:8881/10000 train_time:677318ms step_avg:76.27ms +[2025-09-02 08:01:26] [Rank 0] step:8881/10000 train_time:677318ms step_avg:76.27ms +[2025-09-02 08:01:28] [Rank 0] step:8901/10000 train_time:678955ms step_avg:76.28ms +[2025-09-02 08:01:28] [Rank 0] step:8901/10000 train_time:678955ms step_avg:76.28ms +[2025-09-02 08:01:30] [Rank 0] step:8921/10000 train_time:680593ms step_avg:76.29ms +[2025-09-02 08:01:30] [Rank 0] step:8921/10000 train_time:680593ms step_avg:76.29ms +[2025-09-02 
08:01:31] [Rank 0] step:8941/10000 train_time:682238ms step_avg:76.30ms +[2025-09-02 08:01:31] [Rank 0] step:8941/10000 train_time:682238ms step_avg:76.30ms +[2025-09-02 08:01:33] [Rank 0] step:8961/10000 train_time:683863ms step_avg:76.32ms +[2025-09-02 08:01:33] [Rank 0] step:8961/10000 train_time:683863ms step_avg:76.32ms +[2025-09-02 08:01:35] [Rank 0] step:8981/10000 train_time:685491ms step_avg:76.33ms +[2025-09-02 08:01:35] [Rank 0] step:8981/10000 train_time:685491ms step_avg:76.33ms +[2025-09-02 08:01:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:01:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:01:48] [Rank 0] PRINT: step:9000/10000 val_loss:3.8754 svd_entropy: attn_qk:H=0.7578,top10E=0.25,eRank=160.1,q75/q25=113.04 attn_vo:H=0.8412,top10E=0.14,eRank=292.5,q75/q25=50.01 mlp_w1:H=0.7517,top10E=0.30,eRank=175.4,q75/q25=19.79 mlp_w2:H=0.8365,top10E=0.14,eRank=279.9,q75/q25=58.56 vo_prod:H=0.7575,top10E=0.22,eRank=161.9,q75/q25=1910.93 train_time:687283ms step_avg:76.36ms +[2025-09-02 08:01:48] [Rank 0] PRINT: step:9000/10000 val_loss:3.8754 svd_entropy: attn_qk:H=0.7578,top10E=0.25,eRank=160.1,q75/q25=113.04 attn_vo:H=0.8412,top10E=0.14,eRank=292.5,q75/q25=50.01 mlp_w1:H=0.7517,top10E=0.30,eRank=175.4,q75/q25=19.79 mlp_w2:H=0.8365,top10E=0.14,eRank=279.9,q75/q25=58.56 vo_prod:H=0.7575,top10E=0.22,eRank=161.9,q75/q25=1910.93 train_time:687283ms step_avg:76.36ms +[2025-09-02 08:01:48] [Rank 0] step:9001/10000 train_time:687294ms step_avg:76.36ms +[2025-09-02 08:01:48] [Rank 0] step:9001/10000 train_time:687294ms step_avg:76.36ms +[2025-09-02 08:01:50] [Rank 0] step:9021/10000 train_time:688781ms step_avg:76.35ms +[2025-09-02 08:01:50] [Rank 0] step:9021/10000 train_time:688781ms step_avg:76.35ms +[2025-09-02 08:01:52] [Rank 0] step:9041/10000 train_time:690407ms 
step_avg:76.36ms +[2025-09-02 08:01:52] [Rank 0] step:9041/10000 train_time:690407ms step_avg:76.36ms +[2025-09-02 08:01:53] [Rank 0] step:9061/10000 train_time:692051ms step_avg:76.38ms +[2025-09-02 08:01:53] [Rank 0] step:9061/10000 train_time:692051ms step_avg:76.38ms +[2025-09-02 08:01:55] [Rank 0] step:9081/10000 train_time:693691ms step_avg:76.39ms +[2025-09-02 08:01:55] [Rank 0] step:9081/10000 train_time:693691ms step_avg:76.39ms +[2025-09-02 08:01:56] [Rank 0] step:9101/10000 train_time:695342ms step_avg:76.40ms +[2025-09-02 08:01:56] [Rank 0] step:9101/10000 train_time:695342ms step_avg:76.40ms +[2025-09-02 08:01:58] [Rank 0] step:9121/10000 train_time:696987ms step_avg:76.42ms +[2025-09-02 08:01:58] [Rank 0] step:9121/10000 train_time:696987ms step_avg:76.42ms +[2025-09-02 08:02:00] [Rank 0] step:9141/10000 train_time:698614ms step_avg:76.43ms +[2025-09-02 08:02:00] [Rank 0] step:9141/10000 train_time:698614ms step_avg:76.43ms +[2025-09-02 08:02:01] [Rank 0] step:9161/10000 train_time:700241ms step_avg:76.44ms +[2025-09-02 08:02:01] [Rank 0] step:9161/10000 train_time:700241ms step_avg:76.44ms +[2025-09-02 08:02:03] [Rank 0] step:9181/10000 train_time:701909ms step_avg:76.45ms +[2025-09-02 08:02:03] [Rank 0] step:9181/10000 train_time:701909ms step_avg:76.45ms +[2025-09-02 08:02:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:02:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:02:16] [Rank 0] PRINT: step:9200/10000 val_loss:3.8678 svd_entropy: attn_qk:H=0.7583,top10E=0.25,eRank=160.5,q75/q25=112.75 attn_vo:H=0.8416,top10E=0.14,eRank=293.1,q75/q25=49.48 mlp_w1:H=0.7523,top10E=0.30,eRank=176.0,q75/q25=19.86 mlp_w2:H=0.8368,top10E=0.13,eRank=280.6,q75/q25=58.69 vo_prod:H=0.7581,top10E=0.22,eRank=162.6,q75/q25=1852.98 train_time:703702ms step_avg:76.49ms +[2025-09-02 08:02:16] [Rank 0] PRINT: step:9200/10000 val_loss:3.8678 svd_entropy: attn_qk:H=0.7583,top10E=0.25,eRank=160.5,q75/q25=112.75 attn_vo:H=0.8416,top10E=0.14,eRank=293.1,q75/q25=49.48 mlp_w1:H=0.7523,top10E=0.30,eRank=176.0,q75/q25=19.86 mlp_w2:H=0.8368,top10E=0.13,eRank=280.6,q75/q25=58.69 vo_prod:H=0.7581,top10E=0.22,eRank=162.6,q75/q25=1852.98 train_time:703702ms step_avg:76.49ms +[2025-09-02 08:02:17] [Rank 0] step:9201/10000 train_time:703713ms step_avg:76.48ms +[2025-09-02 08:02:17] [Rank 0] step:9201/10000 train_time:703713ms step_avg:76.48ms +[2025-09-02 08:02:18] [Rank 0] step:9221/10000 train_time:705209ms step_avg:76.48ms +[2025-09-02 08:02:18] [Rank 0] step:9221/10000 train_time:705209ms step_avg:76.48ms +[2025-09-02 08:02:20] [Rank 0] step:9241/10000 train_time:706851ms step_avg:76.49ms +[2025-09-02 08:02:20] [Rank 0] step:9241/10000 train_time:706851ms step_avg:76.49ms +[2025-09-02 08:02:21] [Rank 0] step:9261/10000 train_time:708497ms step_avg:76.50ms +[2025-09-02 08:02:21] [Rank 0] step:9261/10000 train_time:708497ms step_avg:76.50ms +[2025-09-02 08:02:23] [Rank 0] step:9281/10000 train_time:710122ms step_avg:76.51ms +[2025-09-02 08:02:23] [Rank 0] step:9281/10000 train_time:710122ms step_avg:76.51ms +[2025-09-02 08:02:25] [Rank 0] step:9301/10000 train_time:711756ms step_avg:76.52ms +[2025-09-02 08:02:25] [Rank 0] step:9301/10000 train_time:711756ms step_avg:76.52ms +[2025-09-02 08:02:26] [Rank 0] step:9321/10000 train_time:713392ms step_avg:76.54ms +[2025-09-02 08:02:26] [Rank 0] step:9321/10000 train_time:713392ms step_avg:76.54ms +[2025-09-02 
08:02:28] [Rank 0] step:9341/10000 train_time:715023ms step_avg:76.55ms +[2025-09-02 08:02:28] [Rank 0] step:9341/10000 train_time:715023ms step_avg:76.55ms +[2025-09-02 08:02:30] [Rank 0] step:9361/10000 train_time:716662ms step_avg:76.56ms +[2025-09-02 08:02:30] [Rank 0] step:9361/10000 train_time:716662ms step_avg:76.56ms +[2025-09-02 08:02:31] [Rank 0] step:9381/10000 train_time:718306ms step_avg:76.57ms +[2025-09-02 08:02:31] [Rank 0] step:9381/10000 train_time:718306ms step_avg:76.57ms +[2025-09-02 08:02:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:02:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:02:45] [Rank 0] PRINT: step:9400/10000 val_loss:3.8602 svd_entropy: attn_qk:H=0.7587,top10E=0.25,eRank=161.0,q75/q25=113.16 attn_vo:H=0.8420,top10E=0.14,eRank=293.6,q75/q25=49.10 mlp_w1:H=0.7528,top10E=0.30,eRank=176.6,q75/q25=19.93 mlp_w2:H=0.8370,top10E=0.13,eRank=281.1,q75/q25=58.87 vo_prod:H=0.7586,top10E=0.22,eRank=163.1,q75/q25=1801.75 train_time:720110ms step_avg:76.61ms +[2025-09-02 08:02:45] [Rank 0] PRINT: step:9400/10000 val_loss:3.8602 svd_entropy: attn_qk:H=0.7587,top10E=0.25,eRank=161.0,q75/q25=113.16 attn_vo:H=0.8420,top10E=0.14,eRank=293.6,q75/q25=49.10 mlp_w1:H=0.7528,top10E=0.30,eRank=176.6,q75/q25=19.93 mlp_w2:H=0.8370,top10E=0.13,eRank=281.1,q75/q25=58.87 vo_prod:H=0.7586,top10E=0.22,eRank=163.1,q75/q25=1801.75 train_time:720110ms step_avg:76.61ms +[2025-09-02 08:02:45] [Rank 0] step:9401/10000 train_time:720121ms step_avg:76.60ms +[2025-09-02 08:02:45] [Rank 0] step:9401/10000 train_time:720121ms step_avg:76.60ms +[2025-09-02 08:02:46] [Rank 0] step:9421/10000 train_time:721593ms step_avg:76.59ms +[2025-09-02 08:02:46] [Rank 0] step:9421/10000 train_time:721593ms step_avg:76.59ms +[2025-09-02 08:02:48] [Rank 0] step:9441/10000 train_time:723227ms 
step_avg:76.60ms +[2025-09-02 08:02:48] [Rank 0] step:9441/10000 train_time:723227ms step_avg:76.60ms +[2025-09-02 08:02:50] [Rank 0] step:9461/10000 train_time:724873ms step_avg:76.62ms +[2025-09-02 08:02:50] [Rank 0] step:9461/10000 train_time:724873ms step_avg:76.62ms +[2025-09-02 08:02:51] [Rank 0] step:9481/10000 train_time:726510ms step_avg:76.63ms +[2025-09-02 08:02:51] [Rank 0] step:9481/10000 train_time:726510ms step_avg:76.63ms +[2025-09-02 08:02:53] [Rank 0] step:9501/10000 train_time:728156ms step_avg:76.64ms +[2025-09-02 08:02:53] [Rank 0] step:9501/10000 train_time:728156ms step_avg:76.64ms +[2025-09-02 08:02:55] [Rank 0] step:9521/10000 train_time:729784ms step_avg:76.65ms +[2025-09-02 08:02:55] [Rank 0] step:9521/10000 train_time:729784ms step_avg:76.65ms +[2025-09-02 08:02:56] [Rank 0] step:9541/10000 train_time:731419ms step_avg:76.66ms +[2025-09-02 08:02:56] [Rank 0] step:9541/10000 train_time:731419ms step_avg:76.66ms +[2025-09-02 08:02:58] [Rank 0] step:9561/10000 train_time:733046ms step_avg:76.67ms +[2025-09-02 08:02:58] [Rank 0] step:9561/10000 train_time:733046ms step_avg:76.67ms +[2025-09-02 08:03:00] [Rank 0] step:9581/10000 train_time:734682ms step_avg:76.68ms +[2025-09-02 08:03:00] [Rank 0] step:9581/10000 train_time:734682ms step_avg:76.68ms +[2025-09-02 08:03:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:03:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:03:13] [Rank 0] PRINT: step:9600/10000 val_loss:3.8539 svd_entropy: attn_qk:H=0.7591,top10E=0.25,eRank=161.3,q75/q25=113.16 attn_vo:H=0.8422,top10E=0.14,eRank=294.0,q75/q25=48.90 mlp_w1:H=0.7533,top10E=0.30,eRank=177.1,q75/q25=19.96 mlp_w2:H=0.8372,top10E=0.13,eRank=281.5,q75/q25=58.86 vo_prod:H=0.7591,top10E=0.22,eRank=163.6,q75/q25=1770.19 train_time:736488ms step_avg:76.72ms +[2025-09-02 08:03:13] [Rank 0] PRINT: step:9600/10000 val_loss:3.8539 svd_entropy: attn_qk:H=0.7591,top10E=0.25,eRank=161.3,q75/q25=113.16 attn_vo:H=0.8422,top10E=0.14,eRank=294.0,q75/q25=48.90 mlp_w1:H=0.7533,top10E=0.30,eRank=177.1,q75/q25=19.96 mlp_w2:H=0.8372,top10E=0.13,eRank=281.5,q75/q25=58.86 vo_prod:H=0.7591,top10E=0.22,eRank=163.6,q75/q25=1770.19 train_time:736488ms step_avg:76.72ms +[2025-09-02 08:03:13] [Rank 0] step:9601/10000 train_time:736501ms step_avg:76.71ms +[2025-09-02 08:03:13] [Rank 0] step:9601/10000 train_time:736501ms step_avg:76.71ms +[2025-09-02 08:03:15] [Rank 0] step:9621/10000 train_time:737983ms step_avg:76.71ms +[2025-09-02 08:03:15] [Rank 0] step:9621/10000 train_time:737983ms step_avg:76.71ms +[2025-09-02 08:03:16] [Rank 0] step:9641/10000 train_time:739618ms step_avg:76.72ms +[2025-09-02 08:03:16] [Rank 0] step:9641/10000 train_time:739618ms step_avg:76.72ms +[2025-09-02 08:03:18] [Rank 0] step:9661/10000 train_time:741280ms step_avg:76.73ms +[2025-09-02 08:03:18] [Rank 0] step:9661/10000 train_time:741280ms step_avg:76.73ms +[2025-09-02 08:03:19] [Rank 0] step:9681/10000 train_time:742934ms step_avg:76.74ms +[2025-09-02 08:03:19] [Rank 0] step:9681/10000 train_time:742934ms step_avg:76.74ms +[2025-09-02 08:03:21] [Rank 0] step:9701/10000 train_time:744603ms step_avg:76.76ms +[2025-09-02 08:03:21] [Rank 0] step:9701/10000 train_time:744603ms step_avg:76.76ms +[2025-09-02 08:03:23] [Rank 0] step:9721/10000 train_time:746254ms step_avg:76.77ms +[2025-09-02 08:03:23] [Rank 0] step:9721/10000 train_time:746254ms step_avg:76.77ms +[2025-09-02 
08:03:24] [Rank 0] step:9741/10000 train_time:747933ms step_avg:76.78ms +[2025-09-02 08:03:24] [Rank 0] step:9741/10000 train_time:747933ms step_avg:76.78ms +[2025-09-02 08:03:26] [Rank 0] step:9761/10000 train_time:749592ms step_avg:76.79ms +[2025-09-02 08:03:26] [Rank 0] step:9761/10000 train_time:749592ms step_avg:76.79ms +[2025-09-02 08:03:28] [Rank 0] step:9781/10000 train_time:751264ms step_avg:76.81ms +[2025-09-02 08:03:28] [Rank 0] step:9781/10000 train_time:751264ms step_avg:76.81ms +[2025-09-02 08:03:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:03:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:03:41] [Rank 0] PRINT: step:9800/10000 val_loss:3.8477 svd_entropy: attn_qk:H=0.7593,top10E=0.25,eRank=161.5,q75/q25=113.03 attn_vo:H=0.8424,top10E=0.14,eRank=294.3,q75/q25=48.63 mlp_w1:H=0.7536,top10E=0.29,eRank=177.4,q75/q25=19.97 mlp_w2:H=0.8374,top10E=0.13,eRank=281.8,q75/q25=58.74 vo_prod:H=0.7594,top10E=0.22,eRank=163.9,q75/q25=1729.36 train_time:753102ms step_avg:76.85ms +[2025-09-02 08:03:41] [Rank 0] PRINT: step:9800/10000 val_loss:3.8477 svd_entropy: attn_qk:H=0.7593,top10E=0.25,eRank=161.5,q75/q25=113.03 attn_vo:H=0.8424,top10E=0.14,eRank=294.3,q75/q25=48.63 mlp_w1:H=0.7536,top10E=0.29,eRank=177.4,q75/q25=19.97 mlp_w2:H=0.8374,top10E=0.13,eRank=281.8,q75/q25=58.74 vo_prod:H=0.7594,top10E=0.22,eRank=163.9,q75/q25=1729.36 train_time:753102ms step_avg:76.85ms +[2025-09-02 08:03:41] [Rank 0] step:9801/10000 train_time:753114ms step_avg:76.84ms +[2025-09-02 08:03:41] [Rank 0] step:9801/10000 train_time:753114ms step_avg:76.84ms +[2025-09-02 08:03:43] [Rank 0] step:9821/10000 train_time:754621ms step_avg:76.84ms +[2025-09-02 08:03:43] [Rank 0] step:9821/10000 train_time:754621ms step_avg:76.84ms +[2025-09-02 08:03:45] [Rank 0] step:9841/10000 train_time:756293ms 
step_avg:76.85ms +[2025-09-02 08:03:45] [Rank 0] step:9841/10000 train_time:756293ms step_avg:76.85ms +[2025-09-02 08:03:46] [Rank 0] step:9861/10000 train_time:757943ms step_avg:76.86ms +[2025-09-02 08:03:46] [Rank 0] step:9861/10000 train_time:757943ms step_avg:76.86ms +[2025-09-02 08:03:48] [Rank 0] step:9881/10000 train_time:759592ms step_avg:76.87ms +[2025-09-02 08:03:48] [Rank 0] step:9881/10000 train_time:759592ms step_avg:76.87ms +[2025-09-02 08:03:50] [Rank 0] step:9901/10000 train_time:761256ms step_avg:76.89ms +[2025-09-02 08:03:50] [Rank 0] step:9901/10000 train_time:761256ms step_avg:76.89ms +[2025-09-02 08:03:51] [Rank 0] step:9921/10000 train_time:762913ms step_avg:76.90ms +[2025-09-02 08:03:51] [Rank 0] step:9921/10000 train_time:762913ms step_avg:76.90ms +[2025-09-02 08:03:53] [Rank 0] step:9941/10000 train_time:764641ms step_avg:76.92ms +[2025-09-02 08:03:53] [Rank 0] step:9941/10000 train_time:764641ms step_avg:76.92ms +[2025-09-02 08:03:55] [Rank 0] step:9961/10000 train_time:766300ms step_avg:76.93ms +[2025-09-02 08:03:55] [Rank 0] step:9961/10000 train_time:766300ms step_avg:76.93ms +[2025-09-02 08:03:56] [Rank 0] step:9981/10000 train_time:767958ms step_avg:76.94ms +[2025-09-02 08:03:56] [Rank 0] step:9981/10000 train_time:767958ms step_avg:76.94ms +[2025-09-02 08:03:58] [Rank 0] step:10000/10000 train_time:769541ms step_avg:76.95ms +[2025-09-02 08:03:58] [Rank 0] step:10000/10000 train_time:769541ms step_avg:76.95ms +[2025-09-02 08:03:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:03:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:04:10] [Rank 0] PRINT: step:10000/10000 val_loss:3.8423 svd_entropy: attn_qk:H=0.7595,top10E=0.25,eRank=161.7,q75/q25=113.01 attn_vo:H=0.8426,top10E=0.14,eRank=294.6,q75/q25=48.52 mlp_w1:H=0.7539,top10E=0.29,eRank=177.7,q75/q25=19.98 mlp_w2:H=0.8375,top10E=0.13,eRank=282.0,q75/q25=58.67 vo_prod:H=0.7597,top10E=0.22,eRank=164.2,q75/q25=1723.83 train_time:769799ms step_avg:76.98ms +[2025-09-02 08:04:10] [Rank 0] PRINT: step:10000/10000 val_loss:3.8423 svd_entropy: attn_qk:H=0.7595,top10E=0.25,eRank=161.7,q75/q25=113.01 attn_vo:H=0.8426,top10E=0.14,eRank=294.6,q75/q25=48.52 mlp_w1:H=0.7539,top10E=0.29,eRank=177.7,q75/q25=19.98 mlp_w2:H=0.8375,top10E=0.13,eRank=282.0,q75/q25=58.67 vo_prod:H=0.7597,top10E=0.22,eRank=164.2,q75/q25=1723.83 train_time:769799ms step_avg:76.98ms +[2025-09-02 08:04:10] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 08:04:10 2025 --- +[2025-09-02 08:04:10] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 08:04:10 2025 --- +[2025-09-02 08:04:10] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 08:04:10] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_44/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..de0479ba906044af03b7d323dd70b5bff9c38305 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "262232e2-0969-4135-8727-e6c4d994b499", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_44/training_log_262232e2-0969-4135-8727-e6c4d994b499.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_44/training_log_262232e2-0969-4135-8727-e6c4d994b499.txt new file mode 100644 index 0000000000000000000000000000000000000000..09366efffc7048eb2e38c218eadfe703754d2133 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_44/training_log_262232e2-0969-4135-8727-e6c4d994b499.txt @@ -0,0 +1,2984 @@ +[2025-09-02 08:53:01] [Rank 0] PRINT: --- Script Start: Tue Sep 2 08:53:01 2025 --- +[2025-09-02 08:53:01] [Rank 0] PRINT: --- Script Start: Tue Sep 2 08:53:01 2025 --- +[2025-09-02 08:53:01] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 08:53:01] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 08:53:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 08:53:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 08:53:01] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-02 08:53:01] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-02 08:53:01] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_44 +[2025-09-02 08:53:01] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_44 +[2025-09-02 08:53:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 08:53:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 08:53:01] [Rank 0] PRINT: Constructing model... +[2025-09-02 08:53:01] [Rank 0] PRINT: Constructing model... +[2025-09-02 08:53:03] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 08:53:03] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 08:53:03] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 08:53:03] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 08:53:03] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 08:53:03] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 08:53:03] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 08:53:03] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 08:53:03] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 08:53:03] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 08:53:03] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 08:53:03] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 08:53:03] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 08:53:03] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 08:53:03] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 08:53:03] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 08:53:03] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 08:53:03] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 08:53:03] [Rank 0] PRINT: Starting warmup... +[2025-09-02 08:53:03] [Rank 0] PRINT: Starting warmup... +[2025-09-02 08:53:43] [Rank 0] PRINT: Warmup complete. +[2025-09-02 08:53:43] [Rank 0] PRINT: Warmup complete. +[2025-09-02 08:53:44] [Rank 0] PRINT: Starting training... +[2025-09-02 08:53:44] [Rank 0] PRINT: Starting training... 
+[2025-09-02 08:53:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:53:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:53:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.6,q75/q25=10.29 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 08:53:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.6,q75/q25=10.29 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 08:54:01] [Rank 0] step:21/10000 train_time:1297ms step_avg:61.78ms +[2025-09-02 08:54:01] [Rank 0] step:21/10000 train_time:1297ms step_avg:61.78ms +[2025-09-02 08:54:02] [Rank 0] step:41/10000 train_time:2698ms step_avg:65.81ms +[2025-09-02 08:54:02] [Rank 0] step:41/10000 train_time:2698ms step_avg:65.81ms +[2025-09-02 08:54:04] [Rank 0] step:61/10000 train_time:4103ms step_avg:67.26ms +[2025-09-02 08:54:04] [Rank 0] step:61/10000 train_time:4103ms step_avg:67.26ms +[2025-09-02 08:54:05] [Rank 0] step:81/10000 train_time:5510ms step_avg:68.02ms +[2025-09-02 08:54:05] [Rank 0] step:81/10000 train_time:5510ms step_avg:68.02ms +[2025-09-02 08:54:06] [Rank 0] step:101/10000 train_time:6918ms step_avg:68.49ms +[2025-09-02 08:54:06] [Rank 0] step:101/10000 train_time:6918ms step_avg:68.49ms +[2025-09-02 08:54:08] [Rank 0] step:121/10000 train_time:8326ms step_avg:68.81ms +[2025-09-02 08:54:08] [Rank 0] step:121/10000 
train_time:8326ms step_avg:68.81ms +[2025-09-02 08:54:09] [Rank 0] step:141/10000 train_time:9735ms step_avg:69.04ms +[2025-09-02 08:54:09] [Rank 0] step:141/10000 train_time:9735ms step_avg:69.04ms +[2025-09-02 08:54:11] [Rank 0] step:161/10000 train_time:11143ms step_avg:69.21ms +[2025-09-02 08:54:11] [Rank 0] step:161/10000 train_time:11143ms step_avg:69.21ms +[2025-09-02 08:54:12] [Rank 0] step:181/10000 train_time:12553ms step_avg:69.35ms +[2025-09-02 08:54:12] [Rank 0] step:181/10000 train_time:12553ms step_avg:69.35ms +[2025-09-02 08:54:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:54:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:54:25] [Rank 0] PRINT: step:200/10000 val_loss:6.5021 svd_entropy: attn_qk:H=0.4591,top10E=0.78,eRank=38.8,q75/q25=12.30 attn_vo:H=0.5451,top10E=0.64,eRank=109.7,q75/q25=103.99 mlp_w1:H=0.4531,top10E=0.71,eRank=41.6,q75/q25=2.66 mlp_w2:H=0.1477,top10E=0.95,eRank=4.9,q75/q25=416.58 vo_prod:H=0.2503,top10E=0.97,eRank=6.3,q75/q25=670.27 train_time:14106ms step_avg:70.53ms +[2025-09-02 08:54:25] [Rank 0] PRINT: step:200/10000 val_loss:6.5021 svd_entropy: attn_qk:H=0.4591,top10E=0.78,eRank=38.8,q75/q25=12.30 attn_vo:H=0.5451,top10E=0.64,eRank=109.7,q75/q25=103.99 mlp_w1:H=0.4531,top10E=0.71,eRank=41.6,q75/q25=2.66 mlp_w2:H=0.1477,top10E=0.95,eRank=4.9,q75/q25=416.58 vo_prod:H=0.2503,top10E=0.97,eRank=6.3,q75/q25=670.27 train_time:14106ms step_avg:70.53ms +[2025-09-02 08:54:25] [Rank 0] step:201/10000 train_time:14117ms step_avg:70.23ms +[2025-09-02 08:54:25] [Rank 0] step:201/10000 train_time:14117ms step_avg:70.23ms +[2025-09-02 08:54:27] [Rank 0] step:221/10000 train_time:15401ms step_avg:69.69ms +[2025-09-02 08:54:27] [Rank 0] step:221/10000 train_time:15401ms step_avg:69.69ms +[2025-09-02 08:54:28] [Rank 0] step:241/10000 
train_time:16811ms step_avg:69.76ms +[2025-09-02 08:54:28] [Rank 0] step:241/10000 train_time:16811ms step_avg:69.76ms +[2025-09-02 08:54:29] [Rank 0] step:261/10000 train_time:18223ms step_avg:69.82ms +[2025-09-02 08:54:29] [Rank 0] step:261/10000 train_time:18223ms step_avg:69.82ms +[2025-09-02 08:54:31] [Rank 0] step:281/10000 train_time:19635ms step_avg:69.88ms +[2025-09-02 08:54:31] [Rank 0] step:281/10000 train_time:19635ms step_avg:69.88ms +[2025-09-02 08:54:32] [Rank 0] step:301/10000 train_time:21046ms step_avg:69.92ms +[2025-09-02 08:54:32] [Rank 0] step:301/10000 train_time:21046ms step_avg:69.92ms +[2025-09-02 08:54:34] [Rank 0] step:321/10000 train_time:22457ms step_avg:69.96ms +[2025-09-02 08:54:34] [Rank 0] step:321/10000 train_time:22457ms step_avg:69.96ms +[2025-09-02 08:54:35] [Rank 0] step:341/10000 train_time:23869ms step_avg:70.00ms +[2025-09-02 08:54:35] [Rank 0] step:341/10000 train_time:23869ms step_avg:70.00ms +[2025-09-02 08:54:37] [Rank 0] step:361/10000 train_time:25282ms step_avg:70.03ms +[2025-09-02 08:54:37] [Rank 0] step:361/10000 train_time:25282ms step_avg:70.03ms +[2025-09-02 08:54:38] [Rank 0] step:381/10000 train_time:26694ms step_avg:70.06ms +[2025-09-02 08:54:38] [Rank 0] step:381/10000 train_time:26694ms step_avg:70.06ms +[2025-09-02 08:54:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:54:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:54:51] [Rank 0] PRINT: step:400/10000 val_loss:6.0084 svd_entropy: attn_qk:H=0.5153,top10E=0.68,eRank=47.9,q75/q25=13.89 attn_vo:H=0.5813,top10E=0.54,eRank=89.2,q75/q25=40.42 mlp_w1:H=0.4770,top10E=0.66,eRank=52.8,q75/q25=3.26 mlp_w2:H=0.5607,top10E=0.58,eRank=43.0,q75/q25=12.17 vo_prod:H=0.4020,top10E=0.84,eRank=16.0,q75/q25=293.64 train_time:28247ms step_avg:70.62ms +[2025-09-02 08:54:51] [Rank 0] PRINT: step:400/10000 val_loss:6.0084 svd_entropy: attn_qk:H=0.5153,top10E=0.68,eRank=47.9,q75/q25=13.89 attn_vo:H=0.5813,top10E=0.54,eRank=89.2,q75/q25=40.42 mlp_w1:H=0.4770,top10E=0.66,eRank=52.8,q75/q25=3.26 mlp_w2:H=0.5607,top10E=0.58,eRank=43.0,q75/q25=12.17 vo_prod:H=0.4020,top10E=0.84,eRank=16.0,q75/q25=293.64 train_time:28247ms step_avg:70.62ms +[2025-09-02 08:54:51] [Rank 0] step:401/10000 train_time:28258ms step_avg:70.47ms +[2025-09-02 08:54:51] [Rank 0] step:401/10000 train_time:28258ms step_avg:70.47ms +[2025-09-02 08:54:53] [Rank 0] step:421/10000 train_time:29553ms step_avg:70.20ms +[2025-09-02 08:54:53] [Rank 0] step:421/10000 train_time:29553ms step_avg:70.20ms +[2025-09-02 08:54:54] [Rank 0] step:441/10000 train_time:30965ms step_avg:70.22ms +[2025-09-02 08:54:54] [Rank 0] step:441/10000 train_time:30965ms step_avg:70.22ms +[2025-09-02 08:54:55] [Rank 0] step:461/10000 train_time:32377ms step_avg:70.23ms +[2025-09-02 08:54:55] [Rank 0] step:461/10000 train_time:32377ms step_avg:70.23ms +[2025-09-02 08:54:57] [Rank 0] step:481/10000 train_time:33786ms step_avg:70.24ms +[2025-09-02 08:54:57] [Rank 0] step:481/10000 train_time:33786ms step_avg:70.24ms +[2025-09-02 08:54:58] [Rank 0] step:501/10000 train_time:35198ms step_avg:70.25ms +[2025-09-02 08:54:58] [Rank 0] step:501/10000 train_time:35198ms step_avg:70.25ms +[2025-09-02 08:55:00] [Rank 0] step:521/10000 train_time:36610ms step_avg:70.27ms +[2025-09-02 08:55:00] [Rank 0] step:521/10000 train_time:36610ms step_avg:70.27ms +[2025-09-02 08:55:01] [Rank 0] step:541/10000 
train_time:38024ms step_avg:70.28ms +[2025-09-02 08:55:01] [Rank 0] step:541/10000 train_time:38024ms step_avg:70.28ms +[2025-09-02 08:55:02] [Rank 0] step:561/10000 train_time:39438ms step_avg:70.30ms +[2025-09-02 08:55:02] [Rank 0] step:561/10000 train_time:39438ms step_avg:70.30ms +[2025-09-02 08:55:04] [Rank 0] step:581/10000 train_time:40851ms step_avg:70.31ms +[2025-09-02 08:55:04] [Rank 0] step:581/10000 train_time:40851ms step_avg:70.31ms +[2025-09-02 08:55:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:55:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:55:17] [Rank 0] PRINT: step:600/10000 val_loss:5.7013 svd_entropy: attn_qk:H=0.5511,top10E=0.60,eRank=55.4,q75/q25=15.56 attn_vo:H=0.6195,top10E=0.44,eRank=98.8,q75/q25=29.66 mlp_w1:H=0.5119,top10E=0.61,eRank=61.6,q75/q25=3.68 mlp_w2:H=0.6485,top10E=0.44,eRank=75.8,q75/q25=9.24 vo_prod:H=0.4796,top10E=0.69,eRank=26.2,q75/q25=255.86 train_time:42406ms step_avg:70.68ms +[2025-09-02 08:55:17] [Rank 0] PRINT: step:600/10000 val_loss:5.7013 svd_entropy: attn_qk:H=0.5511,top10E=0.60,eRank=55.4,q75/q25=15.56 attn_vo:H=0.6195,top10E=0.44,eRank=98.8,q75/q25=29.66 mlp_w1:H=0.5119,top10E=0.61,eRank=61.6,q75/q25=3.68 mlp_w2:H=0.6485,top10E=0.44,eRank=75.8,q75/q25=9.24 vo_prod:H=0.4796,top10E=0.69,eRank=26.2,q75/q25=255.86 train_time:42406ms step_avg:70.68ms +[2025-09-02 08:55:17] [Rank 0] step:601/10000 train_time:42416ms step_avg:70.58ms +[2025-09-02 08:55:17] [Rank 0] step:601/10000 train_time:42416ms step_avg:70.58ms +[2025-09-02 08:55:18] [Rank 0] step:621/10000 train_time:43706ms step_avg:70.38ms +[2025-09-02 08:55:18] [Rank 0] step:621/10000 train_time:43706ms step_avg:70.38ms +[2025-09-02 08:55:20] [Rank 0] step:641/10000 train_time:45118ms step_avg:70.39ms +[2025-09-02 08:55:20] [Rank 0] step:641/10000 
train_time:45118ms step_avg:70.39ms +[2025-09-02 08:55:21] [Rank 0] step:661/10000 train_time:46531ms step_avg:70.39ms +[2025-09-02 08:55:21] [Rank 0] step:661/10000 train_time:46531ms step_avg:70.39ms +[2025-09-02 08:55:23] [Rank 0] step:681/10000 train_time:47945ms step_avg:70.40ms +[2025-09-02 08:55:23] [Rank 0] step:681/10000 train_time:47945ms step_avg:70.40ms +[2025-09-02 08:55:24] [Rank 0] step:701/10000 train_time:49360ms step_avg:70.41ms +[2025-09-02 08:55:24] [Rank 0] step:701/10000 train_time:49360ms step_avg:70.41ms +[2025-09-02 08:55:25] [Rank 0] step:721/10000 train_time:50773ms step_avg:70.42ms +[2025-09-02 08:55:25] [Rank 0] step:721/10000 train_time:50773ms step_avg:70.42ms +[2025-09-02 08:55:27] [Rank 0] step:741/10000 train_time:52188ms step_avg:70.43ms +[2025-09-02 08:55:27] [Rank 0] step:741/10000 train_time:52188ms step_avg:70.43ms +[2025-09-02 08:55:28] [Rank 0] step:761/10000 train_time:53615ms step_avg:70.45ms +[2025-09-02 08:55:28] [Rank 0] step:761/10000 train_time:53615ms step_avg:70.45ms +[2025-09-02 08:55:30] [Rank 0] step:781/10000 train_time:55043ms step_avg:70.48ms +[2025-09-02 08:55:30] [Rank 0] step:781/10000 train_time:55043ms step_avg:70.48ms +[2025-09-02 08:55:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:55:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:55:43] [Rank 0] PRINT: step:800/10000 val_loss:5.4765 svd_entropy: attn_qk:H=0.5773,top10E=0.55,eRank=61.8,q75/q25=17.79 attn_vo:H=0.6488,top10E=0.39,eRank=109.4,q75/q25=29.07 mlp_w1:H=0.5435,top10E=0.58,eRank=69.4,q75/q25=4.02 mlp_w2:H=0.6981,top10E=0.36,eRank=104.5,q75/q25=8.76 vo_prod:H=0.5255,top10E=0.59,eRank=34.9,q75/q25=352.47 train_time:56614ms step_avg:70.77ms +[2025-09-02 08:55:43] [Rank 0] PRINT: step:800/10000 val_loss:5.4765 svd_entropy: attn_qk:H=0.5773,top10E=0.55,eRank=61.8,q75/q25=17.79 attn_vo:H=0.6488,top10E=0.39,eRank=109.4,q75/q25=29.07 mlp_w1:H=0.5435,top10E=0.58,eRank=69.4,q75/q25=4.02 mlp_w2:H=0.6981,top10E=0.36,eRank=104.5,q75/q25=8.76 vo_prod:H=0.5255,top10E=0.59,eRank=34.9,q75/q25=352.47 train_time:56614ms step_avg:70.77ms +[2025-09-02 08:55:43] [Rank 0] step:801/10000 train_time:56625ms step_avg:70.69ms +[2025-09-02 08:55:43] [Rank 0] step:801/10000 train_time:56625ms step_avg:70.69ms +[2025-09-02 08:55:44] [Rank 0] step:821/10000 train_time:57918ms step_avg:70.55ms +[2025-09-02 08:55:44] [Rank 0] step:821/10000 train_time:57918ms step_avg:70.55ms +[2025-09-02 08:55:46] [Rank 0] step:841/10000 train_time:59344ms step_avg:70.56ms +[2025-09-02 08:55:46] [Rank 0] step:841/10000 train_time:59344ms step_avg:70.56ms +[2025-09-02 08:55:47] [Rank 0] step:861/10000 train_time:60769ms step_avg:70.58ms +[2025-09-02 08:55:47] [Rank 0] step:861/10000 train_time:60769ms step_avg:70.58ms +[2025-09-02 08:55:49] [Rank 0] step:881/10000 train_time:62196ms step_avg:70.60ms +[2025-09-02 08:55:49] [Rank 0] step:881/10000 train_time:62196ms step_avg:70.60ms +[2025-09-02 08:55:50] [Rank 0] step:901/10000 train_time:63622ms step_avg:70.61ms +[2025-09-02 08:55:50] [Rank 0] step:901/10000 train_time:63622ms step_avg:70.61ms +[2025-09-02 08:55:51] [Rank 0] step:921/10000 train_time:65051ms step_avg:70.63ms +[2025-09-02 08:55:51] [Rank 0] step:921/10000 train_time:65051ms step_avg:70.63ms +[2025-09-02 08:55:53] [Rank 0] step:941/10000 
train_time:66478ms step_avg:70.65ms +[2025-09-02 08:55:53] [Rank 0] step:941/10000 train_time:66478ms step_avg:70.65ms +[2025-09-02 08:55:54] [Rank 0] step:961/10000 train_time:67905ms step_avg:70.66ms +[2025-09-02 08:55:54] [Rank 0] step:961/10000 train_time:67905ms step_avg:70.66ms +[2025-09-02 08:55:56] [Rank 0] step:981/10000 train_time:69333ms step_avg:70.68ms +[2025-09-02 08:55:56] [Rank 0] step:981/10000 train_time:69333ms step_avg:70.68ms +[2025-09-02 08:55:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:55:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:56:09] [Rank 0] PRINT: step:1000/10000 val_loss:5.3221 svd_entropy: attn_qk:H=0.5989,top10E=0.50,eRank=68.2,q75/q25=20.38 attn_vo:H=0.6718,top10E=0.35,eRank=120.0,q75/q25=35.62 mlp_w1:H=0.5696,top10E=0.54,eRank=76.4,q75/q25=4.36 mlp_w2:H=0.7255,top10E=0.31,eRank=125.4,q75/q25=9.39 vo_prod:H=0.5556,top10E=0.53,eRank=42.2,q75/q25=724.15 train_time:70903ms step_avg:70.90ms +[2025-09-02 08:56:09] [Rank 0] PRINT: step:1000/10000 val_loss:5.3221 svd_entropy: attn_qk:H=0.5989,top10E=0.50,eRank=68.2,q75/q25=20.38 attn_vo:H=0.6718,top10E=0.35,eRank=120.0,q75/q25=35.62 mlp_w1:H=0.5696,top10E=0.54,eRank=76.4,q75/q25=4.36 mlp_w2:H=0.7255,top10E=0.31,eRank=125.4,q75/q25=9.39 vo_prod:H=0.5556,top10E=0.53,eRank=42.2,q75/q25=724.15 train_time:70903ms step_avg:70.90ms +[2025-09-02 08:56:09] [Rank 0] step:1001/10000 train_time:70914ms step_avg:70.84ms +[2025-09-02 08:56:09] [Rank 0] step:1001/10000 train_time:70914ms step_avg:70.84ms +[2025-09-02 08:56:10] [Rank 0] step:1021/10000 train_time:72217ms step_avg:70.73ms +[2025-09-02 08:56:10] [Rank 0] step:1021/10000 train_time:72217ms step_avg:70.73ms +[2025-09-02 08:56:12] [Rank 0] step:1041/10000 train_time:73641ms step_avg:70.74ms +[2025-09-02 08:56:12] [Rank 0] step:1041/10000 
train_time:73641ms step_avg:70.74ms +[2025-09-02 08:56:13] [Rank 0] step:1061/10000 train_time:75068ms step_avg:70.75ms +[2025-09-02 08:56:13] [Rank 0] step:1061/10000 train_time:75068ms step_avg:70.75ms +[2025-09-02 08:56:15] [Rank 0] step:1081/10000 train_time:76494ms step_avg:70.76ms +[2025-09-02 08:56:15] [Rank 0] step:1081/10000 train_time:76494ms step_avg:70.76ms +[2025-09-02 08:56:16] [Rank 0] step:1101/10000 train_time:77921ms step_avg:70.77ms +[2025-09-02 08:56:16] [Rank 0] step:1101/10000 train_time:77921ms step_avg:70.77ms +[2025-09-02 08:56:18] [Rank 0] step:1121/10000 train_time:79347ms step_avg:70.78ms +[2025-09-02 08:56:18] [Rank 0] step:1121/10000 train_time:79347ms step_avg:70.78ms +[2025-09-02 08:56:19] [Rank 0] step:1141/10000 train_time:80775ms step_avg:70.79ms +[2025-09-02 08:56:19] [Rank 0] step:1141/10000 train_time:80775ms step_avg:70.79ms +[2025-09-02 08:56:20] [Rank 0] step:1161/10000 train_time:82203ms step_avg:70.80ms +[2025-09-02 08:56:20] [Rank 0] step:1161/10000 train_time:82203ms step_avg:70.80ms +[2025-09-02 08:56:22] [Rank 0] step:1181/10000 train_time:83631ms step_avg:70.81ms +[2025-09-02 08:56:22] [Rank 0] step:1181/10000 train_time:83631ms step_avg:70.81ms +[2025-09-02 08:56:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:56:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:56:35] [Rank 0] PRINT: step:1200/10000 val_loss:5.1801 svd_entropy: attn_qk:H=0.6170,top10E=0.47,eRank=74.5,q75/q25=23.71 attn_vo:H=0.6914,top10E=0.32,eRank=130.7,q75/q25=47.49 mlp_w1:H=0.5908,top10E=0.52,eRank=82.9,q75/q25=4.70 mlp_w2:H=0.7469,top10E=0.28,eRank=144.6,q75/q25=10.65 vo_prod:H=0.5797,top10E=0.48,eRank=49.4,q75/q25=1583.82 train_time:85201ms step_avg:71.00ms +[2025-09-02 08:56:35] [Rank 0] PRINT: step:1200/10000 val_loss:5.1801 svd_entropy: attn_qk:H=0.6170,top10E=0.47,eRank=74.5,q75/q25=23.71 attn_vo:H=0.6914,top10E=0.32,eRank=130.7,q75/q25=47.49 mlp_w1:H=0.5908,top10E=0.52,eRank=82.9,q75/q25=4.70 mlp_w2:H=0.7469,top10E=0.28,eRank=144.6,q75/q25=10.65 vo_prod:H=0.5797,top10E=0.48,eRank=49.4,q75/q25=1583.82 train_time:85201ms step_avg:71.00ms +[2025-09-02 08:56:35] [Rank 0] step:1201/10000 train_time:85212ms step_avg:70.95ms +[2025-09-02 08:56:35] [Rank 0] step:1201/10000 train_time:85212ms step_avg:70.95ms +[2025-09-02 08:56:37] [Rank 0] step:1221/10000 train_time:86506ms step_avg:70.85ms +[2025-09-02 08:56:37] [Rank 0] step:1221/10000 train_time:86506ms step_avg:70.85ms +[2025-09-02 08:56:38] [Rank 0] step:1241/10000 train_time:87931ms step_avg:70.85ms +[2025-09-02 08:56:38] [Rank 0] step:1241/10000 train_time:87931ms step_avg:70.85ms +[2025-09-02 08:56:39] [Rank 0] step:1261/10000 train_time:89358ms step_avg:70.86ms +[2025-09-02 08:56:39] [Rank 0] step:1261/10000 train_time:89358ms step_avg:70.86ms +[2025-09-02 08:56:41] [Rank 0] step:1281/10000 train_time:90785ms step_avg:70.87ms +[2025-09-02 08:56:41] [Rank 0] step:1281/10000 train_time:90785ms step_avg:70.87ms +[2025-09-02 08:56:42] [Rank 0] step:1301/10000 train_time:92213ms step_avg:70.88ms +[2025-09-02 08:56:42] [Rank 0] step:1301/10000 train_time:92213ms step_avg:70.88ms +[2025-09-02 08:56:44] [Rank 0] step:1321/10000 train_time:93639ms step_avg:70.89ms +[2025-09-02 08:56:44] [Rank 0] step:1321/10000 train_time:93639ms step_avg:70.89ms +[2025-09-02 08:56:45] [Rank 0] 
step:1341/10000 train_time:95066ms step_avg:70.89ms +[2025-09-02 08:56:45] [Rank 0] step:1341/10000 train_time:95066ms step_avg:70.89ms +[2025-09-02 08:56:47] [Rank 0] step:1361/10000 train_time:96492ms step_avg:70.90ms +[2025-09-02 08:56:47] [Rank 0] step:1361/10000 train_time:96492ms step_avg:70.90ms +[2025-09-02 08:56:48] [Rank 0] step:1381/10000 train_time:97920ms step_avg:70.90ms +[2025-09-02 08:56:48] [Rank 0] step:1381/10000 train_time:97920ms step_avg:70.90ms +[2025-09-02 08:56:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:56:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:57:01] [Rank 0] PRINT: step:1400/10000 val_loss:5.0661 svd_entropy: attn_qk:H=0.6316,top10E=0.44,eRank=80.4,q75/q25=28.22 attn_vo:H=0.7077,top10E=0.29,eRank=140.9,q75/q25=60.67 mlp_w1:H=0.6107,top10E=0.49,eRank=89.7,q75/q25=5.13 mlp_w2:H=0.7638,top10E=0.25,eRank=161.9,q75/q25=12.14 vo_prod:H=0.5977,top10E=0.44,eRank=55.6,q75/q25=2898.68 train_time:99490ms step_avg:71.06ms +[2025-09-02 08:57:01] [Rank 0] PRINT: step:1400/10000 val_loss:5.0661 svd_entropy: attn_qk:H=0.6316,top10E=0.44,eRank=80.4,q75/q25=28.22 attn_vo:H=0.7077,top10E=0.29,eRank=140.9,q75/q25=60.67 mlp_w1:H=0.6107,top10E=0.49,eRank=89.7,q75/q25=5.13 mlp_w2:H=0.7638,top10E=0.25,eRank=161.9,q75/q25=12.14 vo_prod:H=0.5977,top10E=0.44,eRank=55.6,q75/q25=2898.68 train_time:99490ms step_avg:71.06ms +[2025-09-02 08:57:01] [Rank 0] step:1401/10000 train_time:99501ms step_avg:71.02ms +[2025-09-02 08:57:01] [Rank 0] step:1401/10000 train_time:99501ms step_avg:71.02ms +[2025-09-02 08:57:03] [Rank 0] step:1421/10000 train_time:100800ms step_avg:70.94ms +[2025-09-02 08:57:03] [Rank 0] step:1421/10000 train_time:100800ms step_avg:70.94ms +[2025-09-02 08:57:04] [Rank 0] step:1441/10000 train_time:102228ms step_avg:70.94ms +[2025-09-02 08:57:04] 
[Rank 0] step:1441/10000 train_time:102228ms step_avg:70.94ms +[2025-09-02 08:57:06] [Rank 0] step:1461/10000 train_time:103655ms step_avg:70.95ms +[2025-09-02 08:57:06] [Rank 0] step:1461/10000 train_time:103655ms step_avg:70.95ms +[2025-09-02 08:57:07] [Rank 0] step:1481/10000 train_time:105083ms step_avg:70.95ms +[2025-09-02 08:57:07] [Rank 0] step:1481/10000 train_time:105083ms step_avg:70.95ms +[2025-09-02 08:57:09] [Rank 0] step:1501/10000 train_time:106520ms step_avg:70.97ms +[2025-09-02 08:57:09] [Rank 0] step:1501/10000 train_time:106520ms step_avg:70.97ms +[2025-09-02 08:57:10] [Rank 0] step:1521/10000 train_time:107959ms step_avg:70.98ms +[2025-09-02 08:57:10] [Rank 0] step:1521/10000 train_time:107959ms step_avg:70.98ms +[2025-09-02 08:57:11] [Rank 0] step:1541/10000 train_time:109397ms step_avg:70.99ms +[2025-09-02 08:57:11] [Rank 0] step:1541/10000 train_time:109397ms step_avg:70.99ms +[2025-09-02 08:57:13] [Rank 0] step:1561/10000 train_time:110837ms step_avg:71.00ms +[2025-09-02 08:57:13] [Rank 0] step:1561/10000 train_time:110837ms step_avg:71.00ms +[2025-09-02 08:57:14] [Rank 0] step:1581/10000 train_time:112278ms step_avg:71.02ms +[2025-09-02 08:57:14] [Rank 0] step:1581/10000 train_time:112278ms step_avg:71.02ms +[2025-09-02 08:57:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:57:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:57:27] [Rank 0] PRINT: step:1600/10000 val_loss:4.9279 svd_entropy: attn_qk:H=0.6433,top10E=0.42,eRank=85.2,q75/q25=33.49 attn_vo:H=0.7216,top10E=0.27,eRank=150.8,q75/q25=73.22 mlp_w1:H=0.6278,top10E=0.47,eRank=96.4,q75/q25=5.59 mlp_w2:H=0.7775,top10E=0.23,eRank=177.3,q75/q25=13.69 vo_prod:H=0.6125,top10E=0.42,eRank=61.2,q75/q25=4769.61 train_time:113864ms step_avg:71.17ms +[2025-09-02 08:57:27] [Rank 0] PRINT: step:1600/10000 val_loss:4.9279 svd_entropy: attn_qk:H=0.6433,top10E=0.42,eRank=85.2,q75/q25=33.49 attn_vo:H=0.7216,top10E=0.27,eRank=150.8,q75/q25=73.22 mlp_w1:H=0.6278,top10E=0.47,eRank=96.4,q75/q25=5.59 mlp_w2:H=0.7775,top10E=0.23,eRank=177.3,q75/q25=13.69 vo_prod:H=0.6125,top10E=0.42,eRank=61.2,q75/q25=4769.61 train_time:113864ms step_avg:71.17ms +[2025-09-02 08:57:28] [Rank 0] step:1601/10000 train_time:113876ms step_avg:71.13ms +[2025-09-02 08:57:28] [Rank 0] step:1601/10000 train_time:113876ms step_avg:71.13ms +[2025-09-02 08:57:29] [Rank 0] step:1621/10000 train_time:115180ms step_avg:71.05ms +[2025-09-02 08:57:29] [Rank 0] step:1621/10000 train_time:115180ms step_avg:71.05ms +[2025-09-02 08:57:30] [Rank 0] step:1641/10000 train_time:116617ms step_avg:71.06ms +[2025-09-02 08:57:30] [Rank 0] step:1641/10000 train_time:116617ms step_avg:71.06ms +[2025-09-02 08:57:32] [Rank 0] step:1661/10000 train_time:118055ms step_avg:71.07ms +[2025-09-02 08:57:32] [Rank 0] step:1661/10000 train_time:118055ms step_avg:71.07ms +[2025-09-02 08:57:33] [Rank 0] step:1681/10000 train_time:119493ms step_avg:71.08ms +[2025-09-02 08:57:33] [Rank 0] step:1681/10000 train_time:119493ms step_avg:71.08ms +[2025-09-02 08:57:35] [Rank 0] step:1701/10000 train_time:120931ms step_avg:71.09ms +[2025-09-02 08:57:35] [Rank 0] step:1701/10000 train_time:120931ms step_avg:71.09ms +[2025-09-02 08:57:36] [Rank 0] step:1721/10000 train_time:122369ms step_avg:71.10ms +[2025-09-02 08:57:36] [Rank 0] step:1721/10000 train_time:122369ms step_avg:71.10ms +[2025-09-02 08:57:38] 
[Rank 0] step:1741/10000 train_time:123807ms step_avg:71.11ms +[2025-09-02 08:57:38] [Rank 0] step:1741/10000 train_time:123807ms step_avg:71.11ms +[2025-09-02 08:57:39] [Rank 0] step:1761/10000 train_time:125244ms step_avg:71.12ms +[2025-09-02 08:57:39] [Rank 0] step:1761/10000 train_time:125244ms step_avg:71.12ms +[2025-09-02 08:57:41] [Rank 0] step:1781/10000 train_time:126682ms step_avg:71.13ms +[2025-09-02 08:57:41] [Rank 0] step:1781/10000 train_time:126682ms step_avg:71.13ms +[2025-09-02 08:57:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:57:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:57:54] [Rank 0] PRINT: step:1800/10000 val_loss:4.8153 svd_entropy: attn_qk:H=0.6532,top10E=0.40,eRank=89.7,q75/q25=39.20 attn_vo:H=0.7336,top10E=0.26,eRank=160.2,q75/q25=83.90 mlp_w1:H=0.6438,top10E=0.45,eRank=103.2,q75/q25=6.10 mlp_w2:H=0.7894,top10E=0.21,eRank=192.2,q75/q25=14.92 vo_prod:H=0.6255,top10E=0.40,eRank=66.7,q75/q25=6845.70 train_time:128265ms step_avg:71.26ms +[2025-09-02 08:57:54] [Rank 0] PRINT: step:1800/10000 val_loss:4.8153 svd_entropy: attn_qk:H=0.6532,top10E=0.40,eRank=89.7,q75/q25=39.20 attn_vo:H=0.7336,top10E=0.26,eRank=160.2,q75/q25=83.90 mlp_w1:H=0.6438,top10E=0.45,eRank=103.2,q75/q25=6.10 mlp_w2:H=0.7894,top10E=0.21,eRank=192.2,q75/q25=14.92 vo_prod:H=0.6255,top10E=0.40,eRank=66.7,q75/q25=6845.70 train_time:128265ms step_avg:71.26ms +[2025-09-02 08:57:54] [Rank 0] step:1801/10000 train_time:128277ms step_avg:71.23ms +[2025-09-02 08:57:54] [Rank 0] step:1801/10000 train_time:128277ms step_avg:71.23ms +[2025-09-02 08:57:55] [Rank 0] step:1821/10000 train_time:129579ms step_avg:71.16ms +[2025-09-02 08:57:55] [Rank 0] step:1821/10000 train_time:129579ms step_avg:71.16ms +[2025-09-02 08:57:57] [Rank 0] step:1841/10000 train_time:131015ms step_avg:71.17ms 
+[2025-09-02 08:57:57] [Rank 0] step:1841/10000 train_time:131015ms step_avg:71.17ms +[2025-09-02 08:57:58] [Rank 0] step:1861/10000 train_time:132453ms step_avg:71.17ms +[2025-09-02 08:57:58] [Rank 0] step:1861/10000 train_time:132453ms step_avg:71.17ms +[2025-09-02 08:58:00] [Rank 0] step:1881/10000 train_time:133891ms step_avg:71.18ms +[2025-09-02 08:58:00] [Rank 0] step:1881/10000 train_time:133891ms step_avg:71.18ms +[2025-09-02 08:58:01] [Rank 0] step:1901/10000 train_time:135328ms step_avg:71.19ms +[2025-09-02 08:58:01] [Rank 0] step:1901/10000 train_time:135328ms step_avg:71.19ms +[2025-09-02 08:58:02] [Rank 0] step:1921/10000 train_time:136766ms step_avg:71.20ms +[2025-09-02 08:58:02] [Rank 0] step:1921/10000 train_time:136766ms step_avg:71.20ms +[2025-09-02 08:58:04] [Rank 0] step:1941/10000 train_time:138205ms step_avg:71.20ms +[2025-09-02 08:58:04] [Rank 0] step:1941/10000 train_time:138205ms step_avg:71.20ms +[2025-09-02 08:58:05] [Rank 0] step:1961/10000 train_time:139643ms step_avg:71.21ms +[2025-09-02 08:58:05] [Rank 0] step:1961/10000 train_time:139643ms step_avg:71.21ms +[2025-09-02 08:58:07] [Rank 0] step:1981/10000 train_time:141082ms step_avg:71.22ms +[2025-09-02 08:58:07] [Rank 0] step:1981/10000 train_time:141082ms step_avg:71.22ms +[2025-09-02 08:58:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:58:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:58:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.7424 svd_entropy: attn_qk:H=0.6621,top10E=0.39,eRank=94.0,q75/q25=45.11 attn_vo:H=0.7436,top10E=0.24,eRank=168.6,q75/q25=92.29 mlp_w1:H=0.6568,top10E=0.43,eRank=109.5,q75/q25=6.63 mlp_w2:H=0.7979,top10E=0.20,eRank=203.6,q75/q25=16.22 vo_prod:H=0.6365,top10E=0.38,eRank=71.7,q75/q25=9053.89 train_time:142666ms step_avg:71.33ms +[2025-09-02 08:58:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.7424 svd_entropy: attn_qk:H=0.6621,top10E=0.39,eRank=94.0,q75/q25=45.11 attn_vo:H=0.7436,top10E=0.24,eRank=168.6,q75/q25=92.29 mlp_w1:H=0.6568,top10E=0.43,eRank=109.5,q75/q25=6.63 mlp_w2:H=0.7979,top10E=0.20,eRank=203.6,q75/q25=16.22 vo_prod:H=0.6365,top10E=0.38,eRank=71.7,q75/q25=9053.89 train_time:142666ms step_avg:71.33ms +[2025-09-02 08:58:20] [Rank 0] step:2001/10000 train_time:142677ms step_avg:71.30ms +[2025-09-02 08:58:20] [Rank 0] step:2001/10000 train_time:142677ms step_avg:71.30ms +[2025-09-02 08:58:21] [Rank 0] step:2021/10000 train_time:143979ms step_avg:71.24ms +[2025-09-02 08:58:21] [Rank 0] step:2021/10000 train_time:143979ms step_avg:71.24ms +[2025-09-02 08:58:23] [Rank 0] step:2041/10000 train_time:145542ms step_avg:71.31ms +[2025-09-02 08:58:23] [Rank 0] step:2041/10000 train_time:145542ms step_avg:71.31ms +[2025-09-02 08:58:24] [Rank 0] step:2061/10000 train_time:146977ms step_avg:71.31ms +[2025-09-02 08:58:24] [Rank 0] step:2061/10000 train_time:146977ms step_avg:71.31ms +[2025-09-02 08:58:26] [Rank 0] step:2081/10000 train_time:148414ms step_avg:71.32ms +[2025-09-02 08:58:26] [Rank 0] step:2081/10000 train_time:148414ms step_avg:71.32ms +[2025-09-02 08:58:27] [Rank 0] step:2101/10000 train_time:149850ms step_avg:71.32ms +[2025-09-02 08:58:27] [Rank 0] step:2101/10000 train_time:149850ms step_avg:71.32ms +[2025-09-02 08:58:29] [Rank 0] step:2121/10000 train_time:151286ms step_avg:71.33ms +[2025-09-02 08:58:29] [Rank 0] step:2121/10000 train_time:151286ms step_avg:71.33ms +[2025-09-02 08:58:30] 
[Rank 0] step:2141/10000 train_time:152724ms step_avg:71.33ms +[2025-09-02 08:58:30] [Rank 0] step:2141/10000 train_time:152724ms step_avg:71.33ms +[2025-09-02 08:58:32] [Rank 0] step:2161/10000 train_time:154161ms step_avg:71.34ms +[2025-09-02 08:58:32] [Rank 0] step:2161/10000 train_time:154161ms step_avg:71.34ms +[2025-09-02 08:58:33] [Rank 0] step:2181/10000 train_time:155600ms step_avg:71.34ms +[2025-09-02 08:58:33] [Rank 0] step:2181/10000 train_time:155600ms step_avg:71.34ms +[2025-09-02 08:58:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:58:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:58:46] [Rank 0] PRINT: step:2200/10000 val_loss:4.6583 svd_entropy: attn_qk:H=0.6694,top10E=0.37,eRank=97.8,q75/q25=50.97 attn_vo:H=0.7520,top10E=0.23,eRank=176.2,q75/q25=97.35 mlp_w1:H=0.6684,top10E=0.42,eRank=115.5,q75/q25=7.16 mlp_w2:H=0.8049,top10E=0.19,eRank=213.6,q75/q25=17.34 vo_prod:H=0.6458,top10E=0.36,eRank=76.2,q75/q25=10771.32 train_time:157183ms step_avg:71.45ms +[2025-09-02 08:58:46] [Rank 0] PRINT: step:2200/10000 val_loss:4.6583 svd_entropy: attn_qk:H=0.6694,top10E=0.37,eRank=97.8,q75/q25=50.97 attn_vo:H=0.7520,top10E=0.23,eRank=176.2,q75/q25=97.35 mlp_w1:H=0.6684,top10E=0.42,eRank=115.5,q75/q25=7.16 mlp_w2:H=0.8049,top10E=0.19,eRank=213.6,q75/q25=17.34 vo_prod:H=0.6458,top10E=0.36,eRank=76.2,q75/q25=10771.32 train_time:157183ms step_avg:71.45ms +[2025-09-02 08:58:46] [Rank 0] step:2201/10000 train_time:157194ms step_avg:71.42ms +[2025-09-02 08:58:46] [Rank 0] step:2201/10000 train_time:157194ms step_avg:71.42ms +[2025-09-02 08:58:48] [Rank 0] step:2221/10000 train_time:158496ms step_avg:71.36ms +[2025-09-02 08:58:48] [Rank 0] step:2221/10000 train_time:158496ms step_avg:71.36ms +[2025-09-02 08:58:49] [Rank 0] step:2241/10000 train_time:159965ms step_avg:71.38ms 
+[2025-09-02 08:58:49] [Rank 0] step:2241/10000 train_time:159965ms step_avg:71.38ms +[2025-09-02 08:58:51] [Rank 0] step:2261/10000 train_time:161447ms step_avg:71.41ms +[2025-09-02 08:58:51] [Rank 0] step:2261/10000 train_time:161447ms step_avg:71.41ms +[2025-09-02 08:58:52] [Rank 0] step:2281/10000 train_time:162928ms step_avg:71.43ms +[2025-09-02 08:58:52] [Rank 0] step:2281/10000 train_time:162928ms step_avg:71.43ms +[2025-09-02 08:58:54] [Rank 0] step:2301/10000 train_time:164412ms step_avg:71.45ms +[2025-09-02 08:58:54] [Rank 0] step:2301/10000 train_time:164412ms step_avg:71.45ms +[2025-09-02 08:58:55] [Rank 0] step:2321/10000 train_time:165895ms step_avg:71.48ms +[2025-09-02 08:58:55] [Rank 0] step:2321/10000 train_time:165895ms step_avg:71.48ms +[2025-09-02 08:58:57] [Rank 0] step:2341/10000 train_time:167379ms step_avg:71.50ms +[2025-09-02 08:58:57] [Rank 0] step:2341/10000 train_time:167379ms step_avg:71.50ms +[2025-09-02 08:58:58] [Rank 0] step:2361/10000 train_time:168863ms step_avg:71.52ms +[2025-09-02 08:58:58] [Rank 0] step:2361/10000 train_time:168863ms step_avg:71.52ms +[2025-09-02 08:59:00] [Rank 0] step:2381/10000 train_time:170348ms step_avg:71.54ms +[2025-09-02 08:59:00] [Rank 0] step:2381/10000 train_time:170348ms step_avg:71.54ms +[2025-09-02 08:59:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:59:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 08:59:13] [Rank 0] PRINT: step:2400/10000 val_loss:4.5802 svd_entropy: attn_qk:H=0.6754,top10E=0.37,eRank=101.0,q75/q25=56.72 attn_vo:H=0.7594,top10E=0.22,eRank=183.4,q75/q25=101.43 mlp_w1:H=0.6786,top10E=0.41,eRank=121.3,q75/q25=7.72 mlp_w2:H=0.8109,top10E=0.18,eRank=222.8,q75/q25=18.50 vo_prod:H=0.6538,top10E=0.35,eRank=80.4,q75/q25=12196.30 train_time:171982ms step_avg:71.66ms +[2025-09-02 08:59:13] [Rank 0] PRINT: step:2400/10000 val_loss:4.5802 svd_entropy: attn_qk:H=0.6754,top10E=0.37,eRank=101.0,q75/q25=56.72 attn_vo:H=0.7594,top10E=0.22,eRank=183.4,q75/q25=101.43 mlp_w1:H=0.6786,top10E=0.41,eRank=121.3,q75/q25=7.72 mlp_w2:H=0.8109,top10E=0.18,eRank=222.8,q75/q25=18.50 vo_prod:H=0.6538,top10E=0.35,eRank=80.4,q75/q25=12196.30 train_time:171982ms step_avg:71.66ms +[2025-09-02 08:59:13] [Rank 0] step:2401/10000 train_time:171992ms step_avg:71.63ms +[2025-09-02 08:59:13] [Rank 0] step:2401/10000 train_time:171992ms step_avg:71.63ms +[2025-09-02 08:59:15] [Rank 0] step:2421/10000 train_time:173327ms step_avg:71.59ms +[2025-09-02 08:59:15] [Rank 0] step:2421/10000 train_time:173327ms step_avg:71.59ms +[2025-09-02 08:59:16] [Rank 0] step:2441/10000 train_time:174807ms step_avg:71.61ms +[2025-09-02 08:59:16] [Rank 0] step:2441/10000 train_time:174807ms step_avg:71.61ms +[2025-09-02 08:59:17] [Rank 0] step:2461/10000 train_time:176288ms step_avg:71.63ms +[2025-09-02 08:59:17] [Rank 0] step:2461/10000 train_time:176288ms step_avg:71.63ms +[2025-09-02 08:59:19] [Rank 0] step:2481/10000 train_time:177768ms step_avg:71.65ms +[2025-09-02 08:59:19] [Rank 0] step:2481/10000 train_time:177768ms step_avg:71.65ms +[2025-09-02 08:59:20] [Rank 0] step:2501/10000 train_time:179250ms step_avg:71.67ms +[2025-09-02 08:59:20] [Rank 0] step:2501/10000 train_time:179250ms step_avg:71.67ms +[2025-09-02 08:59:22] [Rank 0] step:2521/10000 train_time:180732ms step_avg:71.69ms +[2025-09-02 08:59:22] [Rank 0] step:2521/10000 train_time:180732ms step_avg:71.69ms +[2025-09-02 
08:59:23] [Rank 0] step:2541/10000 train_time:182213ms step_avg:71.71ms +[2025-09-02 08:59:23] [Rank 0] step:2541/10000 train_time:182213ms step_avg:71.71ms +[2025-09-02 08:59:25] [Rank 0] step:2561/10000 train_time:183695ms step_avg:71.73ms +[2025-09-02 08:59:25] [Rank 0] step:2561/10000 train_time:183695ms step_avg:71.73ms +[2025-09-02 08:59:26] [Rank 0] step:2581/10000 train_time:185177ms step_avg:71.75ms +[2025-09-02 08:59:26] [Rank 0] step:2581/10000 train_time:185177ms step_avg:71.75ms +[2025-09-02 08:59:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:59:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:59:40] [Rank 0] PRINT: step:2600/10000 val_loss:4.5180 svd_entropy: attn_qk:H=0.6811,top10E=0.36,eRank=104.1,q75/q25=62.28 attn_vo:H=0.7661,top10E=0.21,eRank=190.1,q75/q25=104.20 mlp_w1:H=0.6876,top10E=0.39,eRank=126.7,q75/q25=8.25 mlp_w2:H=0.8165,top10E=0.18,eRank=231.5,q75/q25=19.21 vo_prod:H=0.6615,top10E=0.34,eRank=84.6,q75/q25=13008.09 train_time:186809ms step_avg:71.85ms +[2025-09-02 08:59:40] [Rank 0] PRINT: step:2600/10000 val_loss:4.5180 svd_entropy: attn_qk:H=0.6811,top10E=0.36,eRank=104.1,q75/q25=62.28 attn_vo:H=0.7661,top10E=0.21,eRank=190.1,q75/q25=104.20 mlp_w1:H=0.6876,top10E=0.39,eRank=126.7,q75/q25=8.25 mlp_w2:H=0.8165,top10E=0.18,eRank=231.5,q75/q25=19.21 vo_prod:H=0.6615,top10E=0.34,eRank=84.6,q75/q25=13008.09 train_time:186809ms step_avg:71.85ms +[2025-09-02 08:59:40] [Rank 0] step:2601/10000 train_time:186819ms step_avg:71.83ms +[2025-09-02 08:59:40] [Rank 0] step:2601/10000 train_time:186819ms step_avg:71.83ms +[2025-09-02 08:59:41] [Rank 0] step:2621/10000 train_time:188170ms step_avg:71.79ms +[2025-09-02 08:59:41] [Rank 0] step:2621/10000 train_time:188170ms step_avg:71.79ms +[2025-09-02 08:59:43] [Rank 0] step:2641/10000 train_time:189649ms 
step_avg:71.81ms +[2025-09-02 08:59:43] [Rank 0] step:2641/10000 train_time:189649ms step_avg:71.81ms +[2025-09-02 08:59:44] [Rank 0] step:2661/10000 train_time:191130ms step_avg:71.83ms +[2025-09-02 08:59:44] [Rank 0] step:2661/10000 train_time:191130ms step_avg:71.83ms +[2025-09-02 08:59:46] [Rank 0] step:2681/10000 train_time:192611ms step_avg:71.84ms +[2025-09-02 08:59:46] [Rank 0] step:2681/10000 train_time:192611ms step_avg:71.84ms +[2025-09-02 08:59:47] [Rank 0] step:2701/10000 train_time:194092ms step_avg:71.86ms +[2025-09-02 08:59:47] [Rank 0] step:2701/10000 train_time:194092ms step_avg:71.86ms +[2025-09-02 08:59:49] [Rank 0] step:2721/10000 train_time:195573ms step_avg:71.88ms +[2025-09-02 08:59:49] [Rank 0] step:2721/10000 train_time:195573ms step_avg:71.88ms +[2025-09-02 08:59:50] [Rank 0] step:2741/10000 train_time:197055ms step_avg:71.89ms +[2025-09-02 08:59:50] [Rank 0] step:2741/10000 train_time:197055ms step_avg:71.89ms +[2025-09-02 08:59:52] [Rank 0] step:2761/10000 train_time:198538ms step_avg:71.91ms +[2025-09-02 08:59:52] [Rank 0] step:2761/10000 train_time:198538ms step_avg:71.91ms +[2025-09-02 08:59:53] [Rank 0] step:2781/10000 train_time:200019ms step_avg:71.92ms +[2025-09-02 08:59:53] [Rank 0] step:2781/10000 train_time:200019ms step_avg:71.92ms +[2025-09-02 08:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 08:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:00:06] [Rank 0] PRINT: step:2800/10000 val_loss:4.4726 svd_entropy: attn_qk:H=0.6867,top10E=0.35,eRank=107.4,q75/q25=67.49 attn_vo:H=0.7722,top10E=0.21,eRank=196.5,q75/q25=105.31 mlp_w1:H=0.6960,top10E=0.38,eRank=132.0,q75/q25=8.75 mlp_w2:H=0.8220,top10E=0.17,eRank=240.2,q75/q25=19.56 vo_prod:H=0.6683,top10E=0.33,eRank=88.6,q75/q25=13714.07 train_time:201649ms step_avg:72.02ms +[2025-09-02 09:00:06] [Rank 0] PRINT: step:2800/10000 val_loss:4.4726 svd_entropy: attn_qk:H=0.6867,top10E=0.35,eRank=107.4,q75/q25=67.49 attn_vo:H=0.7722,top10E=0.21,eRank=196.5,q75/q25=105.31 mlp_w1:H=0.6960,top10E=0.38,eRank=132.0,q75/q25=8.75 mlp_w2:H=0.8220,top10E=0.17,eRank=240.2,q75/q25=19.56 vo_prod:H=0.6683,top10E=0.33,eRank=88.6,q75/q25=13714.07 train_time:201649ms step_avg:72.02ms +[2025-09-02 09:00:07] [Rank 0] step:2801/10000 train_time:201659ms step_avg:72.00ms +[2025-09-02 09:00:07] [Rank 0] step:2801/10000 train_time:201659ms step_avg:72.00ms +[2025-09-02 09:00:08] [Rank 0] step:2821/10000 train_time:203010ms step_avg:71.96ms +[2025-09-02 09:00:08] [Rank 0] step:2821/10000 train_time:203010ms step_avg:71.96ms +[2025-09-02 09:00:09] [Rank 0] step:2841/10000 train_time:204490ms step_avg:71.98ms +[2025-09-02 09:00:09] [Rank 0] step:2841/10000 train_time:204490ms step_avg:71.98ms +[2025-09-02 09:00:11] [Rank 0] step:2861/10000 train_time:205984ms step_avg:72.00ms +[2025-09-02 09:00:11] [Rank 0] step:2861/10000 train_time:205984ms step_avg:72.00ms +[2025-09-02 09:00:12] [Rank 0] step:2881/10000 train_time:207465ms step_avg:72.01ms +[2025-09-02 09:00:12] [Rank 0] step:2881/10000 train_time:207465ms step_avg:72.01ms +[2025-09-02 09:00:14] [Rank 0] step:2901/10000 train_time:208946ms step_avg:72.03ms +[2025-09-02 09:00:14] [Rank 0] step:2901/10000 train_time:208946ms step_avg:72.03ms +[2025-09-02 09:00:15] [Rank 0] step:2921/10000 train_time:210426ms step_avg:72.04ms +[2025-09-02 09:00:15] [Rank 0] step:2921/10000 train_time:210426ms step_avg:72.04ms +[2025-09-02 
09:00:17] [Rank 0] step:2941/10000 train_time:211906ms step_avg:72.05ms +[2025-09-02 09:00:17] [Rank 0] step:2941/10000 train_time:211906ms step_avg:72.05ms +[2025-09-02 09:00:18] [Rank 0] step:2961/10000 train_time:213388ms step_avg:72.07ms +[2025-09-02 09:00:18] [Rank 0] step:2961/10000 train_time:213388ms step_avg:72.07ms +[2025-09-02 09:00:20] [Rank 0] step:2981/10000 train_time:214876ms step_avg:72.08ms +[2025-09-02 09:00:20] [Rank 0] step:2981/10000 train_time:214876ms step_avg:72.08ms +[2025-09-02 09:00:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:00:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:00:33] [Rank 0] PRINT: step:3000/10000 val_loss:4.4310 svd_entropy: attn_qk:H=0.6915,top10E=0.34,eRank=110.3,q75/q25=71.98 attn_vo:H=0.7775,top10E=0.20,eRank=202.3,q75/q25=105.21 mlp_w1:H=0.7034,top10E=0.37,eRank=137.0,q75/q25=9.22 mlp_w2:H=0.8266,top10E=0.16,eRank=247.9,q75/q25=19.84 vo_prod:H=0.6741,top10E=0.32,eRank=92.1,q75/q25=14148.03 train_time:216516ms step_avg:72.17ms +[2025-09-02 09:00:33] [Rank 0] PRINT: step:3000/10000 val_loss:4.4310 svd_entropy: attn_qk:H=0.6915,top10E=0.34,eRank=110.3,q75/q25=71.98 attn_vo:H=0.7775,top10E=0.20,eRank=202.3,q75/q25=105.21 mlp_w1:H=0.7034,top10E=0.37,eRank=137.0,q75/q25=9.22 mlp_w2:H=0.8266,top10E=0.16,eRank=247.9,q75/q25=19.84 vo_prod:H=0.6741,top10E=0.32,eRank=92.1,q75/q25=14148.03 train_time:216516ms step_avg:72.17ms +[2025-09-02 09:00:33] [Rank 0] step:3001/10000 train_time:216526ms step_avg:72.15ms +[2025-09-02 09:00:33] [Rank 0] step:3001/10000 train_time:216526ms step_avg:72.15ms +[2025-09-02 09:00:35] [Rank 0] step:3021/10000 train_time:217887ms step_avg:72.12ms +[2025-09-02 09:00:35] [Rank 0] step:3021/10000 train_time:217887ms step_avg:72.12ms +[2025-09-02 09:00:36] [Rank 0] step:3041/10000 train_time:219375ms 
step_avg:72.14ms +[2025-09-02 09:00:36] [Rank 0] step:3041/10000 train_time:219375ms step_avg:72.14ms +[2025-09-02 09:00:37] [Rank 0] step:3061/10000 train_time:220864ms step_avg:72.15ms +[2025-09-02 09:00:37] [Rank 0] step:3061/10000 train_time:220864ms step_avg:72.15ms +[2025-09-02 09:00:39] [Rank 0] step:3081/10000 train_time:222352ms step_avg:72.17ms +[2025-09-02 09:00:39] [Rank 0] step:3081/10000 train_time:222352ms step_avg:72.17ms +[2025-09-02 09:00:40] [Rank 0] step:3101/10000 train_time:223842ms step_avg:72.18ms +[2025-09-02 09:00:40] [Rank 0] step:3101/10000 train_time:223842ms step_avg:72.18ms +[2025-09-02 09:00:42] [Rank 0] step:3121/10000 train_time:225338ms step_avg:72.20ms +[2025-09-02 09:00:42] [Rank 0] step:3121/10000 train_time:225338ms step_avg:72.20ms +[2025-09-02 09:00:43] [Rank 0] step:3141/10000 train_time:226828ms step_avg:72.22ms +[2025-09-02 09:00:43] [Rank 0] step:3141/10000 train_time:226828ms step_avg:72.22ms +[2025-09-02 09:00:45] [Rank 0] step:3161/10000 train_time:228319ms step_avg:72.23ms +[2025-09-02 09:00:45] [Rank 0] step:3161/10000 train_time:228319ms step_avg:72.23ms +[2025-09-02 09:00:46] [Rank 0] step:3181/10000 train_time:229811ms step_avg:72.24ms +[2025-09-02 09:00:46] [Rank 0] step:3181/10000 train_time:229811ms step_avg:72.24ms +[2025-09-02 09:00:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:00:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:01:00] [Rank 0] PRINT: step:3200/10000 val_loss:4.3926 svd_entropy: attn_qk:H=0.6961,top10E=0.33,eRank=113.1,q75/q25=76.27 attn_vo:H=0.7823,top10E=0.19,eRank=207.7,q75/q25=105.30 mlp_w1:H=0.7100,top10E=0.36,eRank=141.7,q75/q25=9.70 mlp_w2:H=0.8306,top10E=0.16,eRank=254.8,q75/q25=20.25 vo_prod:H=0.6795,top10E=0.31,eRank=95.5,q75/q25=14092.57 train_time:231451ms step_avg:72.33ms +[2025-09-02 09:01:00] [Rank 0] PRINT: step:3200/10000 val_loss:4.3926 svd_entropy: attn_qk:H=0.6961,top10E=0.33,eRank=113.1,q75/q25=76.27 attn_vo:H=0.7823,top10E=0.19,eRank=207.7,q75/q25=105.30 mlp_w1:H=0.7100,top10E=0.36,eRank=141.7,q75/q25=9.70 mlp_w2:H=0.8306,top10E=0.16,eRank=254.8,q75/q25=20.25 vo_prod:H=0.6795,top10E=0.31,eRank=95.5,q75/q25=14092.57 train_time:231451ms step_avg:72.33ms +[2025-09-02 09:01:00] [Rank 0] step:3201/10000 train_time:231462ms step_avg:72.31ms +[2025-09-02 09:01:00] [Rank 0] step:3201/10000 train_time:231462ms step_avg:72.31ms +[2025-09-02 09:01:01] [Rank 0] step:3221/10000 train_time:232814ms step_avg:72.28ms +[2025-09-02 09:01:01] [Rank 0] step:3221/10000 train_time:232814ms step_avg:72.28ms +[2025-09-02 09:01:03] [Rank 0] step:3241/10000 train_time:234304ms step_avg:72.29ms +[2025-09-02 09:01:03] [Rank 0] step:3241/10000 train_time:234304ms step_avg:72.29ms +[2025-09-02 09:01:04] [Rank 0] step:3261/10000 train_time:235793ms step_avg:72.31ms +[2025-09-02 09:01:04] [Rank 0] step:3261/10000 train_time:235793ms step_avg:72.31ms +[2025-09-02 09:01:06] [Rank 0] step:3281/10000 train_time:237285ms step_avg:72.32ms +[2025-09-02 09:01:06] [Rank 0] step:3281/10000 train_time:237285ms step_avg:72.32ms +[2025-09-02 09:01:07] [Rank 0] step:3301/10000 train_time:238775ms step_avg:72.33ms +[2025-09-02 09:01:07] [Rank 0] step:3301/10000 train_time:238775ms step_avg:72.33ms +[2025-09-02 09:01:09] [Rank 0] step:3321/10000 train_time:240266ms step_avg:72.35ms +[2025-09-02 09:01:09] [Rank 0] step:3321/10000 train_time:240266ms step_avg:72.35ms +[2025-09-02 
09:01:10] [Rank 0] step:3341/10000 train_time:241758ms step_avg:72.36ms +[2025-09-02 09:01:10] [Rank 0] step:3341/10000 train_time:241758ms step_avg:72.36ms +[2025-09-02 09:01:12] [Rank 0] step:3361/10000 train_time:243251ms step_avg:72.37ms +[2025-09-02 09:01:12] [Rank 0] step:3361/10000 train_time:243251ms step_avg:72.37ms +[2025-09-02 09:01:13] [Rank 0] step:3381/10000 train_time:244743ms step_avg:72.39ms +[2025-09-02 09:01:13] [Rank 0] step:3381/10000 train_time:244743ms step_avg:72.39ms +[2025-09-02 09:01:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:01:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:01:26] [Rank 0] PRINT: step:3400/10000 val_loss:4.3490 svd_entropy: attn_qk:H=0.7004,top10E=0.33,eRank=115.9,q75/q25=80.59 attn_vo:H=0.7870,top10E=0.19,eRank=213.2,q75/q25=104.57 mlp_w1:H=0.7160,top10E=0.36,eRank=146.1,q75/q25=10.13 mlp_w2:H=0.8340,top10E=0.15,eRank=261.0,q75/q25=20.48 vo_prod:H=0.6848,top10E=0.31,eRank=98.9,q75/q25=14121.04 train_time:246384ms step_avg:72.47ms +[2025-09-02 09:01:26] [Rank 0] PRINT: step:3400/10000 val_loss:4.3490 svd_entropy: attn_qk:H=0.7004,top10E=0.33,eRank=115.9,q75/q25=80.59 attn_vo:H=0.7870,top10E=0.19,eRank=213.2,q75/q25=104.57 mlp_w1:H=0.7160,top10E=0.36,eRank=146.1,q75/q25=10.13 mlp_w2:H=0.8340,top10E=0.15,eRank=261.0,q75/q25=20.48 vo_prod:H=0.6848,top10E=0.31,eRank=98.9,q75/q25=14121.04 train_time:246384ms step_avg:72.47ms +[2025-09-02 09:01:26] [Rank 0] step:3401/10000 train_time:246395ms step_avg:72.45ms +[2025-09-02 09:01:26] [Rank 0] step:3401/10000 train_time:246395ms step_avg:72.45ms +[2025-09-02 09:01:28] [Rank 0] step:3421/10000 train_time:247739ms step_avg:72.42ms +[2025-09-02 09:01:28] [Rank 0] step:3421/10000 train_time:247739ms step_avg:72.42ms +[2025-09-02 09:01:29] [Rank 0] step:3441/10000 train_time:249228ms 
step_avg:72.43ms +[2025-09-02 09:01:29] [Rank 0] step:3441/10000 train_time:249228ms step_avg:72.43ms +[2025-09-02 09:01:31] [Rank 0] step:3461/10000 train_time:250717ms step_avg:72.44ms +[2025-09-02 09:01:31] [Rank 0] step:3461/10000 train_time:250717ms step_avg:72.44ms +[2025-09-02 09:01:32] [Rank 0] step:3481/10000 train_time:252205ms step_avg:72.45ms +[2025-09-02 09:01:32] [Rank 0] step:3481/10000 train_time:252205ms step_avg:72.45ms +[2025-09-02 09:01:34] [Rank 0] step:3501/10000 train_time:253696ms step_avg:72.46ms +[2025-09-02 09:01:34] [Rank 0] step:3501/10000 train_time:253696ms step_avg:72.46ms +[2025-09-02 09:01:35] [Rank 0] step:3521/10000 train_time:255187ms step_avg:72.48ms +[2025-09-02 09:01:35] [Rank 0] step:3521/10000 train_time:255187ms step_avg:72.48ms +[2025-09-02 09:01:37] [Rank 0] step:3541/10000 train_time:256677ms step_avg:72.49ms +[2025-09-02 09:01:37] [Rank 0] step:3541/10000 train_time:256677ms step_avg:72.49ms +[2025-09-02 09:01:38] [Rank 0] step:3561/10000 train_time:258166ms step_avg:72.50ms +[2025-09-02 09:01:38] [Rank 0] step:3561/10000 train_time:258166ms step_avg:72.50ms +[2025-09-02 09:01:40] [Rank 0] step:3581/10000 train_time:259658ms step_avg:72.51ms +[2025-09-02 09:01:40] [Rank 0] step:3581/10000 train_time:259658ms step_avg:72.51ms +[2025-09-02 09:01:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:01:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:01:53] [Rank 0] PRINT: step:3600/10000 val_loss:4.3399 svd_entropy: attn_qk:H=0.7043,top10E=0.32,eRank=118.4,q75/q25=84.55 attn_vo:H=0.7910,top10E=0.18,eRank=218.1,q75/q25=103.27 mlp_w1:H=0.7215,top10E=0.35,eRank=150.3,q75/q25=10.56 mlp_w2:H=0.8369,top10E=0.15,eRank=266.4,q75/q25=20.92 vo_prod:H=0.6895,top10E=0.30,eRank=102.1,q75/q25=13538.19 train_time:261299ms step_avg:72.58ms +[2025-09-02 09:01:53] [Rank 0] PRINT: step:3600/10000 val_loss:4.3399 svd_entropy: attn_qk:H=0.7043,top10E=0.32,eRank=118.4,q75/q25=84.55 attn_vo:H=0.7910,top10E=0.18,eRank=218.1,q75/q25=103.27 mlp_w1:H=0.7215,top10E=0.35,eRank=150.3,q75/q25=10.56 mlp_w2:H=0.8369,top10E=0.15,eRank=266.4,q75/q25=20.92 vo_prod:H=0.6895,top10E=0.30,eRank=102.1,q75/q25=13538.19 train_time:261299ms step_avg:72.58ms +[2025-09-02 09:01:53] [Rank 0] step:3601/10000 train_time:261310ms step_avg:72.57ms +[2025-09-02 09:01:53] [Rank 0] step:3601/10000 train_time:261310ms step_avg:72.57ms +[2025-09-02 09:01:55] [Rank 0] step:3621/10000 train_time:262654ms step_avg:72.54ms +[2025-09-02 09:01:55] [Rank 0] step:3621/10000 train_time:262654ms step_avg:72.54ms +[2025-09-02 09:01:56] [Rank 0] step:3641/10000 train_time:264142ms step_avg:72.55ms +[2025-09-02 09:01:56] [Rank 0] step:3641/10000 train_time:264142ms step_avg:72.55ms +[2025-09-02 09:01:57] [Rank 0] step:3661/10000 train_time:265631ms step_avg:72.56ms +[2025-09-02 09:01:57] [Rank 0] step:3661/10000 train_time:265631ms step_avg:72.56ms +[2025-09-02 09:01:59] [Rank 0] step:3681/10000 train_time:267120ms step_avg:72.57ms +[2025-09-02 09:01:59] [Rank 0] step:3681/10000 train_time:267120ms step_avg:72.57ms +[2025-09-02 09:02:00] [Rank 0] step:3701/10000 train_time:268610ms step_avg:72.58ms +[2025-09-02 09:02:00] [Rank 0] step:3701/10000 train_time:268610ms step_avg:72.58ms +[2025-09-02 09:02:02] [Rank 0] step:3721/10000 train_time:270126ms step_avg:72.59ms +[2025-09-02 09:02:02] [Rank 0] step:3721/10000 train_time:270126ms step_avg:72.59ms +[2025-09-02 
09:02:04] [Rank 0] step:3741/10000 train_time:271654ms step_avg:72.62ms +[2025-09-02 09:02:04] [Rank 0] step:3741/10000 train_time:271654ms step_avg:72.62ms +[2025-09-02 09:02:05] [Rank 0] step:3761/10000 train_time:273180ms step_avg:72.64ms +[2025-09-02 09:02:05] [Rank 0] step:3761/10000 train_time:273180ms step_avg:72.64ms +[2025-09-02 09:02:07] [Rank 0] step:3781/10000 train_time:274719ms step_avg:72.66ms +[2025-09-02 09:02:07] [Rank 0] step:3781/10000 train_time:274719ms step_avg:72.66ms +[2025-09-02 09:02:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:02:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:02:20] [Rank 0] PRINT: step:3800/10000 val_loss:4.2783 svd_entropy: attn_qk:H=0.7077,top10E=0.32,eRank=120.7,q75/q25=86.87 attn_vo:H=0.7948,top10E=0.18,eRank=222.9,q75/q25=101.78 mlp_w1:H=0.7267,top10E=0.34,eRank=154.4,q75/q25=10.88 mlp_w2:H=0.8397,top10E=0.15,eRank=271.7,q75/q25=21.20 vo_prod:H=0.6938,top10E=0.29,eRank=105.2,q75/q25=13095.62 train_time:276399ms step_avg:72.74ms +[2025-09-02 09:02:20] [Rank 0] PRINT: step:3800/10000 val_loss:4.2783 svd_entropy: attn_qk:H=0.7077,top10E=0.32,eRank=120.7,q75/q25=86.87 attn_vo:H=0.7948,top10E=0.18,eRank=222.9,q75/q25=101.78 mlp_w1:H=0.7267,top10E=0.34,eRank=154.4,q75/q25=10.88 mlp_w2:H=0.8397,top10E=0.15,eRank=271.7,q75/q25=21.20 vo_prod:H=0.6938,top10E=0.29,eRank=105.2,q75/q25=13095.62 train_time:276399ms step_avg:72.74ms +[2025-09-02 09:02:20] [Rank 0] step:3801/10000 train_time:276409ms step_avg:72.72ms +[2025-09-02 09:02:20] [Rank 0] step:3801/10000 train_time:276409ms step_avg:72.72ms +[2025-09-02 09:02:21] [Rank 0] step:3821/10000 train_time:277793ms step_avg:72.70ms +[2025-09-02 09:02:21] [Rank 0] step:3821/10000 train_time:277793ms step_avg:72.70ms +[2025-09-02 09:02:23] [Rank 0] step:3841/10000 
train_time:279323ms step_avg:72.72ms +[2025-09-02 09:02:23] [Rank 0] step:3841/10000 train_time:279323ms step_avg:72.72ms +[2025-09-02 09:02:24] [Rank 0] step:3861/10000 train_time:280848ms step_avg:72.74ms +[2025-09-02 09:02:24] [Rank 0] step:3861/10000 train_time:280848ms step_avg:72.74ms +[2025-09-02 09:02:26] [Rank 0] step:3881/10000 train_time:282376ms step_avg:72.76ms +[2025-09-02 09:02:26] [Rank 0] step:3881/10000 train_time:282376ms step_avg:72.76ms +[2025-09-02 09:02:27] [Rank 0] step:3901/10000 train_time:283903ms step_avg:72.78ms +[2025-09-02 09:02:27] [Rank 0] step:3901/10000 train_time:283903ms step_avg:72.78ms +[2025-09-02 09:02:29] [Rank 0] step:3921/10000 train_time:285428ms step_avg:72.79ms +[2025-09-02 09:02:29] [Rank 0] step:3921/10000 train_time:285428ms step_avg:72.79ms +[2025-09-02 09:02:30] [Rank 0] step:3941/10000 train_time:286955ms step_avg:72.81ms +[2025-09-02 09:02:30] [Rank 0] step:3941/10000 train_time:286955ms step_avg:72.81ms +[2025-09-02 09:02:32] [Rank 0] step:3961/10000 train_time:288481ms step_avg:72.83ms +[2025-09-02 09:02:32] [Rank 0] step:3961/10000 train_time:288481ms step_avg:72.83ms +[2025-09-02 09:02:33] [Rank 0] step:3981/10000 train_time:290007ms step_avg:72.85ms +[2025-09-02 09:02:33] [Rank 0] step:3981/10000 train_time:290007ms step_avg:72.85ms +[2025-09-02 09:02:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:02:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:02:47] [Rank 0] PRINT: step:4000/10000 val_loss:4.2511 svd_entropy: attn_qk:H=0.7111,top10E=0.31,eRank=123.1,q75/q25=90.13 attn_vo:H=0.7983,top10E=0.18,eRank=227.3,q75/q25=98.86 mlp_w1:H=0.7316,top10E=0.33,eRank=158.5,q75/q25=11.32 mlp_w2:H=0.8422,top10E=0.14,eRank=276.6,q75/q25=21.48 vo_prod:H=0.6979,top10E=0.29,eRank=108.1,q75/q25=12434.51 train_time:291686ms step_avg:72.92ms +[2025-09-02 09:02:47] [Rank 0] PRINT: step:4000/10000 val_loss:4.2511 svd_entropy: attn_qk:H=0.7111,top10E=0.31,eRank=123.1,q75/q25=90.13 attn_vo:H=0.7983,top10E=0.18,eRank=227.3,q75/q25=98.86 mlp_w1:H=0.7316,top10E=0.33,eRank=158.5,q75/q25=11.32 mlp_w2:H=0.8422,top10E=0.14,eRank=276.6,q75/q25=21.48 vo_prod:H=0.6979,top10E=0.29,eRank=108.1,q75/q25=12434.51 train_time:291686ms step_avg:72.92ms +[2025-09-02 09:02:47] [Rank 0] step:4001/10000 train_time:291697ms step_avg:72.91ms +[2025-09-02 09:02:47] [Rank 0] step:4001/10000 train_time:291697ms step_avg:72.91ms +[2025-09-02 09:02:48] [Rank 0] step:4021/10000 train_time:293091ms step_avg:72.89ms +[2025-09-02 09:02:48] [Rank 0] step:4021/10000 train_time:293091ms step_avg:72.89ms +[2025-09-02 09:02:50] [Rank 0] step:4041/10000 train_time:294620ms step_avg:72.91ms +[2025-09-02 09:02:50] [Rank 0] step:4041/10000 train_time:294620ms step_avg:72.91ms +[2025-09-02 09:02:51] [Rank 0] step:4061/10000 train_time:296146ms step_avg:72.92ms +[2025-09-02 09:02:51] [Rank 0] step:4061/10000 train_time:296146ms step_avg:72.92ms +[2025-09-02 09:02:53] [Rank 0] step:4081/10000 train_time:297675ms step_avg:72.94ms +[2025-09-02 09:02:53] [Rank 0] step:4081/10000 train_time:297675ms step_avg:72.94ms +[2025-09-02 09:02:54] [Rank 0] step:4101/10000 train_time:299203ms step_avg:72.96ms +[2025-09-02 09:02:54] [Rank 0] step:4101/10000 train_time:299203ms step_avg:72.96ms +[2025-09-02 09:02:56] [Rank 0] step:4121/10000 train_time:300731ms step_avg:72.98ms +[2025-09-02 09:02:56] [Rank 0] step:4121/10000 train_time:300731ms step_avg:72.98ms +[2025-09-02 
09:02:57] [Rank 0] step:4141/10000 train_time:302260ms step_avg:72.99ms +[2025-09-02 09:02:57] [Rank 0] step:4141/10000 train_time:302260ms step_avg:72.99ms +[2025-09-02 09:02:59] [Rank 0] step:4161/10000 train_time:303788ms step_avg:73.01ms +[2025-09-02 09:02:59] [Rank 0] step:4161/10000 train_time:303788ms step_avg:73.01ms +[2025-09-02 09:03:00] [Rank 0] step:4181/10000 train_time:305318ms step_avg:73.03ms +[2025-09-02 09:03:00] [Rank 0] step:4181/10000 train_time:305318ms step_avg:73.03ms +[2025-09-02 09:03:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:03:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:03:14] [Rank 0] PRINT: step:4200/10000 val_loss:4.2305 svd_entropy: attn_qk:H=0.7144,top10E=0.31,eRank=125.4,q75/q25=92.16 attn_vo:H=0.8016,top10E=0.17,eRank=231.6,q75/q25=96.97 mlp_w1:H=0.7360,top10E=0.33,eRank=162.2,q75/q25=11.72 mlp_w2:H=0.8445,top10E=0.14,eRank=281.1,q75/q25=21.78 vo_prod:H=0.7017,top10E=0.28,eRank=111.0,q75/q25=11912.48 train_time:306998ms step_avg:73.09ms +[2025-09-02 09:03:14] [Rank 0] PRINT: step:4200/10000 val_loss:4.2305 svd_entropy: attn_qk:H=0.7144,top10E=0.31,eRank=125.4,q75/q25=92.16 attn_vo:H=0.8016,top10E=0.17,eRank=231.6,q75/q25=96.97 mlp_w1:H=0.7360,top10E=0.33,eRank=162.2,q75/q25=11.72 mlp_w2:H=0.8445,top10E=0.14,eRank=281.1,q75/q25=21.78 vo_prod:H=0.7017,top10E=0.28,eRank=111.0,q75/q25=11912.48 train_time:306998ms step_avg:73.09ms +[2025-09-02 09:03:14] [Rank 0] step:4201/10000 train_time:307009ms step_avg:73.08ms +[2025-09-02 09:03:14] [Rank 0] step:4201/10000 train_time:307009ms step_avg:73.08ms +[2025-09-02 09:03:15] [Rank 0] step:4221/10000 train_time:308406ms step_avg:73.06ms +[2025-09-02 09:03:15] [Rank 0] step:4221/10000 train_time:308406ms step_avg:73.06ms +[2025-09-02 09:03:17] [Rank 0] step:4241/10000 train_time:309932ms 
step_avg:73.08ms +[2025-09-02 09:03:17] [Rank 0] step:4241/10000 train_time:309932ms step_avg:73.08ms +[2025-09-02 09:03:18] [Rank 0] step:4261/10000 train_time:311458ms step_avg:73.10ms +[2025-09-02 09:03:18] [Rank 0] step:4261/10000 train_time:311458ms step_avg:73.10ms +[2025-09-02 09:03:20] [Rank 0] step:4281/10000 train_time:312985ms step_avg:73.11ms +[2025-09-02 09:03:20] [Rank 0] step:4281/10000 train_time:312985ms step_avg:73.11ms +[2025-09-02 09:03:21] [Rank 0] step:4301/10000 train_time:314513ms step_avg:73.13ms +[2025-09-02 09:03:21] [Rank 0] step:4301/10000 train_time:314513ms step_avg:73.13ms +[2025-09-02 09:03:23] [Rank 0] step:4321/10000 train_time:316040ms step_avg:73.14ms +[2025-09-02 09:03:23] [Rank 0] step:4321/10000 train_time:316040ms step_avg:73.14ms +[2025-09-02 09:03:24] [Rank 0] step:4341/10000 train_time:317565ms step_avg:73.15ms +[2025-09-02 09:03:24] [Rank 0] step:4341/10000 train_time:317565ms step_avg:73.15ms +[2025-09-02 09:03:26] [Rank 0] step:4361/10000 train_time:319093ms step_avg:73.17ms +[2025-09-02 09:03:26] [Rank 0] step:4361/10000 train_time:319093ms step_avg:73.17ms +[2025-09-02 09:03:27] [Rank 0] step:4381/10000 train_time:320618ms step_avg:73.18ms +[2025-09-02 09:03:27] [Rank 0] step:4381/10000 train_time:320618ms step_avg:73.18ms +[2025-09-02 09:03:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:03:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:03:41] [Rank 0] PRINT: step:4400/10000 val_loss:4.2060 svd_entropy: attn_qk:H=0.7174,top10E=0.30,eRank=127.6,q75/q25=94.02 attn_vo:H=0.8046,top10E=0.17,eRank=235.6,q75/q25=94.54 mlp_w1:H=0.7402,top10E=0.32,eRank=165.9,q75/q25=12.02 mlp_w2:H=0.8468,top10E=0.14,eRank=285.7,q75/q25=21.61 vo_prod:H=0.7050,top10E=0.28,eRank=113.6,q75/q25=11349.28 train_time:322298ms step_avg:73.25ms +[2025-09-02 09:03:41] [Rank 0] PRINT: step:4400/10000 val_loss:4.2060 svd_entropy: attn_qk:H=0.7174,top10E=0.30,eRank=127.6,q75/q25=94.02 attn_vo:H=0.8046,top10E=0.17,eRank=235.6,q75/q25=94.54 mlp_w1:H=0.7402,top10E=0.32,eRank=165.9,q75/q25=12.02 mlp_w2:H=0.8468,top10E=0.14,eRank=285.7,q75/q25=21.61 vo_prod:H=0.7050,top10E=0.28,eRank=113.6,q75/q25=11349.28 train_time:322298ms step_avg:73.25ms +[2025-09-02 09:03:41] [Rank 0] step:4401/10000 train_time:322314ms step_avg:73.24ms +[2025-09-02 09:03:41] [Rank 0] step:4401/10000 train_time:322314ms step_avg:73.24ms +[2025-09-02 09:03:42] [Rank 0] step:4421/10000 train_time:323688ms step_avg:73.22ms +[2025-09-02 09:03:42] [Rank 0] step:4421/10000 train_time:323688ms step_avg:73.22ms +[2025-09-02 09:03:44] [Rank 0] step:4441/10000 train_time:325212ms step_avg:73.23ms +[2025-09-02 09:03:44] [Rank 0] step:4441/10000 train_time:325212ms step_avg:73.23ms +[2025-09-02 09:03:45] [Rank 0] step:4461/10000 train_time:326742ms step_avg:73.24ms +[2025-09-02 09:03:45] [Rank 0] step:4461/10000 train_time:326742ms step_avg:73.24ms +[2025-09-02 09:03:47] [Rank 0] step:4481/10000 train_time:328272ms step_avg:73.26ms +[2025-09-02 09:03:47] [Rank 0] step:4481/10000 train_time:328272ms step_avg:73.26ms +[2025-09-02 09:03:48] [Rank 0] step:4501/10000 train_time:329803ms step_avg:73.27ms +[2025-09-02 09:03:48] [Rank 0] step:4501/10000 train_time:329803ms step_avg:73.27ms +[2025-09-02 09:03:50] [Rank 0] step:4521/10000 train_time:331333ms step_avg:73.29ms +[2025-09-02 09:03:50] [Rank 0] step:4521/10000 train_time:331333ms step_avg:73.29ms +[2025-09-02 
09:03:51] [Rank 0] step:4541/10000 train_time:332863ms step_avg:73.30ms +[2025-09-02 09:03:51] [Rank 0] step:4541/10000 train_time:332863ms step_avg:73.30ms +[2025-09-02 09:03:53] [Rank 0] step:4561/10000 train_time:334395ms step_avg:73.32ms +[2025-09-02 09:03:53] [Rank 0] step:4561/10000 train_time:334395ms step_avg:73.32ms +[2025-09-02 09:03:54] [Rank 0] step:4581/10000 train_time:335928ms step_avg:73.33ms +[2025-09-02 09:03:54] [Rank 0] step:4581/10000 train_time:335928ms step_avg:73.33ms +[2025-09-02 09:03:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:03:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:04:08] [Rank 0] PRINT: step:4600/10000 val_loss:4.1742 svd_entropy: attn_qk:H=0.7204,top10E=0.30,eRank=129.8,q75/q25=95.82 attn_vo:H=0.8076,top10E=0.17,eRank=239.7,q75/q25=92.61 mlp_w1:H=0.7444,top10E=0.32,eRank=169.6,q75/q25=12.33 mlp_w2:H=0.8490,top10E=0.14,eRank=290.1,q75/q25=21.67 vo_prod:H=0.7087,top10E=0.28,eRank=116.5,q75/q25=10609.75 train_time:337615ms step_avg:73.39ms +[2025-09-02 09:04:08] [Rank 0] PRINT: step:4600/10000 val_loss:4.1742 svd_entropy: attn_qk:H=0.7204,top10E=0.30,eRank=129.8,q75/q25=95.82 attn_vo:H=0.8076,top10E=0.17,eRank=239.7,q75/q25=92.61 mlp_w1:H=0.7444,top10E=0.32,eRank=169.6,q75/q25=12.33 mlp_w2:H=0.8490,top10E=0.14,eRank=290.1,q75/q25=21.67 vo_prod:H=0.7087,top10E=0.28,eRank=116.5,q75/q25=10609.75 train_time:337615ms step_avg:73.39ms +[2025-09-02 09:04:08] [Rank 0] step:4601/10000 train_time:337627ms step_avg:73.38ms +[2025-09-02 09:04:08] [Rank 0] step:4601/10000 train_time:337627ms step_avg:73.38ms +[2025-09-02 09:04:09] [Rank 0] step:4621/10000 train_time:339005ms step_avg:73.36ms +[2025-09-02 09:04:09] [Rank 0] step:4621/10000 train_time:339005ms step_avg:73.36ms +[2025-09-02 09:04:11] [Rank 0] step:4641/10000 train_time:340536ms 
step_avg:73.38ms +[2025-09-02 09:04:11] [Rank 0] step:4641/10000 train_time:340536ms step_avg:73.38ms +[2025-09-02 09:04:12] [Rank 0] step:4661/10000 train_time:342067ms step_avg:73.39ms +[2025-09-02 09:04:12] [Rank 0] step:4661/10000 train_time:342067ms step_avg:73.39ms +[2025-09-02 09:04:14] [Rank 0] step:4681/10000 train_time:343599ms step_avg:73.40ms +[2025-09-02 09:04:14] [Rank 0] step:4681/10000 train_time:343599ms step_avg:73.40ms +[2025-09-02 09:04:15] [Rank 0] step:4701/10000 train_time:345132ms step_avg:73.42ms +[2025-09-02 09:04:15] [Rank 0] step:4701/10000 train_time:345132ms step_avg:73.42ms +[2025-09-02 09:04:17] [Rank 0] step:4721/10000 train_time:346663ms step_avg:73.43ms +[2025-09-02 09:04:17] [Rank 0] step:4721/10000 train_time:346663ms step_avg:73.43ms +[2025-09-02 09:04:18] [Rank 0] step:4741/10000 train_time:348195ms step_avg:73.44ms +[2025-09-02 09:04:18] [Rank 0] step:4741/10000 train_time:348195ms step_avg:73.44ms +[2025-09-02 09:04:20] [Rank 0] step:4761/10000 train_time:349729ms step_avg:73.46ms +[2025-09-02 09:04:20] [Rank 0] step:4761/10000 train_time:349729ms step_avg:73.46ms +[2025-09-02 09:04:21] [Rank 0] step:4781/10000 train_time:351260ms step_avg:73.47ms +[2025-09-02 09:04:21] [Rank 0] step:4781/10000 train_time:351260ms step_avg:73.47ms +[2025-09-02 09:04:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:04:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:04:35] [Rank 0] PRINT: step:4800/10000 val_loss:4.1596 svd_entropy: attn_qk:H=0.7233,top10E=0.30,eRank=132.0,q75/q25=97.99 attn_vo:H=0.8104,top10E=0.16,eRank=243.5,q75/q25=89.84 mlp_w1:H=0.7478,top10E=0.31,eRank=172.8,q75/q25=12.63 mlp_w2:H=0.8508,top10E=0.14,eRank=293.9,q75/q25=21.99 vo_prod:H=0.7120,top10E=0.27,eRank=119.2,q75/q25=9843.77 train_time:352949ms step_avg:73.53ms +[2025-09-02 09:04:35] [Rank 0] PRINT: step:4800/10000 val_loss:4.1596 svd_entropy: attn_qk:H=0.7233,top10E=0.30,eRank=132.0,q75/q25=97.99 attn_vo:H=0.8104,top10E=0.16,eRank=243.5,q75/q25=89.84 mlp_w1:H=0.7478,top10E=0.31,eRank=172.8,q75/q25=12.63 mlp_w2:H=0.8508,top10E=0.14,eRank=293.9,q75/q25=21.99 vo_prod:H=0.7120,top10E=0.27,eRank=119.2,q75/q25=9843.77 train_time:352949ms step_avg:73.53ms +[2025-09-02 09:04:35] [Rank 0] step:4801/10000 train_time:352960ms step_avg:73.52ms +[2025-09-02 09:04:35] [Rank 0] step:4801/10000 train_time:352960ms step_avg:73.52ms +[2025-09-02 09:04:36] [Rank 0] step:4821/10000 train_time:354344ms step_avg:73.50ms +[2025-09-02 09:04:36] [Rank 0] step:4821/10000 train_time:354344ms step_avg:73.50ms +[2025-09-02 09:04:38] [Rank 0] step:4841/10000 train_time:355876ms step_avg:73.51ms +[2025-09-02 09:04:38] [Rank 0] step:4841/10000 train_time:355876ms step_avg:73.51ms +[2025-09-02 09:04:39] [Rank 0] step:4861/10000 train_time:357411ms step_avg:73.53ms +[2025-09-02 09:04:39] [Rank 0] step:4861/10000 train_time:357411ms step_avg:73.53ms +[2025-09-02 09:04:41] [Rank 0] step:4881/10000 train_time:358941ms step_avg:73.54ms +[2025-09-02 09:04:41] [Rank 0] step:4881/10000 train_time:358941ms step_avg:73.54ms +[2025-09-02 09:04:42] [Rank 0] step:4901/10000 train_time:360471ms step_avg:73.55ms +[2025-09-02 09:04:42] [Rank 0] step:4901/10000 train_time:360471ms step_avg:73.55ms +[2025-09-02 09:04:44] [Rank 0] step:4921/10000 train_time:362005ms step_avg:73.56ms +[2025-09-02 09:04:44] [Rank 0] step:4921/10000 train_time:362005ms step_avg:73.56ms +[2025-09-02 
09:04:45] [Rank 0] step:4941/10000 train_time:363540ms step_avg:73.58ms +[2025-09-02 09:04:45] [Rank 0] step:4941/10000 train_time:363540ms step_avg:73.58ms +[2025-09-02 09:04:47] [Rank 0] step:4961/10000 train_time:365071ms step_avg:73.59ms +[2025-09-02 09:04:47] [Rank 0] step:4961/10000 train_time:365071ms step_avg:73.59ms +[2025-09-02 09:04:49] [Rank 0] step:4981/10000 train_time:366603ms step_avg:73.60ms +[2025-09-02 09:04:49] [Rank 0] step:4981/10000 train_time:366603ms step_avg:73.60ms +[2025-09-02 09:04:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:04:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:05:02] [Rank 0] PRINT: step:5000/10000 val_loss:4.1360 svd_entropy: attn_qk:H=0.7259,top10E=0.29,eRank=134.0,q75/q25=99.77 attn_vo:H=0.8129,top10E=0.16,eRank=247.2,q75/q25=87.35 mlp_w1:H=0.7511,top10E=0.31,eRank=175.9,q75/q25=12.98 mlp_w2:H=0.8525,top10E=0.13,eRank=297.5,q75/q25=22.15 vo_prod:H=0.7150,top10E=0.27,eRank=121.7,q75/q25=9478.68 train_time:368291ms step_avg:73.66ms +[2025-09-02 09:05:02] [Rank 0] PRINT: step:5000/10000 val_loss:4.1360 svd_entropy: attn_qk:H=0.7259,top10E=0.29,eRank=134.0,q75/q25=99.77 attn_vo:H=0.8129,top10E=0.16,eRank=247.2,q75/q25=87.35 mlp_w1:H=0.7511,top10E=0.31,eRank=175.9,q75/q25=12.98 mlp_w2:H=0.8525,top10E=0.13,eRank=297.5,q75/q25=22.15 vo_prod:H=0.7150,top10E=0.27,eRank=121.7,q75/q25=9478.68 train_time:368291ms step_avg:73.66ms +[2025-09-02 09:05:02] [Rank 0] step:5001/10000 train_time:368302ms step_avg:73.65ms +[2025-09-02 09:05:02] [Rank 0] step:5001/10000 train_time:368302ms step_avg:73.65ms +[2025-09-02 09:05:03] [Rank 0] step:5021/10000 train_time:369681ms step_avg:73.63ms +[2025-09-02 09:05:03] [Rank 0] step:5021/10000 train_time:369681ms step_avg:73.63ms +[2025-09-02 09:05:05] [Rank 0] step:5041/10000 train_time:371216ms 
step_avg:73.64ms +[2025-09-02 09:05:05] [Rank 0] step:5041/10000 train_time:371216ms step_avg:73.64ms +[2025-09-02 09:05:06] [Rank 0] step:5061/10000 train_time:372747ms step_avg:73.65ms +[2025-09-02 09:05:06] [Rank 0] step:5061/10000 train_time:372747ms step_avg:73.65ms +[2025-09-02 09:05:08] [Rank 0] step:5081/10000 train_time:374279ms step_avg:73.66ms +[2025-09-02 09:05:08] [Rank 0] step:5081/10000 train_time:374279ms step_avg:73.66ms +[2025-09-02 09:05:09] [Rank 0] step:5101/10000 train_time:375814ms step_avg:73.67ms +[2025-09-02 09:05:09] [Rank 0] step:5101/10000 train_time:375814ms step_avg:73.67ms +[2025-09-02 09:05:11] [Rank 0] step:5121/10000 train_time:377348ms step_avg:73.69ms +[2025-09-02 09:05:11] [Rank 0] step:5121/10000 train_time:377348ms step_avg:73.69ms +[2025-09-02 09:05:13] [Rank 0] step:5141/10000 train_time:378886ms step_avg:73.70ms +[2025-09-02 09:05:13] [Rank 0] step:5141/10000 train_time:378886ms step_avg:73.70ms +[2025-09-02 09:05:14] [Rank 0] step:5161/10000 train_time:380421ms step_avg:73.71ms +[2025-09-02 09:05:14] [Rank 0] step:5161/10000 train_time:380421ms step_avg:73.71ms +[2025-09-02 09:05:16] [Rank 0] step:5181/10000 train_time:381959ms step_avg:73.72ms +[2025-09-02 09:05:16] [Rank 0] step:5181/10000 train_time:381959ms step_avg:73.72ms +[2025-09-02 09:05:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:05:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:05:29] [Rank 0] PRINT: step:5200/10000 val_loss:4.1143 svd_entropy: attn_qk:H=0.7283,top10E=0.29,eRank=135.9,q75/q25=100.32 attn_vo:H=0.8154,top10E=0.16,eRank=250.7,q75/q25=85.45 mlp_w1:H=0.7545,top10E=0.30,eRank=179.2,q75/q25=13.33 mlp_w2:H=0.8541,top10E=0.13,eRank=300.9,q75/q25=22.33 vo_prod:H=0.7179,top10E=0.27,eRank=124.2,q75/q25=8869.71 train_time:383673ms step_avg:73.78ms +[2025-09-02 09:05:29] [Rank 0] PRINT: step:5200/10000 val_loss:4.1143 svd_entropy: attn_qk:H=0.7283,top10E=0.29,eRank=135.9,q75/q25=100.32 attn_vo:H=0.8154,top10E=0.16,eRank=250.7,q75/q25=85.45 mlp_w1:H=0.7545,top10E=0.30,eRank=179.2,q75/q25=13.33 mlp_w2:H=0.8541,top10E=0.13,eRank=300.9,q75/q25=22.33 vo_prod:H=0.7179,top10E=0.27,eRank=124.2,q75/q25=8869.71 train_time:383673ms step_avg:73.78ms +[2025-09-02 09:05:29] [Rank 0] step:5201/10000 train_time:383684ms step_avg:73.77ms +[2025-09-02 09:05:29] [Rank 0] step:5201/10000 train_time:383684ms step_avg:73.77ms +[2025-09-02 09:05:30] [Rank 0] step:5221/10000 train_time:385112ms step_avg:73.76ms +[2025-09-02 09:05:30] [Rank 0] step:5221/10000 train_time:385112ms step_avg:73.76ms +[2025-09-02 09:05:32] [Rank 0] step:5241/10000 train_time:386673ms step_avg:73.78ms +[2025-09-02 09:05:32] [Rank 0] step:5241/10000 train_time:386673ms step_avg:73.78ms +[2025-09-02 09:05:34] [Rank 0] step:5261/10000 train_time:388236ms step_avg:73.80ms +[2025-09-02 09:05:34] [Rank 0] step:5261/10000 train_time:388236ms step_avg:73.80ms +[2025-09-02 09:05:35] [Rank 0] step:5281/10000 train_time:389800ms step_avg:73.81ms +[2025-09-02 09:05:35] [Rank 0] step:5281/10000 train_time:389800ms step_avg:73.81ms +[2025-09-02 09:05:37] [Rank 0] step:5301/10000 train_time:391376ms step_avg:73.83ms +[2025-09-02 09:05:37] [Rank 0] step:5301/10000 train_time:391376ms step_avg:73.83ms +[2025-09-02 09:05:38] [Rank 0] step:5321/10000 train_time:392938ms step_avg:73.85ms +[2025-09-02 09:05:38] [Rank 0] step:5321/10000 train_time:392938ms step_avg:73.85ms +[2025-09-02 
09:05:40] [Rank 0] step:5341/10000 train_time:394503ms step_avg:73.86ms +[2025-09-02 09:05:40] [Rank 0] step:5341/10000 train_time:394503ms step_avg:73.86ms +[2025-09-02 09:05:41] [Rank 0] step:5361/10000 train_time:396070ms step_avg:73.88ms +[2025-09-02 09:05:41] [Rank 0] step:5361/10000 train_time:396070ms step_avg:73.88ms +[2025-09-02 09:05:43] [Rank 0] step:5381/10000 train_time:397637ms step_avg:73.90ms +[2025-09-02 09:05:43] [Rank 0] step:5381/10000 train_time:397637ms step_avg:73.90ms +[2025-09-02 09:05:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:05:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:05:56] [Rank 0] PRINT: step:5400/10000 val_loss:4.0969 svd_entropy: attn_qk:H=0.7305,top10E=0.29,eRank=137.6,q75/q25=101.30 attn_vo:H=0.8176,top10E=0.16,eRank=254.0,q75/q25=82.59 mlp_w1:H=0.7575,top10E=0.30,eRank=182.3,q75/q25=13.66 mlp_w2:H=0.8556,top10E=0.13,eRank=304.1,q75/q25=22.61 vo_prod:H=0.7206,top10E=0.26,eRank=126.5,q75/q25=8186.56 train_time:399357ms step_avg:73.95ms +[2025-09-02 09:05:56] [Rank 0] PRINT: step:5400/10000 val_loss:4.0969 svd_entropy: attn_qk:H=0.7305,top10E=0.29,eRank=137.6,q75/q25=101.30 attn_vo:H=0.8176,top10E=0.16,eRank=254.0,q75/q25=82.59 mlp_w1:H=0.7575,top10E=0.30,eRank=182.3,q75/q25=13.66 mlp_w2:H=0.8556,top10E=0.13,eRank=304.1,q75/q25=22.61 vo_prod:H=0.7206,top10E=0.26,eRank=126.5,q75/q25=8186.56 train_time:399357ms step_avg:73.95ms +[2025-09-02 09:05:56] [Rank 0] step:5401/10000 train_time:399368ms step_avg:73.94ms +[2025-09-02 09:05:56] [Rank 0] step:5401/10000 train_time:399368ms step_avg:73.94ms +[2025-09-02 09:05:58] [Rank 0] step:5421/10000 train_time:400783ms step_avg:73.93ms +[2025-09-02 09:05:58] [Rank 0] step:5421/10000 train_time:400783ms step_avg:73.93ms +[2025-09-02 09:05:59] [Rank 0] step:5441/10000 train_time:402342ms 
step_avg:73.95ms +[2025-09-02 09:05:59] [Rank 0] step:5441/10000 train_time:402342ms step_avg:73.95ms +[2025-09-02 09:06:01] [Rank 0] step:5461/10000 train_time:403909ms step_avg:73.96ms +[2025-09-02 09:06:01] [Rank 0] step:5461/10000 train_time:403909ms step_avg:73.96ms +[2025-09-02 09:06:03] [Rank 0] step:5481/10000 train_time:405476ms step_avg:73.98ms +[2025-09-02 09:06:03] [Rank 0] step:5481/10000 train_time:405476ms step_avg:73.98ms +[2025-09-02 09:06:04] [Rank 0] step:5501/10000 train_time:407046ms step_avg:73.99ms +[2025-09-02 09:06:04] [Rank 0] step:5501/10000 train_time:407046ms step_avg:73.99ms +[2025-09-02 09:06:06] [Rank 0] step:5521/10000 train_time:408617ms step_avg:74.01ms +[2025-09-02 09:06:06] [Rank 0] step:5521/10000 train_time:408617ms step_avg:74.01ms +[2025-09-02 09:06:07] [Rank 0] step:5541/10000 train_time:410183ms step_avg:74.03ms +[2025-09-02 09:06:07] [Rank 0] step:5541/10000 train_time:410183ms step_avg:74.03ms +[2025-09-02 09:06:09] [Rank 0] step:5561/10000 train_time:411747ms step_avg:74.04ms +[2025-09-02 09:06:09] [Rank 0] step:5561/10000 train_time:411747ms step_avg:74.04ms +[2025-09-02 09:06:10] [Rank 0] step:5581/10000 train_time:413312ms step_avg:74.06ms +[2025-09-02 09:06:10] [Rank 0] step:5581/10000 train_time:413312ms step_avg:74.06ms +[2025-09-02 09:06:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:06:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:06:24] [Rank 0] PRINT: step:5600/10000 val_loss:4.0820 svd_entropy: attn_qk:H=0.7328,top10E=0.28,eRank=139.5,q75/q25=101.76 attn_vo:H=0.8197,top10E=0.15,eRank=257.2,q75/q25=80.37 mlp_w1:H=0.7605,top10E=0.30,eRank=185.2,q75/q25=13.97 mlp_w2:H=0.8570,top10E=0.13,eRank=307.2,q75/q25=22.75 vo_prod:H=0.7232,top10E=0.26,eRank=128.8,q75/q25=7603.51 train_time:415035ms step_avg:74.11ms +[2025-09-02 09:06:24] [Rank 0] PRINT: step:5600/10000 val_loss:4.0820 svd_entropy: attn_qk:H=0.7328,top10E=0.28,eRank=139.5,q75/q25=101.76 attn_vo:H=0.8197,top10E=0.15,eRank=257.2,q75/q25=80.37 mlp_w1:H=0.7605,top10E=0.30,eRank=185.2,q75/q25=13.97 mlp_w2:H=0.8570,top10E=0.13,eRank=307.2,q75/q25=22.75 vo_prod:H=0.7232,top10E=0.26,eRank=128.8,q75/q25=7603.51 train_time:415035ms step_avg:74.11ms +[2025-09-02 09:06:24] [Rank 0] step:5601/10000 train_time:415046ms step_avg:74.10ms +[2025-09-02 09:06:24] [Rank 0] step:5601/10000 train_time:415046ms step_avg:74.10ms +[2025-09-02 09:06:25] [Rank 0] step:5621/10000 train_time:416469ms step_avg:74.09ms +[2025-09-02 09:06:25] [Rank 0] step:5621/10000 train_time:416469ms step_avg:74.09ms +[2025-09-02 09:06:27] [Rank 0] step:5641/10000 train_time:418033ms step_avg:74.11ms +[2025-09-02 09:06:27] [Rank 0] step:5641/10000 train_time:418033ms step_avg:74.11ms +[2025-09-02 09:06:28] [Rank 0] step:5661/10000 train_time:419594ms step_avg:74.12ms +[2025-09-02 09:06:28] [Rank 0] step:5661/10000 train_time:419594ms step_avg:74.12ms +[2025-09-02 09:06:30] [Rank 0] step:5681/10000 train_time:421159ms step_avg:74.13ms +[2025-09-02 09:06:30] [Rank 0] step:5681/10000 train_time:421159ms step_avg:74.13ms +[2025-09-02 09:06:31] [Rank 0] step:5701/10000 train_time:422721ms step_avg:74.15ms +[2025-09-02 09:06:31] [Rank 0] step:5701/10000 train_time:422721ms step_avg:74.15ms +[2025-09-02 09:06:33] [Rank 0] step:5721/10000 train_time:424288ms step_avg:74.16ms +[2025-09-02 09:06:33] [Rank 0] step:5721/10000 train_time:424288ms step_avg:74.16ms +[2025-09-02 
09:06:35] [Rank 0] step:5741/10000 train_time:425851ms step_avg:74.18ms +[2025-09-02 09:06:35] [Rank 0] step:5741/10000 train_time:425851ms step_avg:74.18ms +[2025-09-02 09:06:36] [Rank 0] step:5761/10000 train_time:427416ms step_avg:74.19ms +[2025-09-02 09:06:36] [Rank 0] step:5761/10000 train_time:427416ms step_avg:74.19ms +[2025-09-02 09:06:38] [Rank 0] step:5781/10000 train_time:428982ms step_avg:74.21ms +[2025-09-02 09:06:38] [Rank 0] step:5781/10000 train_time:428982ms step_avg:74.21ms +[2025-09-02 09:06:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:06:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:06:51] [Rank 0] PRINT: step:5800/10000 val_loss:4.0715 svd_entropy: attn_qk:H=0.7349,top10E=0.28,eRank=141.2,q75/q25=102.77 attn_vo:H=0.8217,top10E=0.15,eRank=260.2,q75/q25=78.20 mlp_w1:H=0.7633,top10E=0.29,eRank=188.0,q75/q25=14.34 mlp_w2:H=0.8582,top10E=0.13,eRank=310.0,q75/q25=23.03 vo_prod:H=0.7254,top10E=0.26,eRank=130.8,q75/q25=7128.91 train_time:430706ms step_avg:74.26ms +[2025-09-02 09:06:51] [Rank 0] PRINT: step:5800/10000 val_loss:4.0715 svd_entropy: attn_qk:H=0.7349,top10E=0.28,eRank=141.2,q75/q25=102.77 attn_vo:H=0.8217,top10E=0.15,eRank=260.2,q75/q25=78.20 mlp_w1:H=0.7633,top10E=0.29,eRank=188.0,q75/q25=14.34 mlp_w2:H=0.8582,top10E=0.13,eRank=310.0,q75/q25=23.03 vo_prod:H=0.7254,top10E=0.26,eRank=130.8,q75/q25=7128.91 train_time:430706ms step_avg:74.26ms +[2025-09-02 09:06:51] [Rank 0] step:5801/10000 train_time:430716ms step_avg:74.25ms +[2025-09-02 09:06:51] [Rank 0] step:5801/10000 train_time:430716ms step_avg:74.25ms +[2025-09-02 09:06:53] [Rank 0] step:5821/10000 train_time:432147ms step_avg:74.24ms +[2025-09-02 09:06:53] [Rank 0] step:5821/10000 train_time:432147ms step_avg:74.24ms +[2025-09-02 09:06:54] [Rank 0] step:5841/10000 train_time:433708ms 
step_avg:74.25ms +[2025-09-02 09:06:54] [Rank 0] step:5841/10000 train_time:433708ms step_avg:74.25ms +[2025-09-02 09:06:56] [Rank 0] step:5861/10000 train_time:435276ms step_avg:74.27ms +[2025-09-02 09:06:56] [Rank 0] step:5861/10000 train_time:435276ms step_avg:74.27ms +[2025-09-02 09:06:58] [Rank 0] step:5881/10000 train_time:436842ms step_avg:74.28ms +[2025-09-02 09:06:58] [Rank 0] step:5881/10000 train_time:436842ms step_avg:74.28ms +[2025-09-02 09:06:59] [Rank 0] step:5901/10000 train_time:438408ms step_avg:74.29ms +[2025-09-02 09:06:59] [Rank 0] step:5901/10000 train_time:438408ms step_avg:74.29ms +[2025-09-02 09:07:01] [Rank 0] step:5921/10000 train_time:439973ms step_avg:74.31ms +[2025-09-02 09:07:01] [Rank 0] step:5921/10000 train_time:439973ms step_avg:74.31ms +[2025-09-02 09:07:02] [Rank 0] step:5941/10000 train_time:441541ms step_avg:74.32ms +[2025-09-02 09:07:02] [Rank 0] step:5941/10000 train_time:441541ms step_avg:74.32ms +[2025-09-02 09:07:04] [Rank 0] step:5961/10000 train_time:443113ms step_avg:74.34ms +[2025-09-02 09:07:04] [Rank 0] step:5961/10000 train_time:443113ms step_avg:74.34ms +[2025-09-02 09:07:05] [Rank 0] step:5981/10000 train_time:444681ms step_avg:74.35ms +[2025-09-02 09:07:05] [Rank 0] step:5981/10000 train_time:444681ms step_avg:74.35ms +[2025-09-02 09:07:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:07:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:07:19] [Rank 0] PRINT: step:6000/10000 val_loss:4.0435 svd_entropy: attn_qk:H=0.7370,top10E=0.28,eRank=142.9,q75/q25=103.10 attn_vo:H=0.8237,top10E=0.15,eRank=263.1,q75/q25=76.21 mlp_w1:H=0.7659,top10E=0.29,eRank=190.8,q75/q25=14.60 mlp_w2:H=0.8595,top10E=0.13,eRank=312.7,q75/q25=23.26 vo_prod:H=0.7277,top10E=0.25,eRank=132.9,q75/q25=6504.34 train_time:446402ms step_avg:74.40ms +[2025-09-02 09:07:19] [Rank 0] PRINT: step:6000/10000 val_loss:4.0435 svd_entropy: attn_qk:H=0.7370,top10E=0.28,eRank=142.9,q75/q25=103.10 attn_vo:H=0.8237,top10E=0.15,eRank=263.1,q75/q25=76.21 mlp_w1:H=0.7659,top10E=0.29,eRank=190.8,q75/q25=14.60 mlp_w2:H=0.8595,top10E=0.13,eRank=312.7,q75/q25=23.26 vo_prod:H=0.7277,top10E=0.25,eRank=132.9,q75/q25=6504.34 train_time:446402ms step_avg:74.40ms +[2025-09-02 09:07:19] [Rank 0] step:6001/10000 train_time:446413ms step_avg:74.39ms +[2025-09-02 09:07:19] [Rank 0] step:6001/10000 train_time:446413ms step_avg:74.39ms +[2025-09-02 09:07:20] [Rank 0] step:6021/10000 train_time:447837ms step_avg:74.38ms +[2025-09-02 09:07:20] [Rank 0] step:6021/10000 train_time:447837ms step_avg:74.38ms +[2025-09-02 09:07:22] [Rank 0] step:6041/10000 train_time:449403ms step_avg:74.39ms +[2025-09-02 09:07:22] [Rank 0] step:6041/10000 train_time:449403ms step_avg:74.39ms +[2025-09-02 09:07:24] [Rank 0] step:6061/10000 train_time:450980ms step_avg:74.41ms +[2025-09-02 09:07:24] [Rank 0] step:6061/10000 train_time:450980ms step_avg:74.41ms +[2025-09-02 09:07:25] [Rank 0] step:6081/10000 train_time:452553ms step_avg:74.42ms +[2025-09-02 09:07:25] [Rank 0] step:6081/10000 train_time:452553ms step_avg:74.42ms +[2025-09-02 09:07:27] [Rank 0] step:6101/10000 train_time:454124ms step_avg:74.43ms +[2025-09-02 09:07:27] [Rank 0] step:6101/10000 train_time:454124ms step_avg:74.43ms +[2025-09-02 09:07:29] [Rank 0] step:6121/10000 train_time:455955ms step_avg:74.49ms +[2025-09-02 09:07:29] [Rank 0] step:6121/10000 train_time:455955ms step_avg:74.49ms +[2025-09-02 
09:07:30] [Rank 0] step:6141/10000 train_time:457532ms step_avg:74.50ms +[2025-09-02 09:07:30] [Rank 0] step:6141/10000 train_time:457532ms step_avg:74.50ms +[2025-09-02 09:07:32] [Rank 0] step:6161/10000 train_time:459102ms step_avg:74.52ms +[2025-09-02 09:07:32] [Rank 0] step:6161/10000 train_time:459102ms step_avg:74.52ms +[2025-09-02 09:07:33] [Rank 0] step:6181/10000 train_time:460673ms step_avg:74.53ms +[2025-09-02 09:07:33] [Rank 0] step:6181/10000 train_time:460673ms step_avg:74.53ms +[2025-09-02 09:07:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:07:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:07:47] [Rank 0] PRINT: step:6200/10000 val_loss:4.0278 svd_entropy: attn_qk:H=0.7389,top10E=0.28,eRank=144.5,q75/q25=103.77 attn_vo:H=0.8255,top10E=0.15,eRank=265.9,q75/q25=73.99 mlp_w1:H=0.7684,top10E=0.29,eRank=193.5,q75/q25=14.89 mlp_w2:H=0.8606,top10E=0.12,eRank=315.4,q75/q25=23.40 vo_prod:H=0.7300,top10E=0.25,eRank=135.0,q75/q25=6125.52 train_time:462400ms step_avg:74.58ms +[2025-09-02 09:07:47] [Rank 0] PRINT: step:6200/10000 val_loss:4.0278 svd_entropy: attn_qk:H=0.7389,top10E=0.28,eRank=144.5,q75/q25=103.77 attn_vo:H=0.8255,top10E=0.15,eRank=265.9,q75/q25=73.99 mlp_w1:H=0.7684,top10E=0.29,eRank=193.5,q75/q25=14.89 mlp_w2:H=0.8606,top10E=0.12,eRank=315.4,q75/q25=23.40 vo_prod:H=0.7300,top10E=0.25,eRank=135.0,q75/q25=6125.52 train_time:462400ms step_avg:74.58ms +[2025-09-02 09:07:47] [Rank 0] step:6201/10000 train_time:462411ms step_avg:74.57ms +[2025-09-02 09:07:47] [Rank 0] step:6201/10000 train_time:462411ms step_avg:74.57ms +[2025-09-02 09:07:48] [Rank 0] step:6221/10000 train_time:463847ms step_avg:74.56ms +[2025-09-02 09:07:48] [Rank 0] step:6221/10000 train_time:463847ms step_avg:74.56ms +[2025-09-02 09:07:50] [Rank 0] step:6241/10000 train_time:465415ms 
step_avg:74.57ms +[2025-09-02 09:07:50] [Rank 0] step:6241/10000 train_time:465415ms step_avg:74.57ms +[2025-09-02 09:07:52] [Rank 0] step:6261/10000 train_time:467002ms step_avg:74.59ms +[2025-09-02 09:07:52] [Rank 0] step:6261/10000 train_time:467002ms step_avg:74.59ms +[2025-09-02 09:07:53] [Rank 0] step:6281/10000 train_time:468576ms step_avg:74.60ms +[2025-09-02 09:07:53] [Rank 0] step:6281/10000 train_time:468576ms step_avg:74.60ms +[2025-09-02 09:07:55] [Rank 0] step:6301/10000 train_time:470148ms step_avg:74.61ms +[2025-09-02 09:07:55] [Rank 0] step:6301/10000 train_time:470148ms step_avg:74.61ms +[2025-09-02 09:07:56] [Rank 0] step:6321/10000 train_time:471716ms step_avg:74.63ms +[2025-09-02 09:07:56] [Rank 0] step:6321/10000 train_time:471716ms step_avg:74.63ms +[2025-09-02 09:07:58] [Rank 0] step:6341/10000 train_time:473291ms step_avg:74.64ms +[2025-09-02 09:07:58] [Rank 0] step:6341/10000 train_time:473291ms step_avg:74.64ms +[2025-09-02 09:07:59] [Rank 0] step:6361/10000 train_time:474868ms step_avg:74.65ms +[2025-09-02 09:07:59] [Rank 0] step:6361/10000 train_time:474868ms step_avg:74.65ms +[2025-09-02 09:08:01] [Rank 0] step:6381/10000 train_time:476443ms step_avg:74.67ms +[2025-09-02 09:08:01] [Rank 0] step:6381/10000 train_time:476443ms step_avg:74.67ms +[2025-09-02 09:08:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:08:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:08:14] [Rank 0] PRINT: step:6400/10000 val_loss:4.0109 svd_entropy: attn_qk:H=0.7407,top10E=0.27,eRank=146.1,q75/q25=104.68 attn_vo:H=0.8271,top10E=0.15,eRank=268.5,q75/q25=72.09 mlp_w1:H=0.7706,top10E=0.28,eRank=195.9,q75/q25=15.15 mlp_w2:H=0.8616,top10E=0.12,eRank=317.7,q75/q25=23.70 vo_prod:H=0.7320,top10E=0.25,eRank=136.9,q75/q25=5593.10 train_time:478173ms step_avg:74.71ms +[2025-09-02 09:08:14] [Rank 0] PRINT: step:6400/10000 val_loss:4.0109 svd_entropy: attn_qk:H=0.7407,top10E=0.27,eRank=146.1,q75/q25=104.68 attn_vo:H=0.8271,top10E=0.15,eRank=268.5,q75/q25=72.09 mlp_w1:H=0.7706,top10E=0.28,eRank=195.9,q75/q25=15.15 mlp_w2:H=0.8616,top10E=0.12,eRank=317.7,q75/q25=23.70 vo_prod:H=0.7320,top10E=0.25,eRank=136.9,q75/q25=5593.10 train_time:478173ms step_avg:74.71ms +[2025-09-02 09:08:14] [Rank 0] step:6401/10000 train_time:478184ms step_avg:74.70ms +[2025-09-02 09:08:14] [Rank 0] step:6401/10000 train_time:478184ms step_avg:74.70ms +[2025-09-02 09:08:16] [Rank 0] step:6421/10000 train_time:479614ms step_avg:74.69ms +[2025-09-02 09:08:16] [Rank 0] step:6421/10000 train_time:479614ms step_avg:74.69ms +[2025-09-02 09:08:18] [Rank 0] step:6441/10000 train_time:481180ms step_avg:74.71ms +[2025-09-02 09:08:18] [Rank 0] step:6441/10000 train_time:481180ms step_avg:74.71ms +[2025-09-02 09:08:19] [Rank 0] step:6461/10000 train_time:482754ms step_avg:74.72ms +[2025-09-02 09:08:19] [Rank 0] step:6461/10000 train_time:482754ms step_avg:74.72ms +[2025-09-02 09:08:21] [Rank 0] step:6481/10000 train_time:484332ms step_avg:74.73ms +[2025-09-02 09:08:21] [Rank 0] step:6481/10000 train_time:484332ms step_avg:74.73ms +[2025-09-02 09:08:22] [Rank 0] step:6501/10000 train_time:485899ms step_avg:74.74ms +[2025-09-02 09:08:22] [Rank 0] step:6501/10000 train_time:485899ms step_avg:74.74ms +[2025-09-02 09:08:24] [Rank 0] step:6521/10000 train_time:487466ms step_avg:74.75ms +[2025-09-02 09:08:24] [Rank 0] step:6521/10000 train_time:487466ms step_avg:74.75ms +[2025-09-02 
09:08:25] [Rank 0] step:6541/10000 train_time:489036ms step_avg:74.76ms +[2025-09-02 09:08:25] [Rank 0] step:6541/10000 train_time:489036ms step_avg:74.76ms +[2025-09-02 09:08:27] [Rank 0] step:6561/10000 train_time:490609ms step_avg:74.78ms +[2025-09-02 09:08:27] [Rank 0] step:6561/10000 train_time:490609ms step_avg:74.78ms +[2025-09-02 09:08:29] [Rank 0] step:6581/10000 train_time:492177ms step_avg:74.79ms +[2025-09-02 09:08:29] [Rank 0] step:6581/10000 train_time:492177ms step_avg:74.79ms +[2025-09-02 09:08:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:08:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:08:42] [Rank 0] PRINT: step:6600/10000 val_loss:3.9975 svd_entropy: attn_qk:H=0.7424,top10E=0.27,eRank=147.5,q75/q25=104.24 attn_vo:H=0.8286,top10E=0.15,eRank=270.8,q75/q25=70.66 mlp_w1:H=0.7728,top10E=0.28,eRank=198.3,q75/q25=15.38 mlp_w2:H=0.8626,top10E=0.12,eRank=319.9,q75/q25=23.79 vo_prod:H=0.7339,top10E=0.25,eRank=138.8,q75/q25=5326.57 train_time:493904ms step_avg:74.83ms +[2025-09-02 09:08:42] [Rank 0] PRINT: step:6600/10000 val_loss:3.9975 svd_entropy: attn_qk:H=0.7424,top10E=0.27,eRank=147.5,q75/q25=104.24 attn_vo:H=0.8286,top10E=0.15,eRank=270.8,q75/q25=70.66 mlp_w1:H=0.7728,top10E=0.28,eRank=198.3,q75/q25=15.38 mlp_w2:H=0.8626,top10E=0.12,eRank=319.9,q75/q25=23.79 vo_prod:H=0.7339,top10E=0.25,eRank=138.8,q75/q25=5326.57 train_time:493904ms step_avg:74.83ms +[2025-09-02 09:08:42] [Rank 0] step:6601/10000 train_time:493914ms step_avg:74.82ms +[2025-09-02 09:08:42] [Rank 0] step:6601/10000 train_time:493914ms step_avg:74.82ms +[2025-09-02 09:08:44] [Rank 0] step:6621/10000 train_time:495337ms step_avg:74.81ms +[2025-09-02 09:08:44] [Rank 0] step:6621/10000 train_time:495337ms step_avg:74.81ms +[2025-09-02 09:08:45] [Rank 0] step:6641/10000 train_time:496909ms 
step_avg:74.82ms +[2025-09-02 09:08:45] [Rank 0] step:6641/10000 train_time:496909ms step_avg:74.82ms +[2025-09-02 09:08:47] [Rank 0] step:6661/10000 train_time:498482ms step_avg:74.84ms +[2025-09-02 09:08:47] [Rank 0] step:6661/10000 train_time:498482ms step_avg:74.84ms +[2025-09-02 09:08:48] [Rank 0] step:6681/10000 train_time:500068ms step_avg:74.85ms +[2025-09-02 09:08:48] [Rank 0] step:6681/10000 train_time:500068ms step_avg:74.85ms +[2025-09-02 09:08:50] [Rank 0] step:6701/10000 train_time:501673ms step_avg:74.87ms +[2025-09-02 09:08:50] [Rank 0] step:6701/10000 train_time:501673ms step_avg:74.87ms +[2025-09-02 09:08:52] [Rank 0] step:6721/10000 train_time:503274ms step_avg:74.88ms +[2025-09-02 09:08:52] [Rank 0] step:6721/10000 train_time:503274ms step_avg:74.88ms +[2025-09-02 09:08:53] [Rank 0] step:6741/10000 train_time:504869ms step_avg:74.90ms +[2025-09-02 09:08:53] [Rank 0] step:6741/10000 train_time:504869ms step_avg:74.90ms +[2025-09-02 09:08:55] [Rank 0] step:6761/10000 train_time:506469ms step_avg:74.91ms +[2025-09-02 09:08:55] [Rank 0] step:6761/10000 train_time:506469ms step_avg:74.91ms +[2025-09-02 09:08:56] [Rank 0] step:6781/10000 train_time:508071ms step_avg:74.93ms +[2025-09-02 09:08:56] [Rank 0] step:6781/10000 train_time:508071ms step_avg:74.93ms +[2025-09-02 09:08:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:08:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:09:10] [Rank 0] PRINT: step:6800/10000 val_loss:3.9810 svd_entropy: attn_qk:H=0.7438,top10E=0.27,eRank=148.7,q75/q25=104.77 attn_vo:H=0.8300,top10E=0.14,eRank=273.0,q75/q25=68.89 mlp_w1:H=0.7747,top10E=0.28,eRank=200.4,q75/q25=15.67 mlp_w2:H=0.8635,top10E=0.12,eRank=321.9,q75/q25=23.92 vo_prod:H=0.7358,top10E=0.25,eRank=140.5,q75/q25=4975.76 train_time:509834ms step_avg:74.98ms +[2025-09-02 09:09:10] [Rank 0] PRINT: step:6800/10000 val_loss:3.9810 svd_entropy: attn_qk:H=0.7438,top10E=0.27,eRank=148.7,q75/q25=104.77 attn_vo:H=0.8300,top10E=0.14,eRank=273.0,q75/q25=68.89 mlp_w1:H=0.7747,top10E=0.28,eRank=200.4,q75/q25=15.67 mlp_w2:H=0.8635,top10E=0.12,eRank=321.9,q75/q25=23.92 vo_prod:H=0.7358,top10E=0.25,eRank=140.5,q75/q25=4975.76 train_time:509834ms step_avg:74.98ms +[2025-09-02 09:09:10] [Rank 0] step:6801/10000 train_time:509845ms step_avg:74.97ms +[2025-09-02 09:09:10] [Rank 0] step:6801/10000 train_time:509845ms step_avg:74.97ms +[2025-09-02 09:09:12] [Rank 0] step:6821/10000 train_time:511292ms step_avg:74.96ms +[2025-09-02 09:09:12] [Rank 0] step:6821/10000 train_time:511292ms step_avg:74.96ms +[2025-09-02 09:09:13] [Rank 0] step:6841/10000 train_time:512885ms step_avg:74.97ms +[2025-09-02 09:09:13] [Rank 0] step:6841/10000 train_time:512885ms step_avg:74.97ms +[2025-09-02 09:09:15] [Rank 0] step:6861/10000 train_time:514495ms step_avg:74.99ms +[2025-09-02 09:09:15] [Rank 0] step:6861/10000 train_time:514495ms step_avg:74.99ms +[2025-09-02 09:09:16] [Rank 0] step:6881/10000 train_time:516091ms step_avg:75.00ms +[2025-09-02 09:09:16] [Rank 0] step:6881/10000 train_time:516091ms step_avg:75.00ms +[2025-09-02 09:09:18] [Rank 0] step:6901/10000 train_time:517691ms step_avg:75.02ms +[2025-09-02 09:09:18] [Rank 0] step:6901/10000 train_time:517691ms step_avg:75.02ms +[2025-09-02 09:09:20] [Rank 0] step:6921/10000 train_time:519290ms step_avg:75.03ms +[2025-09-02 09:09:20] [Rank 0] step:6921/10000 train_time:519290ms step_avg:75.03ms +[2025-09-02 
09:09:21] [Rank 0] step:6941/10000 train_time:520895ms step_avg:75.05ms +[2025-09-02 09:09:21] [Rank 0] step:6941/10000 train_time:520895ms step_avg:75.05ms +[2025-09-02 09:09:23] [Rank 0] step:6961/10000 train_time:522510ms step_avg:75.06ms +[2025-09-02 09:09:23] [Rank 0] step:6961/10000 train_time:522510ms step_avg:75.06ms +[2025-09-02 09:09:24] [Rank 0] step:6981/10000 train_time:524114ms step_avg:75.08ms +[2025-09-02 09:09:24] [Rank 0] step:6981/10000 train_time:524114ms step_avg:75.08ms +[2025-09-02 09:09:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:09:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:09:38] [Rank 0] PRINT: step:7000/10000 val_loss:3.9632 svd_entropy: attn_qk:H=0.7452,top10E=0.27,eRank=149.9,q75/q25=105.02 attn_vo:H=0.8313,top10E=0.14,eRank=275.0,q75/q25=67.44 mlp_w1:H=0.7764,top10E=0.27,eRank=202.3,q75/q25=15.90 mlp_w2:H=0.8642,top10E=0.12,eRank=323.7,q75/q25=23.99 vo_prod:H=0.7374,top10E=0.24,eRank=142.1,q75/q25=4727.22 train_time:525878ms step_avg:75.13ms +[2025-09-02 09:09:38] [Rank 0] PRINT: step:7000/10000 val_loss:3.9632 svd_entropy: attn_qk:H=0.7452,top10E=0.27,eRank=149.9,q75/q25=105.02 attn_vo:H=0.8313,top10E=0.14,eRank=275.0,q75/q25=67.44 mlp_w1:H=0.7764,top10E=0.27,eRank=202.3,q75/q25=15.90 mlp_w2:H=0.8642,top10E=0.12,eRank=323.7,q75/q25=23.99 vo_prod:H=0.7374,top10E=0.24,eRank=142.1,q75/q25=4727.22 train_time:525878ms step_avg:75.13ms +[2025-09-02 09:09:38] [Rank 0] step:7001/10000 train_time:525888ms step_avg:75.12ms +[2025-09-02 09:09:38] [Rank 0] step:7001/10000 train_time:525888ms step_avg:75.12ms +[2025-09-02 09:09:40] [Rank 0] step:7021/10000 train_time:527353ms step_avg:75.11ms +[2025-09-02 09:09:40] [Rank 0] step:7021/10000 train_time:527353ms step_avg:75.11ms +[2025-09-02 09:09:41] [Rank 0] step:7041/10000 train_time:528953ms 
step_avg:75.12ms +[2025-09-02 09:09:41] [Rank 0] step:7041/10000 train_time:528953ms step_avg:75.12ms +[2025-09-02 09:09:43] [Rank 0] step:7061/10000 train_time:530552ms step_avg:75.14ms +[2025-09-02 09:09:43] [Rank 0] step:7061/10000 train_time:530552ms step_avg:75.14ms +[2025-09-02 09:09:44] [Rank 0] step:7081/10000 train_time:532159ms step_avg:75.15ms +[2025-09-02 09:09:44] [Rank 0] step:7081/10000 train_time:532159ms step_avg:75.15ms +[2025-09-02 09:09:46] [Rank 0] step:7101/10000 train_time:533758ms step_avg:75.17ms +[2025-09-02 09:09:46] [Rank 0] step:7101/10000 train_time:533758ms step_avg:75.17ms +[2025-09-02 09:09:48] [Rank 0] step:7121/10000 train_time:535359ms step_avg:75.18ms +[2025-09-02 09:09:48] [Rank 0] step:7121/10000 train_time:535359ms step_avg:75.18ms +[2025-09-02 09:09:49] [Rank 0] step:7141/10000 train_time:536961ms step_avg:75.19ms +[2025-09-02 09:09:49] [Rank 0] step:7141/10000 train_time:536961ms step_avg:75.19ms +[2025-09-02 09:09:51] [Rank 0] step:7161/10000 train_time:538563ms step_avg:75.21ms +[2025-09-02 09:09:51] [Rank 0] step:7161/10000 train_time:538563ms step_avg:75.21ms +[2025-09-02 09:09:52] [Rank 0] step:7181/10000 train_time:540166ms step_avg:75.22ms +[2025-09-02 09:09:52] [Rank 0] step:7181/10000 train_time:540166ms step_avg:75.22ms +[2025-09-02 09:09:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:09:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:10:06] [Rank 0] PRINT: step:7200/10000 val_loss:3.9544 svd_entropy: attn_qk:H=0.7465,top10E=0.27,eRank=151.0,q75/q25=105.74 attn_vo:H=0.8325,top10E=0.14,eRank=276.9,q75/q25=65.80 mlp_w1:H=0.7780,top10E=0.27,eRank=204.2,q75/q25=16.09 mlp_w2:H=0.8650,top10E=0.12,eRank=325.4,q75/q25=24.12 vo_prod:H=0.7390,top10E=0.24,eRank=143.7,q75/q25=4444.46 train_time:541936ms step_avg:75.27ms +[2025-09-02 09:10:06] [Rank 0] PRINT: step:7200/10000 val_loss:3.9544 svd_entropy: attn_qk:H=0.7465,top10E=0.27,eRank=151.0,q75/q25=105.74 attn_vo:H=0.8325,top10E=0.14,eRank=276.9,q75/q25=65.80 mlp_w1:H=0.7780,top10E=0.27,eRank=204.2,q75/q25=16.09 mlp_w2:H=0.8650,top10E=0.12,eRank=325.4,q75/q25=24.12 vo_prod:H=0.7390,top10E=0.24,eRank=143.7,q75/q25=4444.46 train_time:541936ms step_avg:75.27ms +[2025-09-02 09:10:06] [Rank 0] step:7201/10000 train_time:541947ms step_avg:75.26ms +[2025-09-02 09:10:06] [Rank 0] step:7201/10000 train_time:541947ms step_avg:75.26ms +[2025-09-02 09:10:08] [Rank 0] step:7221/10000 train_time:543395ms step_avg:75.25ms +[2025-09-02 09:10:08] [Rank 0] step:7221/10000 train_time:543395ms step_avg:75.25ms +[2025-09-02 09:10:09] [Rank 0] step:7241/10000 train_time:544991ms step_avg:75.26ms +[2025-09-02 09:10:09] [Rank 0] step:7241/10000 train_time:544991ms step_avg:75.26ms +[2025-09-02 09:10:11] [Rank 0] step:7261/10000 train_time:546588ms step_avg:75.28ms +[2025-09-02 09:10:11] [Rank 0] step:7261/10000 train_time:546588ms step_avg:75.28ms +[2025-09-02 09:10:12] [Rank 0] step:7281/10000 train_time:548194ms step_avg:75.29ms +[2025-09-02 09:10:12] [Rank 0] step:7281/10000 train_time:548194ms step_avg:75.29ms +[2025-09-02 09:10:14] [Rank 0] step:7301/10000 train_time:549795ms step_avg:75.30ms +[2025-09-02 09:10:14] [Rank 0] step:7301/10000 train_time:549795ms step_avg:75.30ms +[2025-09-02 09:10:16] [Rank 0] step:7321/10000 train_time:551403ms step_avg:75.32ms +[2025-09-02 09:10:16] [Rank 0] step:7321/10000 train_time:551403ms step_avg:75.32ms +[2025-09-02 
09:10:17] [Rank 0] step:7341/10000 train_time:553008ms step_avg:75.33ms +[2025-09-02 09:10:17] [Rank 0] step:7341/10000 train_time:553008ms step_avg:75.33ms +[2025-09-02 09:10:19] [Rank 0] step:7361/10000 train_time:554614ms step_avg:75.34ms +[2025-09-02 09:10:19] [Rank 0] step:7361/10000 train_time:554614ms step_avg:75.34ms +[2025-09-02 09:10:20] [Rank 0] step:7381/10000 train_time:556226ms step_avg:75.36ms +[2025-09-02 09:10:20] [Rank 0] step:7381/10000 train_time:556226ms step_avg:75.36ms +[2025-09-02 09:10:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:10:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:10:34] [Rank 0] PRINT: step:7400/10000 val_loss:3.9330 svd_entropy: attn_qk:H=0.7476,top10E=0.26,eRank=152.1,q75/q25=105.40 attn_vo:H=0.8335,top10E=0.14,eRank=278.5,q75/q25=64.39 mlp_w1:H=0.7794,top10E=0.27,eRank=205.8,q75/q25=16.18 mlp_w2:H=0.8657,top10E=0.12,eRank=327.1,q75/q25=24.07 vo_prod:H=0.7404,top10E=0.24,eRank=145.0,q75/q25=4189.45 train_time:557975ms step_avg:75.40ms +[2025-09-02 09:10:34] [Rank 0] PRINT: step:7400/10000 val_loss:3.9330 svd_entropy: attn_qk:H=0.7476,top10E=0.26,eRank=152.1,q75/q25=105.40 attn_vo:H=0.8335,top10E=0.14,eRank=278.5,q75/q25=64.39 mlp_w1:H=0.7794,top10E=0.27,eRank=205.8,q75/q25=16.18 mlp_w2:H=0.8657,top10E=0.12,eRank=327.1,q75/q25=24.07 vo_prod:H=0.7404,top10E=0.24,eRank=145.0,q75/q25=4189.45 train_time:557975ms step_avg:75.40ms +[2025-09-02 09:10:34] [Rank 0] step:7401/10000 train_time:557986ms step_avg:75.39ms +[2025-09-02 09:10:34] [Rank 0] step:7401/10000 train_time:557986ms step_avg:75.39ms +[2025-09-02 09:10:36] [Rank 0] step:7421/10000 train_time:559443ms step_avg:75.39ms +[2025-09-02 09:10:36] [Rank 0] step:7421/10000 train_time:559443ms step_avg:75.39ms +[2025-09-02 09:10:37] [Rank 0] step:7441/10000 train_time:561041ms 
step_avg:75.40ms +[2025-09-02 09:10:37] [Rank 0] step:7441/10000 train_time:561041ms step_avg:75.40ms +[2025-09-02 09:10:39] [Rank 0] step:7461/10000 train_time:562643ms step_avg:75.41ms +[2025-09-02 09:10:39] [Rank 0] step:7461/10000 train_time:562643ms step_avg:75.41ms +[2025-09-02 09:10:40] [Rank 0] step:7481/10000 train_time:564249ms step_avg:75.42ms +[2025-09-02 09:10:40] [Rank 0] step:7481/10000 train_time:564249ms step_avg:75.42ms +[2025-09-02 09:10:42] [Rank 0] step:7501/10000 train_time:565857ms step_avg:75.44ms +[2025-09-02 09:10:42] [Rank 0] step:7501/10000 train_time:565857ms step_avg:75.44ms +[2025-09-02 09:10:44] [Rank 0] step:7521/10000 train_time:567464ms step_avg:75.45ms +[2025-09-02 09:10:44] [Rank 0] step:7521/10000 train_time:567464ms step_avg:75.45ms +[2025-09-02 09:10:45] [Rank 0] step:7541/10000 train_time:569079ms step_avg:75.46ms +[2025-09-02 09:10:45] [Rank 0] step:7541/10000 train_time:569079ms step_avg:75.46ms +[2025-09-02 09:10:47] [Rank 0] step:7561/10000 train_time:570672ms step_avg:75.48ms +[2025-09-02 09:10:47] [Rank 0] step:7561/10000 train_time:570672ms step_avg:75.48ms +[2025-09-02 09:10:48] [Rank 0] step:7581/10000 train_time:572286ms step_avg:75.49ms +[2025-09-02 09:10:48] [Rank 0] step:7581/10000 train_time:572286ms step_avg:75.49ms +[2025-09-02 09:10:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:10:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:11:02] [Rank 0] PRINT: step:7600/10000 val_loss:3.9293 svd_entropy: attn_qk:H=0.7487,top10E=0.26,eRank=153.0,q75/q25=105.23 attn_vo:H=0.8344,top10E=0.14,eRank=280.0,q75/q25=63.15 mlp_w1:H=0.7807,top10E=0.27,eRank=207.4,q75/q25=16.34 mlp_w2:H=0.8663,top10E=0.12,eRank=328.6,q75/q25=24.07 vo_prod:H=0.7417,top10E=0.24,eRank=146.3,q75/q25=3995.99 train_time:574058ms step_avg:75.53ms +[2025-09-02 09:11:02] [Rank 0] PRINT: step:7600/10000 val_loss:3.9293 svd_entropy: attn_qk:H=0.7487,top10E=0.26,eRank=153.0,q75/q25=105.23 attn_vo:H=0.8344,top10E=0.14,eRank=280.0,q75/q25=63.15 mlp_w1:H=0.7807,top10E=0.27,eRank=207.4,q75/q25=16.34 mlp_w2:H=0.8663,top10E=0.12,eRank=328.6,q75/q25=24.07 vo_prod:H=0.7417,top10E=0.24,eRank=146.3,q75/q25=3995.99 train_time:574058ms step_avg:75.53ms +[2025-09-02 09:11:02] [Rank 0] step:7601/10000 train_time:574069ms step_avg:75.53ms +[2025-09-02 09:11:02] [Rank 0] step:7601/10000 train_time:574069ms step_avg:75.53ms +[2025-09-02 09:11:04] [Rank 0] step:7621/10000 train_time:575525ms step_avg:75.52ms +[2025-09-02 09:11:04] [Rank 0] step:7621/10000 train_time:575525ms step_avg:75.52ms +[2025-09-02 09:11:05] [Rank 0] step:7641/10000 train_time:577126ms step_avg:75.53ms +[2025-09-02 09:11:05] [Rank 0] step:7641/10000 train_time:577126ms step_avg:75.53ms +[2025-09-02 09:11:07] [Rank 0] step:7661/10000 train_time:578732ms step_avg:75.54ms +[2025-09-02 09:11:07] [Rank 0] step:7661/10000 train_time:578732ms step_avg:75.54ms +[2025-09-02 09:11:08] [Rank 0] step:7681/10000 train_time:580338ms step_avg:75.56ms +[2025-09-02 09:11:08] [Rank 0] step:7681/10000 train_time:580338ms step_avg:75.56ms +[2025-09-02 09:11:10] [Rank 0] step:7701/10000 train_time:581936ms step_avg:75.57ms +[2025-09-02 09:11:10] [Rank 0] step:7701/10000 train_time:581936ms step_avg:75.57ms +[2025-09-02 09:11:12] [Rank 0] step:7721/10000 train_time:583551ms step_avg:75.58ms +[2025-09-02 09:11:12] [Rank 0] step:7721/10000 train_time:583551ms step_avg:75.58ms +[2025-09-02 
09:11:13] [Rank 0] step:7741/10000 train_time:585156ms step_avg:75.59ms +[2025-09-02 09:11:13] [Rank 0] step:7741/10000 train_time:585156ms step_avg:75.59ms +[2025-09-02 09:11:15] [Rank 0] step:7761/10000 train_time:586769ms step_avg:75.60ms +[2025-09-02 09:11:15] [Rank 0] step:7761/10000 train_time:586769ms step_avg:75.60ms +[2025-09-02 09:11:17] [Rank 0] step:7781/10000 train_time:588379ms step_avg:75.62ms +[2025-09-02 09:11:17] [Rank 0] step:7781/10000 train_time:588379ms step_avg:75.62ms +[2025-09-02 09:11:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:11:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:11:30] [Rank 0] PRINT: step:7800/10000 val_loss:3.9133 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=153.9,q75/q25=105.21 attn_vo:H=0.8352,top10E=0.14,eRank=281.4,q75/q25=62.02 mlp_w1:H=0.7820,top10E=0.27,eRank=208.8,q75/q25=16.38 mlp_w2:H=0.8669,top10E=0.12,eRank=330.0,q75/q25=24.07 vo_prod:H=0.7429,top10E=0.24,eRank=147.5,q75/q25=3777.73 train_time:590154ms step_avg:75.66ms +[2025-09-02 09:11:30] [Rank 0] PRINT: step:7800/10000 val_loss:3.9133 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=153.9,q75/q25=105.21 attn_vo:H=0.8352,top10E=0.14,eRank=281.4,q75/q25=62.02 mlp_w1:H=0.7820,top10E=0.27,eRank=208.8,q75/q25=16.38 mlp_w2:H=0.8669,top10E=0.12,eRank=330.0,q75/q25=24.07 vo_prod:H=0.7429,top10E=0.24,eRank=147.5,q75/q25=3777.73 train_time:590154ms step_avg:75.66ms +[2025-09-02 09:11:30] [Rank 0] step:7801/10000 train_time:590164ms step_avg:75.65ms +[2025-09-02 09:11:30] [Rank 0] step:7801/10000 train_time:590164ms step_avg:75.65ms +[2025-09-02 09:11:32] [Rank 0] step:7821/10000 train_time:591618ms step_avg:75.64ms +[2025-09-02 09:11:32] [Rank 0] step:7821/10000 train_time:591618ms step_avg:75.64ms +[2025-09-02 09:11:33] [Rank 0] step:7841/10000 train_time:593220ms 
step_avg:75.66ms +[2025-09-02 09:11:33] [Rank 0] step:7841/10000 train_time:593220ms step_avg:75.66ms +[2025-09-02 09:11:35] [Rank 0] step:7861/10000 train_time:594829ms step_avg:75.67ms +[2025-09-02 09:11:35] [Rank 0] step:7861/10000 train_time:594829ms step_avg:75.67ms +[2025-09-02 09:11:36] [Rank 0] step:7881/10000 train_time:596438ms step_avg:75.68ms +[2025-09-02 09:11:36] [Rank 0] step:7881/10000 train_time:596438ms step_avg:75.68ms +[2025-09-02 09:11:38] [Rank 0] step:7901/10000 train_time:598041ms step_avg:75.69ms +[2025-09-02 09:11:38] [Rank 0] step:7901/10000 train_time:598041ms step_avg:75.69ms +[2025-09-02 09:11:40] [Rank 0] step:7921/10000 train_time:599643ms step_avg:75.70ms +[2025-09-02 09:11:40] [Rank 0] step:7921/10000 train_time:599643ms step_avg:75.70ms +[2025-09-02 09:11:41] [Rank 0] step:7941/10000 train_time:601257ms step_avg:75.72ms +[2025-09-02 09:11:41] [Rank 0] step:7941/10000 train_time:601257ms step_avg:75.72ms +[2025-09-02 09:11:43] [Rank 0] step:7961/10000 train_time:602865ms step_avg:75.73ms +[2025-09-02 09:11:43] [Rank 0] step:7961/10000 train_time:602865ms step_avg:75.73ms +[2025-09-02 09:11:44] [Rank 0] step:7981/10000 train_time:604467ms step_avg:75.74ms +[2025-09-02 09:11:44] [Rank 0] step:7981/10000 train_time:604467ms step_avg:75.74ms +[2025-09-02 09:11:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:11:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:11:58] [Rank 0] PRINT: step:8000/10000 val_loss:3.8977 svd_entropy: attn_qk:H=0.7506,top10E=0.26,eRank=154.7,q75/q25=104.92 attn_vo:H=0.8360,top10E=0.14,eRank=282.7,q75/q25=61.30 mlp_w1:H=0.7830,top10E=0.27,eRank=210.1,q75/q25=16.54 mlp_w2:H=0.8675,top10E=0.12,eRank=331.3,q75/q25=24.17 vo_prod:H=0.7440,top10E=0.24,eRank=148.6,q75/q25=3631.87 train_time:606233ms step_avg:75.78ms +[2025-09-02 09:11:58] [Rank 0] PRINT: step:8000/10000 val_loss:3.8977 svd_entropy: attn_qk:H=0.7506,top10E=0.26,eRank=154.7,q75/q25=104.92 attn_vo:H=0.8360,top10E=0.14,eRank=282.7,q75/q25=61.30 mlp_w1:H=0.7830,top10E=0.27,eRank=210.1,q75/q25=16.54 mlp_w2:H=0.8675,top10E=0.12,eRank=331.3,q75/q25=24.17 vo_prod:H=0.7440,top10E=0.24,eRank=148.6,q75/q25=3631.87 train_time:606233ms step_avg:75.78ms +[2025-09-02 09:11:58] [Rank 0] step:8001/10000 train_time:606244ms step_avg:75.77ms +[2025-09-02 09:11:58] [Rank 0] step:8001/10000 train_time:606244ms step_avg:75.77ms +[2025-09-02 09:12:00] [Rank 0] step:8021/10000 train_time:607697ms step_avg:75.76ms +[2025-09-02 09:12:00] [Rank 0] step:8021/10000 train_time:607697ms step_avg:75.76ms +[2025-09-02 09:12:01] [Rank 0] step:8041/10000 train_time:609311ms step_avg:75.78ms +[2025-09-02 09:12:01] [Rank 0] step:8041/10000 train_time:609311ms step_avg:75.78ms +[2025-09-02 09:12:03] [Rank 0] step:8061/10000 train_time:610915ms step_avg:75.79ms +[2025-09-02 09:12:03] [Rank 0] step:8061/10000 train_time:610915ms step_avg:75.79ms +[2025-09-02 09:12:04] [Rank 0] step:8081/10000 train_time:612515ms step_avg:75.80ms +[2025-09-02 09:12:04] [Rank 0] step:8081/10000 train_time:612515ms step_avg:75.80ms +[2025-09-02 09:12:06] [Rank 0] step:8101/10000 train_time:614128ms step_avg:75.81ms +[2025-09-02 09:12:06] [Rank 0] step:8101/10000 train_time:614128ms step_avg:75.81ms +[2025-09-02 09:12:08] [Rank 0] step:8121/10000 train_time:615733ms step_avg:75.82ms +[2025-09-02 09:12:08] [Rank 0] step:8121/10000 train_time:615733ms step_avg:75.82ms +[2025-09-02 
09:12:09] [Rank 0] step:8141/10000 train_time:617442ms step_avg:75.84ms +[2025-09-02 09:12:09] [Rank 0] step:8141/10000 train_time:617442ms step_avg:75.84ms +[2025-09-02 09:12:11] [Rank 0] step:8161/10000 train_time:619061ms step_avg:75.86ms +[2025-09-02 09:12:11] [Rank 0] step:8161/10000 train_time:619061ms step_avg:75.86ms +[2025-09-02 09:12:13] [Rank 0] step:8181/10000 train_time:620699ms step_avg:75.87ms +[2025-09-02 09:12:13] [Rank 0] step:8181/10000 train_time:620699ms step_avg:75.87ms +[2025-09-02 09:12:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:12:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:12:26] [Rank 0] PRINT: step:8200/10000 val_loss:3.8874 svd_entropy: attn_qk:H=0.7514,top10E=0.26,eRank=155.4,q75/q25=105.27 attn_vo:H=0.8367,top10E=0.14,eRank=283.8,q75/q25=60.62 mlp_w1:H=0.7839,top10E=0.26,eRank=211.2,q75/q25=16.55 mlp_w2:H=0.8679,top10E=0.12,eRank=332.4,q75/q25=24.24 vo_prod:H=0.7450,top10E=0.23,eRank=149.6,q75/q25=3507.20 train_time:622521ms step_avg:75.92ms +[2025-09-02 09:12:26] [Rank 0] PRINT: step:8200/10000 val_loss:3.8874 svd_entropy: attn_qk:H=0.7514,top10E=0.26,eRank=155.4,q75/q25=105.27 attn_vo:H=0.8367,top10E=0.14,eRank=283.8,q75/q25=60.62 mlp_w1:H=0.7839,top10E=0.26,eRank=211.2,q75/q25=16.55 mlp_w2:H=0.8679,top10E=0.12,eRank=332.4,q75/q25=24.24 vo_prod:H=0.7450,top10E=0.23,eRank=149.6,q75/q25=3507.20 train_time:622521ms step_avg:75.92ms +[2025-09-02 09:12:26] [Rank 0] step:8201/10000 train_time:622532ms step_avg:75.91ms +[2025-09-02 09:12:26] [Rank 0] step:8201/10000 train_time:622532ms step_avg:75.91ms +[2025-09-02 09:12:28] [Rank 0] step:8221/10000 train_time:624021ms step_avg:75.91ms +[2025-09-02 09:12:28] [Rank 0] step:8221/10000 train_time:624021ms step_avg:75.91ms +[2025-09-02 09:12:29] [Rank 0] step:8241/10000 train_time:625667ms 
step_avg:75.92ms +[2025-09-02 09:12:29] [Rank 0] step:8241/10000 train_time:625667ms step_avg:75.92ms +[2025-09-02 09:12:31] [Rank 0] step:8261/10000 train_time:627295ms step_avg:75.93ms +[2025-09-02 09:12:31] [Rank 0] step:8261/10000 train_time:627295ms step_avg:75.93ms +[2025-09-02 09:12:33] [Rank 0] step:8281/10000 train_time:628933ms step_avg:75.95ms +[2025-09-02 09:12:33] [Rank 0] step:8281/10000 train_time:628933ms step_avg:75.95ms +[2025-09-02 09:12:34] [Rank 0] step:8301/10000 train_time:630570ms step_avg:75.96ms +[2025-09-02 09:12:34] [Rank 0] step:8301/10000 train_time:630570ms step_avg:75.96ms +[2025-09-02 09:12:36] [Rank 0] step:8321/10000 train_time:632197ms step_avg:75.98ms +[2025-09-02 09:12:36] [Rank 0] step:8321/10000 train_time:632197ms step_avg:75.98ms +[2025-09-02 09:12:37] [Rank 0] step:8341/10000 train_time:633837ms step_avg:75.99ms +[2025-09-02 09:12:37] [Rank 0] step:8341/10000 train_time:633837ms step_avg:75.99ms +[2025-09-02 09:12:39] [Rank 0] step:8361/10000 train_time:635476ms step_avg:76.00ms +[2025-09-02 09:12:39] [Rank 0] step:8361/10000 train_time:635476ms step_avg:76.00ms +[2025-09-02 09:12:41] [Rank 0] step:8381/10000 train_time:637110ms step_avg:76.02ms +[2025-09-02 09:12:41] [Rank 0] step:8381/10000 train_time:637110ms step_avg:76.02ms +[2025-09-02 09:12:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:12:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:12:54] [Rank 0] PRINT: step:8400/10000 val_loss:3.8765 svd_entropy: attn_qk:H=0.7521,top10E=0.26,eRank=156.0,q75/q25=105.90 attn_vo:H=0.8373,top10E=0.14,eRank=284.8,q75/q25=59.53 mlp_w1:H=0.7849,top10E=0.26,eRank=212.3,q75/q25=16.65 mlp_w2:H=0.8684,top10E=0.12,eRank=333.4,q75/q25=24.21 vo_prod:H=0.7460,top10E=0.23,eRank=150.6,q75/q25=3408.16 train_time:638907ms step_avg:76.06ms +[2025-09-02 09:12:54] [Rank 0] PRINT: step:8400/10000 val_loss:3.8765 svd_entropy: attn_qk:H=0.7521,top10E=0.26,eRank=156.0,q75/q25=105.90 attn_vo:H=0.8373,top10E=0.14,eRank=284.8,q75/q25=59.53 mlp_w1:H=0.7849,top10E=0.26,eRank=212.3,q75/q25=16.65 mlp_w2:H=0.8684,top10E=0.12,eRank=333.4,q75/q25=24.21 vo_prod:H=0.7460,top10E=0.23,eRank=150.6,q75/q25=3408.16 train_time:638907ms step_avg:76.06ms +[2025-09-02 09:12:54] [Rank 0] step:8401/10000 train_time:638918ms step_avg:76.05ms +[2025-09-02 09:12:54] [Rank 0] step:8401/10000 train_time:638918ms step_avg:76.05ms +[2025-09-02 09:12:56] [Rank 0] step:8421/10000 train_time:640388ms step_avg:76.05ms +[2025-09-02 09:12:56] [Rank 0] step:8421/10000 train_time:640388ms step_avg:76.05ms +[2025-09-02 09:12:58] [Rank 0] step:8441/10000 train_time:642022ms step_avg:76.06ms +[2025-09-02 09:12:58] [Rank 0] step:8441/10000 train_time:642022ms step_avg:76.06ms +[2025-09-02 09:12:59] [Rank 0] step:8461/10000 train_time:643650ms step_avg:76.07ms +[2025-09-02 09:12:59] [Rank 0] step:8461/10000 train_time:643650ms step_avg:76.07ms +[2025-09-02 09:13:01] [Rank 0] step:8481/10000 train_time:645287ms step_avg:76.09ms +[2025-09-02 09:13:01] [Rank 0] step:8481/10000 train_time:645287ms step_avg:76.09ms +[2025-09-02 09:13:03] [Rank 0] step:8501/10000 train_time:646944ms step_avg:76.10ms +[2025-09-02 09:13:03] [Rank 0] step:8501/10000 train_time:646944ms step_avg:76.10ms +[2025-09-02 09:13:04] [Rank 0] step:8521/10000 train_time:648584ms step_avg:76.12ms +[2025-09-02 09:13:04] [Rank 0] step:8521/10000 train_time:648584ms step_avg:76.12ms +[2025-09-02 
09:13:06] [Rank 0] step:8541/10000 train_time:650230ms step_avg:76.13ms +[2025-09-02 09:13:06] [Rank 0] step:8541/10000 train_time:650230ms step_avg:76.13ms +[2025-09-02 09:13:07] [Rank 0] step:8561/10000 train_time:651865ms step_avg:76.14ms +[2025-09-02 09:13:07] [Rank 0] step:8561/10000 train_time:651865ms step_avg:76.14ms +[2025-09-02 09:13:09] [Rank 0] step:8581/10000 train_time:653501ms step_avg:76.16ms +[2025-09-02 09:13:09] [Rank 0] step:8581/10000 train_time:653501ms step_avg:76.16ms +[2025-09-02 09:13:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:13:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:13:23] [Rank 0] PRINT: step:8600/10000 val_loss:3.8675 svd_entropy: attn_qk:H=0.7528,top10E=0.26,eRank=156.7,q75/q25=105.41 attn_vo:H=0.8379,top10E=0.14,eRank=285.7,q75/q25=58.90 mlp_w1:H=0.7857,top10E=0.26,eRank=213.3,q75/q25=16.71 mlp_w2:H=0.8688,top10E=0.12,eRank=334.4,q75/q25=24.19 vo_prod:H=0.7467,top10E=0.23,eRank=151.3,q75/q25=3273.88 train_time:655296ms step_avg:76.20ms +[2025-09-02 09:13:23] [Rank 0] PRINT: step:8600/10000 val_loss:3.8675 svd_entropy: attn_qk:H=0.7528,top10E=0.26,eRank=156.7,q75/q25=105.41 attn_vo:H=0.8379,top10E=0.14,eRank=285.7,q75/q25=58.90 mlp_w1:H=0.7857,top10E=0.26,eRank=213.3,q75/q25=16.71 mlp_w2:H=0.8688,top10E=0.12,eRank=334.4,q75/q25=24.19 vo_prod:H=0.7467,top10E=0.23,eRank=151.3,q75/q25=3273.88 train_time:655296ms step_avg:76.20ms +[2025-09-02 09:13:23] [Rank 0] step:8601/10000 train_time:655306ms step_avg:76.19ms +[2025-09-02 09:13:23] [Rank 0] step:8601/10000 train_time:655306ms step_avg:76.19ms +[2025-09-02 09:13:24] [Rank 0] step:8621/10000 train_time:656809ms step_avg:76.19ms +[2025-09-02 09:13:24] [Rank 0] step:8621/10000 train_time:656809ms step_avg:76.19ms +[2025-09-02 09:13:26] [Rank 0] step:8641/10000 train_time:658435ms 
step_avg:76.20ms +[2025-09-02 09:13:26] [Rank 0] step:8641/10000 train_time:658435ms step_avg:76.20ms +[2025-09-02 09:13:28] [Rank 0] step:8661/10000 train_time:660069ms step_avg:76.21ms +[2025-09-02 09:13:28] [Rank 0] step:8661/10000 train_time:660069ms step_avg:76.21ms +[2025-09-02 09:13:29] [Rank 0] step:8681/10000 train_time:661696ms step_avg:76.22ms +[2025-09-02 09:13:29] [Rank 0] step:8681/10000 train_time:661696ms step_avg:76.22ms +[2025-09-02 09:13:31] [Rank 0] step:8701/10000 train_time:663321ms step_avg:76.24ms +[2025-09-02 09:13:31] [Rank 0] step:8701/10000 train_time:663321ms step_avg:76.24ms +[2025-09-02 09:13:33] [Rank 0] step:8721/10000 train_time:664956ms step_avg:76.25ms +[2025-09-02 09:13:33] [Rank 0] step:8721/10000 train_time:664956ms step_avg:76.25ms +[2025-09-02 09:13:34] [Rank 0] step:8741/10000 train_time:666577ms step_avg:76.26ms +[2025-09-02 09:13:34] [Rank 0] step:8741/10000 train_time:666577ms step_avg:76.26ms +[2025-09-02 09:13:36] [Rank 0] step:8761/10000 train_time:668204ms step_avg:76.27ms +[2025-09-02 09:13:36] [Rank 0] step:8761/10000 train_time:668204ms step_avg:76.27ms +[2025-09-02 09:13:37] [Rank 0] step:8781/10000 train_time:669848ms step_avg:76.28ms +[2025-09-02 09:13:37] [Rank 0] step:8781/10000 train_time:669848ms step_avg:76.28ms +[2025-09-02 09:13:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:13:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:13:51] [Rank 0] PRINT: step:8800/10000 val_loss:3.8580 svd_entropy: attn_qk:H=0.7533,top10E=0.26,eRank=157.2,q75/q25=105.25 attn_vo:H=0.8384,top10E=0.14,eRank=286.5,q75/q25=58.26 mlp_w1:H=0.7864,top10E=0.26,eRank=214.2,q75/q25=16.73 mlp_w2:H=0.8691,top10E=0.12,eRank=335.2,q75/q25=24.19 vo_prod:H=0.7475,top10E=0.23,eRank=152.1,q75/q25=3141.82 train_time:671648ms step_avg:76.32ms +[2025-09-02 09:13:51] [Rank 0] PRINT: step:8800/10000 val_loss:3.8580 svd_entropy: attn_qk:H=0.7533,top10E=0.26,eRank=157.2,q75/q25=105.25 attn_vo:H=0.8384,top10E=0.14,eRank=286.5,q75/q25=58.26 mlp_w1:H=0.7864,top10E=0.26,eRank=214.2,q75/q25=16.73 mlp_w2:H=0.8691,top10E=0.12,eRank=335.2,q75/q25=24.19 vo_prod:H=0.7475,top10E=0.23,eRank=152.1,q75/q25=3141.82 train_time:671648ms step_avg:76.32ms +[2025-09-02 09:13:51] [Rank 0] step:8801/10000 train_time:671658ms step_avg:76.32ms +[2025-09-02 09:13:51] [Rank 0] step:8801/10000 train_time:671658ms step_avg:76.32ms +[2025-09-02 09:13:52] [Rank 0] step:8821/10000 train_time:673144ms step_avg:76.31ms +[2025-09-02 09:13:52] [Rank 0] step:8821/10000 train_time:673144ms step_avg:76.31ms +[2025-09-02 09:13:54] [Rank 0] step:8841/10000 train_time:674799ms step_avg:76.33ms +[2025-09-02 09:13:54] [Rank 0] step:8841/10000 train_time:674799ms step_avg:76.33ms +[2025-09-02 09:13:56] [Rank 0] step:8861/10000 train_time:676428ms step_avg:76.34ms +[2025-09-02 09:13:56] [Rank 0] step:8861/10000 train_time:676428ms step_avg:76.34ms +[2025-09-02 09:13:57] [Rank 0] step:8881/10000 train_time:678065ms step_avg:76.35ms +[2025-09-02 09:13:57] [Rank 0] step:8881/10000 train_time:678065ms step_avg:76.35ms +[2025-09-02 09:13:59] [Rank 0] step:8901/10000 train_time:679704ms step_avg:76.36ms +[2025-09-02 09:13:59] [Rank 0] step:8901/10000 train_time:679704ms step_avg:76.36ms +[2025-09-02 09:14:01] [Rank 0] step:8921/10000 train_time:681342ms step_avg:76.38ms +[2025-09-02 09:14:01] [Rank 0] step:8921/10000 train_time:681342ms step_avg:76.38ms +[2025-09-02 
09:14:02] [Rank 0] step:8941/10000 train_time:682986ms step_avg:76.39ms +[2025-09-02 09:14:02] [Rank 0] step:8941/10000 train_time:682986ms step_avg:76.39ms +[2025-09-02 09:14:04] [Rank 0] step:8961/10000 train_time:684616ms step_avg:76.40ms +[2025-09-02 09:14:04] [Rank 0] step:8961/10000 train_time:684616ms step_avg:76.40ms +[2025-09-02 09:14:06] [Rank 0] step:8981/10000 train_time:686248ms step_avg:76.41ms +[2025-09-02 09:14:06] [Rank 0] step:8981/10000 train_time:686248ms step_avg:76.41ms +[2025-09-02 09:14:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:14:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:14:19] [Rank 0] PRINT: step:9000/10000 val_loss:3.8483 svd_entropy: attn_qk:H=0.7539,top10E=0.26,eRank=157.6,q75/q25=105.13 attn_vo:H=0.8388,top10E=0.14,eRank=287.2,q75/q25=57.76 mlp_w1:H=0.7870,top10E=0.26,eRank=214.9,q75/q25=16.72 mlp_w2:H=0.8695,top10E=0.12,eRank=336.0,q75/q25=24.14 vo_prod:H=0.7481,top10E=0.23,eRank=152.8,q75/q25=3103.08 train_time:688044ms step_avg:76.45ms +[2025-09-02 09:14:19] [Rank 0] PRINT: step:9000/10000 val_loss:3.8483 svd_entropy: attn_qk:H=0.7539,top10E=0.26,eRank=157.6,q75/q25=105.13 attn_vo:H=0.8388,top10E=0.14,eRank=287.2,q75/q25=57.76 mlp_w1:H=0.7870,top10E=0.26,eRank=214.9,q75/q25=16.72 mlp_w2:H=0.8695,top10E=0.12,eRank=336.0,q75/q25=24.14 vo_prod:H=0.7481,top10E=0.23,eRank=152.8,q75/q25=3103.08 train_time:688044ms step_avg:76.45ms +[2025-09-02 09:14:19] [Rank 0] step:9001/10000 train_time:688055ms step_avg:76.44ms +[2025-09-02 09:14:19] [Rank 0] step:9001/10000 train_time:688055ms step_avg:76.44ms +[2025-09-02 09:14:21] [Rank 0] step:9021/10000 train_time:689531ms step_avg:76.44ms +[2025-09-02 09:14:21] [Rank 0] step:9021/10000 train_time:689531ms step_avg:76.44ms +[2025-09-02 09:14:22] [Rank 0] step:9041/10000 train_time:691158ms 
step_avg:76.45ms +[2025-09-02 09:14:22] [Rank 0] step:9041/10000 train_time:691158ms step_avg:76.45ms +[2025-09-02 09:14:24] [Rank 0] step:9061/10000 train_time:692804ms step_avg:76.46ms +[2025-09-02 09:14:24] [Rank 0] step:9061/10000 train_time:692804ms step_avg:76.46ms +[2025-09-02 09:14:25] [Rank 0] step:9081/10000 train_time:694448ms step_avg:76.47ms +[2025-09-02 09:14:25] [Rank 0] step:9081/10000 train_time:694448ms step_avg:76.47ms +[2025-09-02 09:14:27] [Rank 0] step:9101/10000 train_time:696104ms step_avg:76.49ms +[2025-09-02 09:14:27] [Rank 0] step:9101/10000 train_time:696104ms step_avg:76.49ms +[2025-09-02 09:14:29] [Rank 0] step:9121/10000 train_time:697743ms step_avg:76.50ms +[2025-09-02 09:14:29] [Rank 0] step:9121/10000 train_time:697743ms step_avg:76.50ms +[2025-09-02 09:14:30] [Rank 0] step:9141/10000 train_time:699367ms step_avg:76.51ms +[2025-09-02 09:14:30] [Rank 0] step:9141/10000 train_time:699367ms step_avg:76.51ms +[2025-09-02 09:14:32] [Rank 0] step:9161/10000 train_time:700992ms step_avg:76.52ms +[2025-09-02 09:14:32] [Rank 0] step:9161/10000 train_time:700992ms step_avg:76.52ms +[2025-09-02 09:14:34] [Rank 0] step:9181/10000 train_time:702657ms step_avg:76.53ms +[2025-09-02 09:14:34] [Rank 0] step:9181/10000 train_time:702657ms step_avg:76.53ms +[2025-09-02 09:14:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:14:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:14:47] [Rank 0] PRINT: step:9200/10000 val_loss:3.8405 svd_entropy: attn_qk:H=0.7543,top10E=0.26,eRank=158.0,q75/q25=105.32 attn_vo:H=0.8392,top10E=0.14,eRank=287.8,q75/q25=57.35 mlp_w1:H=0.7876,top10E=0.26,eRank=215.6,q75/q25=16.78 mlp_w2:H=0.8697,top10E=0.12,eRank=336.7,q75/q25=24.10 vo_prod:H=0.7487,top10E=0.23,eRank=153.4,q75/q25=3068.86 train_time:704456ms step_avg:76.57ms +[2025-09-02 09:14:47] [Rank 0] PRINT: step:9200/10000 val_loss:3.8405 svd_entropy: attn_qk:H=0.7543,top10E=0.26,eRank=158.0,q75/q25=105.32 attn_vo:H=0.8392,top10E=0.14,eRank=287.8,q75/q25=57.35 mlp_w1:H=0.7876,top10E=0.26,eRank=215.6,q75/q25=16.78 mlp_w2:H=0.8697,top10E=0.12,eRank=336.7,q75/q25=24.10 vo_prod:H=0.7487,top10E=0.23,eRank=153.4,q75/q25=3068.86 train_time:704456ms step_avg:76.57ms +[2025-09-02 09:14:47] [Rank 0] step:9201/10000 train_time:704466ms step_avg:76.56ms +[2025-09-02 09:14:47] [Rank 0] step:9201/10000 train_time:704466ms step_avg:76.56ms +[2025-09-02 09:14:49] [Rank 0] step:9221/10000 train_time:705971ms step_avg:76.56ms +[2025-09-02 09:14:49] [Rank 0] step:9221/10000 train_time:705971ms step_avg:76.56ms +[2025-09-02 09:14:50] [Rank 0] step:9241/10000 train_time:707617ms step_avg:76.57ms +[2025-09-02 09:14:50] [Rank 0] step:9241/10000 train_time:707617ms step_avg:76.57ms +[2025-09-02 09:14:52] [Rank 0] step:9261/10000 train_time:709263ms step_avg:76.59ms +[2025-09-02 09:14:52] [Rank 0] step:9261/10000 train_time:709263ms step_avg:76.59ms +[2025-09-02 09:14:54] [Rank 0] step:9281/10000 train_time:710893ms step_avg:76.60ms +[2025-09-02 09:14:54] [Rank 0] step:9281/10000 train_time:710893ms step_avg:76.60ms +[2025-09-02 09:14:55] [Rank 0] step:9301/10000 train_time:712530ms step_avg:76.61ms +[2025-09-02 09:14:55] [Rank 0] step:9301/10000 train_time:712530ms step_avg:76.61ms +[2025-09-02 09:14:57] [Rank 0] step:9321/10000 train_time:714169ms step_avg:76.62ms +[2025-09-02 09:14:57] [Rank 0] step:9321/10000 train_time:714169ms step_avg:76.62ms +[2025-09-02 
09:14:59] [Rank 0] step:9341/10000 train_time:715805ms step_avg:76.63ms +[2025-09-02 09:14:59] [Rank 0] step:9341/10000 train_time:715805ms step_avg:76.63ms +[2025-09-02 09:15:00] [Rank 0] step:9361/10000 train_time:717444ms step_avg:76.64ms +[2025-09-02 09:15:00] [Rank 0] step:9361/10000 train_time:717444ms step_avg:76.64ms +[2025-09-02 09:15:02] [Rank 0] step:9381/10000 train_time:719099ms step_avg:76.65ms +[2025-09-02 09:15:02] [Rank 0] step:9381/10000 train_time:719099ms step_avg:76.65ms +[2025-09-02 09:15:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:15:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:15:15] [Rank 0] PRINT: step:9400/10000 val_loss:3.8327 svd_entropy: attn_qk:H=0.7546,top10E=0.26,eRank=158.4,q75/q25=105.17 attn_vo:H=0.8395,top10E=0.14,eRank=288.4,q75/q25=56.81 mlp_w1:H=0.7880,top10E=0.26,eRank=216.2,q75/q25=16.78 mlp_w2:H=0.8700,top10E=0.12,eRank=337.2,q75/q25=24.06 vo_prod:H=0.7492,top10E=0.23,eRank=153.9,q75/q25=2994.85 train_time:720910ms step_avg:76.69ms +[2025-09-02 09:15:15] [Rank 0] PRINT: step:9400/10000 val_loss:3.8327 svd_entropy: attn_qk:H=0.7546,top10E=0.26,eRank=158.4,q75/q25=105.17 attn_vo:H=0.8395,top10E=0.14,eRank=288.4,q75/q25=56.81 mlp_w1:H=0.7880,top10E=0.26,eRank=216.2,q75/q25=16.78 mlp_w2:H=0.8700,top10E=0.12,eRank=337.2,q75/q25=24.06 vo_prod:H=0.7492,top10E=0.23,eRank=153.9,q75/q25=2994.85 train_time:720910ms step_avg:76.69ms +[2025-09-02 09:15:15] [Rank 0] step:9401/10000 train_time:720920ms step_avg:76.69ms +[2025-09-02 09:15:15] [Rank 0] step:9401/10000 train_time:720920ms step_avg:76.69ms +[2025-09-02 09:15:17] [Rank 0] step:9421/10000 train_time:722396ms step_avg:76.68ms +[2025-09-02 09:15:17] [Rank 0] step:9421/10000 train_time:722396ms step_avg:76.68ms +[2025-09-02 09:15:19] [Rank 0] step:9441/10000 train_time:724031ms 
step_avg:76.69ms +[2025-09-02 09:15:19] [Rank 0] step:9441/10000 train_time:724031ms step_avg:76.69ms +[2025-09-02 09:15:20] [Rank 0] step:9461/10000 train_time:725673ms step_avg:76.70ms +[2025-09-02 09:15:20] [Rank 0] step:9461/10000 train_time:725673ms step_avg:76.70ms +[2025-09-02 09:15:22] [Rank 0] step:9481/10000 train_time:727313ms step_avg:76.71ms +[2025-09-02 09:15:22] [Rank 0] step:9481/10000 train_time:727313ms step_avg:76.71ms +[2025-09-02 09:15:23] [Rank 0] step:9501/10000 train_time:728964ms step_avg:76.72ms +[2025-09-02 09:15:23] [Rank 0] step:9501/10000 train_time:728964ms step_avg:76.72ms +[2025-09-02 09:15:25] [Rank 0] step:9521/10000 train_time:730595ms step_avg:76.74ms +[2025-09-02 09:15:25] [Rank 0] step:9521/10000 train_time:730595ms step_avg:76.74ms +[2025-09-02 09:15:27] [Rank 0] step:9541/10000 train_time:732230ms step_avg:76.75ms +[2025-09-02 09:15:27] [Rank 0] step:9541/10000 train_time:732230ms step_avg:76.75ms +[2025-09-02 09:15:28] [Rank 0] step:9561/10000 train_time:733866ms step_avg:76.76ms +[2025-09-02 09:15:28] [Rank 0] step:9561/10000 train_time:733866ms step_avg:76.76ms +[2025-09-02 09:15:30] [Rank 0] step:9581/10000 train_time:735506ms step_avg:76.77ms +[2025-09-02 09:15:30] [Rank 0] step:9581/10000 train_time:735506ms step_avg:76.77ms +[2025-09-02 09:15:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:15:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:15:43] [Rank 0] PRINT: step:9600/10000 val_loss:3.8265 svd_entropy: attn_qk:H=0.7549,top10E=0.26,eRank=158.6,q75/q25=105.09 attn_vo:H=0.8397,top10E=0.14,eRank=288.8,q75/q25=56.41 mlp_w1:H=0.7884,top10E=0.26,eRank=216.6,q75/q25=16.80 mlp_w2:H=0.8702,top10E=0.11,eRank=337.7,q75/q25=24.06 vo_prod:H=0.7496,top10E=0.23,eRank=154.3,q75/q25=2946.84 train_time:737317ms step_avg:76.80ms +[2025-09-02 09:15:43] [Rank 0] PRINT: step:9600/10000 val_loss:3.8265 svd_entropy: attn_qk:H=0.7549,top10E=0.26,eRank=158.6,q75/q25=105.09 attn_vo:H=0.8397,top10E=0.14,eRank=288.8,q75/q25=56.41 mlp_w1:H=0.7884,top10E=0.26,eRank=216.6,q75/q25=16.80 mlp_w2:H=0.8702,top10E=0.11,eRank=337.7,q75/q25=24.06 vo_prod:H=0.7496,top10E=0.23,eRank=154.3,q75/q25=2946.84 train_time:737317ms step_avg:76.80ms +[2025-09-02 09:15:43] [Rank 0] step:9601/10000 train_time:737327ms step_avg:76.80ms +[2025-09-02 09:15:43] [Rank 0] step:9601/10000 train_time:737327ms step_avg:76.80ms +[2025-09-02 09:15:45] [Rank 0] step:9621/10000 train_time:738820ms step_avg:76.79ms +[2025-09-02 09:15:45] [Rank 0] step:9621/10000 train_time:738820ms step_avg:76.79ms +[2025-09-02 09:15:47] [Rank 0] step:9641/10000 train_time:740461ms step_avg:76.80ms +[2025-09-02 09:15:47] [Rank 0] step:9641/10000 train_time:740461ms step_avg:76.80ms +[2025-09-02 09:15:48] [Rank 0] step:9661/10000 train_time:742125ms step_avg:76.82ms +[2025-09-02 09:15:48] [Rank 0] step:9661/10000 train_time:742125ms step_avg:76.82ms +[2025-09-02 09:15:50] [Rank 0] step:9681/10000 train_time:743784ms step_avg:76.83ms +[2025-09-02 09:15:50] [Rank 0] step:9681/10000 train_time:743784ms step_avg:76.83ms +[2025-09-02 09:15:52] [Rank 0] step:9701/10000 train_time:745459ms step_avg:76.84ms +[2025-09-02 09:15:52] [Rank 0] step:9701/10000 train_time:745459ms step_avg:76.84ms +[2025-09-02 09:15:53] [Rank 0] step:9721/10000 train_time:747115ms step_avg:76.86ms +[2025-09-02 09:15:53] [Rank 0] step:9721/10000 train_time:747115ms step_avg:76.86ms +[2025-09-02 
09:15:55] [Rank 0] step:9741/10000 train_time:748796ms step_avg:76.87ms +[2025-09-02 09:15:55] [Rank 0] step:9741/10000 train_time:748796ms step_avg:76.87ms +[2025-09-02 09:15:57] [Rank 0] step:9761/10000 train_time:750459ms step_avg:76.88ms +[2025-09-02 09:15:57] [Rank 0] step:9761/10000 train_time:750459ms step_avg:76.88ms +[2025-09-02 09:15:58] [Rank 0] step:9781/10000 train_time:752135ms step_avg:76.90ms +[2025-09-02 09:15:58] [Rank 0] step:9781/10000 train_time:752135ms step_avg:76.90ms +[2025-09-02 09:16:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:16:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:16:12] [Rank 0] PRINT: step:9800/10000 val_loss:3.8205 svd_entropy: attn_qk:H=0.7551,top10E=0.26,eRank=158.8,q75/q25=105.02 attn_vo:H=0.8399,top10E=0.13,eRank=289.1,q75/q25=56.28 mlp_w1:H=0.7887,top10E=0.26,eRank=217.0,q75/q25=16.80 mlp_w2:H=0.8704,top10E=0.11,eRank=338.1,q75/q25=24.05 vo_prod:H=0.7500,top10E=0.23,eRank=154.7,q75/q25=2914.39 train_time:753978ms step_avg:76.94ms +[2025-09-02 09:16:12] [Rank 0] PRINT: step:9800/10000 val_loss:3.8205 svd_entropy: attn_qk:H=0.7551,top10E=0.26,eRank=158.8,q75/q25=105.02 attn_vo:H=0.8399,top10E=0.13,eRank=289.1,q75/q25=56.28 mlp_w1:H=0.7887,top10E=0.26,eRank=217.0,q75/q25=16.80 mlp_w2:H=0.8704,top10E=0.11,eRank=338.1,q75/q25=24.05 vo_prod:H=0.7500,top10E=0.23,eRank=154.7,q75/q25=2914.39 train_time:753978ms step_avg:76.94ms +[2025-09-02 09:16:12] [Rank 0] step:9801/10000 train_time:753988ms step_avg:76.93ms +[2025-09-02 09:16:12] [Rank 0] step:9801/10000 train_time:753988ms step_avg:76.93ms +[2025-09-02 09:16:14] [Rank 0] step:9821/10000 train_time:755505ms step_avg:76.93ms +[2025-09-02 09:16:14] [Rank 0] step:9821/10000 train_time:755505ms step_avg:76.93ms +[2025-09-02 09:16:15] [Rank 0] step:9841/10000 train_time:757181ms 
step_avg:76.94ms +[2025-09-02 09:16:15] [Rank 0] step:9841/10000 train_time:757181ms step_avg:76.94ms +[2025-09-02 09:16:17] [Rank 0] step:9861/10000 train_time:758833ms step_avg:76.95ms +[2025-09-02 09:16:17] [Rank 0] step:9861/10000 train_time:758833ms step_avg:76.95ms +[2025-09-02 09:16:18] [Rank 0] step:9881/10000 train_time:760483ms step_avg:76.96ms +[2025-09-02 09:16:18] [Rank 0] step:9881/10000 train_time:760483ms step_avg:76.96ms +[2025-09-02 09:16:20] [Rank 0] step:9901/10000 train_time:762148ms step_avg:76.98ms +[2025-09-02 09:16:20] [Rank 0] step:9901/10000 train_time:762148ms step_avg:76.98ms +[2025-09-02 09:16:22] [Rank 0] step:9921/10000 train_time:763805ms step_avg:76.99ms +[2025-09-02 09:16:22] [Rank 0] step:9921/10000 train_time:763805ms step_avg:76.99ms +[2025-09-02 09:16:23] [Rank 0] step:9941/10000 train_time:765469ms step_avg:77.00ms +[2025-09-02 09:16:23] [Rank 0] step:9941/10000 train_time:765469ms step_avg:77.00ms +[2025-09-02 09:16:25] [Rank 0] step:9961/10000 train_time:767129ms step_avg:77.01ms +[2025-09-02 09:16:25] [Rank 0] step:9961/10000 train_time:767129ms step_avg:77.01ms +[2025-09-02 09:16:27] [Rank 0] step:9981/10000 train_time:768789ms step_avg:77.03ms +[2025-09-02 09:16:27] [Rank 0] step:9981/10000 train_time:768789ms step_avg:77.03ms +[2025-09-02 09:16:28] [Rank 0] step:10000/10000 train_time:770374ms step_avg:77.04ms +[2025-09-02 09:16:28] [Rank 0] step:10000/10000 train_time:770374ms step_avg:77.04ms +[2025-09-02 09:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 09:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 09:16:40] [Rank 0] PRINT: step:10000/10000 val_loss:3.8146 svd_entropy: attn_qk:H=0.7553,top10E=0.25,eRank=159.0,q75/q25=104.90 attn_vo:H=0.8401,top10E=0.13,eRank=289.3,q75/q25=56.12 mlp_w1:H=0.7889,top10E=0.26,eRank=217.3,q75/q25=16.81 mlp_w2:H=0.8705,top10E=0.11,eRank=338.4,q75/q25=24.04 vo_prod:H=0.7502,top10E=0.23,eRank=154.9,q75/q25=2883.28 train_time:770635ms step_avg:77.06ms +[2025-09-02 09:16:40] [Rank 0] PRINT: step:10000/10000 val_loss:3.8146 svd_entropy: attn_qk:H=0.7553,top10E=0.25,eRank=159.0,q75/q25=104.90 attn_vo:H=0.8401,top10E=0.13,eRank=289.3,q75/q25=56.12 mlp_w1:H=0.7889,top10E=0.26,eRank=217.3,q75/q25=16.81 mlp_w2:H=0.8705,top10E=0.11,eRank=338.4,q75/q25=24.04 vo_prod:H=0.7502,top10E=0.23,eRank=154.9,q75/q25=2883.28 train_time:770635ms step_avg:77.06ms +[2025-09-02 09:16:40] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 09:16:40 2025 --- +[2025-09-02 09:16:40] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 09:16:40 2025 --- +[2025-09-02 09:16:40] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB +[2025-09-02 09:16:40] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14416 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_45/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..16258167f5c76f761e0154572026267d4fb81733 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "59f391bf-b7d0-467d-a229-7eeec08e3247", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_45/training_log_59f391bf-b7d0-467d-a229-7eeec08e3247.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_45/training_log_59f391bf-b7d0-467d-a229-7eeec08e3247.txt new file mode 100644 index 0000000000000000000000000000000000000000..efeae5fcdd7e8b1d31aa5dc6d03d00792ac5ea1e --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_45/training_log_59f391bf-b7d0-467d-a229-7eeec08e3247.txt @@ -0,0 +1,2984 @@ +[2025-09-03 05:32:41] [Rank 0] PRINT: --- Script Start: Wed Sep 3 05:32:41 2025 --- +[2025-09-03 05:32:41] [Rank 0] PRINT: --- Script Start: Wed Sep 3 05:32:41 2025 --- +[2025-09-03 05:32:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 05:32:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 05:32:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 05:32:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 05:32:41] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-03 05:32:41] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-03 05:32:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_45 +[2025-09-03 05:32:41] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_45 +[2025-09-03 05:32:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 05:32:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 05:32:41] [Rank 0] PRINT: Constructing model... +[2025-09-03 05:32:41] [Rank 0] PRINT: Constructing model... +[2025-09-03 05:32:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 05:32:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 05:32:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 05:32:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 05:32:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 05:32:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 05:32:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-03 05:32:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-03 05:32:43] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-03 05:32:43] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-03 05:32:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 05:32:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 05:32:43] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-03 05:32:43] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-03 05:32:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 05:32:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 05:32:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 05:32:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 05:32:43] [Rank 0] PRINT: Starting warmup... +[2025-09-03 05:32:43] [Rank 0] PRINT: Starting warmup... +[2025-09-03 05:34:12] [Rank 0] PRINT: Warmup complete. +[2025-09-03 05:34:12] [Rank 0] PRINT: Warmup complete. +[2025-09-03 05:34:13] [Rank 0] PRINT: Starting training... +[2025-09-03 05:34:13] [Rank 0] PRINT: Starting training... 
+[2025-09-03 05:34:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:34:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:34:29] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 05:34:29] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 05:34:30] [Rank 0] step:21/10000 train_time:1300ms step_avg:61.89ms +[2025-09-03 05:34:30] [Rank 0] step:21/10000 train_time:1300ms step_avg:61.89ms +[2025-09-03 05:34:32] [Rank 0] step:41/10000 train_time:2690ms step_avg:65.62ms +[2025-09-03 05:34:32] [Rank 0] step:41/10000 train_time:2690ms step_avg:65.62ms +[2025-09-03 05:34:33] [Rank 0] step:61/10000 train_time:4085ms step_avg:66.96ms +[2025-09-03 05:34:33] [Rank 0] step:61/10000 train_time:4085ms step_avg:66.96ms +[2025-09-03 05:34:34] [Rank 0] step:81/10000 train_time:5576ms step_avg:68.84ms +[2025-09-03 05:34:34] [Rank 0] step:81/10000 train_time:5576ms step_avg:68.84ms +[2025-09-03 05:34:36] [Rank 0] step:101/10000 train_time:6973ms step_avg:69.04ms +[2025-09-03 05:34:36] [Rank 0] step:101/10000 train_time:6973ms step_avg:69.04ms +[2025-09-03 05:34:37] [Rank 0] step:121/10000 train_time:8372ms step_avg:69.19ms +[2025-09-03 05:34:37] [Rank 0] step:121/10000 
train_time:8372ms step_avg:69.19ms +[2025-09-03 05:34:39] [Rank 0] step:141/10000 train_time:9772ms step_avg:69.30ms +[2025-09-03 05:34:39] [Rank 0] step:141/10000 train_time:9772ms step_avg:69.30ms +[2025-09-03 05:34:40] [Rank 0] step:161/10000 train_time:11173ms step_avg:69.40ms +[2025-09-03 05:34:40] [Rank 0] step:161/10000 train_time:11173ms step_avg:69.40ms +[2025-09-03 05:34:41] [Rank 0] step:181/10000 train_time:12573ms step_avg:69.46ms +[2025-09-03 05:34:41] [Rank 0] step:181/10000 train_time:12573ms step_avg:69.46ms +[2025-09-03 05:34:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:34:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:34:55] [Rank 0] PRINT: step:200/10000 val_loss:6.5094 svd_entropy: attn_qk:H=0.4469,top10E=0.81,eRank=34.4,q75/q25=11.97 attn_vo:H=0.5467,top10E=0.64,eRank=115.8,q75/q25=113.38 mlp_w1:H=0.3966,top10E=0.77,eRank=15.8,q75/q25=2.64 mlp_w2:H=0.1476,top10E=0.96,eRank=3.8,q75/q25=227.06 vo_prod:H=0.2380,top10E=0.98,eRank=5.5,q75/q25=723.09 train_time:14115ms step_avg:70.58ms +[2025-09-03 05:34:55] [Rank 0] PRINT: step:200/10000 val_loss:6.5094 svd_entropy: attn_qk:H=0.4469,top10E=0.81,eRank=34.4,q75/q25=11.97 attn_vo:H=0.5467,top10E=0.64,eRank=115.8,q75/q25=113.38 mlp_w1:H=0.3966,top10E=0.77,eRank=15.8,q75/q25=2.64 mlp_w2:H=0.1476,top10E=0.96,eRank=3.8,q75/q25=227.06 vo_prod:H=0.2380,top10E=0.98,eRank=5.5,q75/q25=723.09 train_time:14115ms step_avg:70.58ms +[2025-09-03 05:34:55] [Rank 0] step:201/10000 train_time:14126ms step_avg:70.28ms +[2025-09-03 05:34:55] [Rank 0] step:201/10000 train_time:14126ms step_avg:70.28ms +[2025-09-03 05:34:56] [Rank 0] step:221/10000 train_time:15404ms step_avg:69.70ms +[2025-09-03 05:34:56] [Rank 0] step:221/10000 train_time:15404ms step_avg:69.70ms +[2025-09-03 05:34:58] [Rank 0] step:241/10000 
train_time:16803ms step_avg:69.72ms +[2025-09-03 05:34:58] [Rank 0] step:241/10000 train_time:16803ms step_avg:69.72ms +[2025-09-03 05:34:59] [Rank 0] step:261/10000 train_time:18203ms step_avg:69.74ms +[2025-09-03 05:34:59] [Rank 0] step:261/10000 train_time:18203ms step_avg:69.74ms +[2025-09-03 05:35:00] [Rank 0] step:281/10000 train_time:19604ms step_avg:69.76ms +[2025-09-03 05:35:00] [Rank 0] step:281/10000 train_time:19604ms step_avg:69.76ms +[2025-09-03 05:35:02] [Rank 0] step:301/10000 train_time:21005ms step_avg:69.78ms +[2025-09-03 05:35:02] [Rank 0] step:301/10000 train_time:21005ms step_avg:69.78ms +[2025-09-03 05:35:03] [Rank 0] step:321/10000 train_time:22413ms step_avg:69.82ms +[2025-09-03 05:35:03] [Rank 0] step:321/10000 train_time:22413ms step_avg:69.82ms +[2025-09-03 05:35:05] [Rank 0] step:341/10000 train_time:23816ms step_avg:69.84ms +[2025-09-03 05:35:05] [Rank 0] step:341/10000 train_time:23816ms step_avg:69.84ms +[2025-09-03 05:35:06] [Rank 0] step:361/10000 train_time:25219ms step_avg:69.86ms +[2025-09-03 05:35:06] [Rank 0] step:361/10000 train_time:25219ms step_avg:69.86ms +[2025-09-03 05:35:07] [Rank 0] step:381/10000 train_time:26622ms step_avg:69.87ms +[2025-09-03 05:35:07] [Rank 0] step:381/10000 train_time:26622ms step_avg:69.87ms +[2025-09-03 05:35:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:35:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:35:21] [Rank 0] PRINT: step:400/10000 val_loss:5.9982 svd_entropy: attn_qk:H=0.5029,top10E=0.70,eRank=43.2,q75/q25=13.27 attn_vo:H=0.5764,top10E=0.55,eRank=87.6,q75/q25=40.87 mlp_w1:H=0.4390,top10E=0.71,eRank=26.7,q75/q25=3.28 mlp_w2:H=0.5403,top10E=0.61,eRank=37.7,q75/q25=16.47 vo_prod:H=0.3987,top10E=0.84,eRank=15.1,q75/q25=292.96 train_time:28166ms step_avg:70.42ms +[2025-09-03 05:35:21] [Rank 0] PRINT: step:400/10000 val_loss:5.9982 svd_entropy: attn_qk:H=0.5029,top10E=0.70,eRank=43.2,q75/q25=13.27 attn_vo:H=0.5764,top10E=0.55,eRank=87.6,q75/q25=40.87 mlp_w1:H=0.4390,top10E=0.71,eRank=26.7,q75/q25=3.28 mlp_w2:H=0.5403,top10E=0.61,eRank=37.7,q75/q25=16.47 vo_prod:H=0.3987,top10E=0.84,eRank=15.1,q75/q25=292.96 train_time:28166ms step_avg:70.42ms +[2025-09-03 05:35:21] [Rank 0] step:401/10000 train_time:28177ms step_avg:70.27ms +[2025-09-03 05:35:21] [Rank 0] step:401/10000 train_time:28177ms step_avg:70.27ms +[2025-09-03 05:35:22] [Rank 0] step:421/10000 train_time:29449ms step_avg:69.95ms +[2025-09-03 05:35:22] [Rank 0] step:421/10000 train_time:29449ms step_avg:69.95ms +[2025-09-03 05:35:23] [Rank 0] step:441/10000 train_time:30852ms step_avg:69.96ms +[2025-09-03 05:35:23] [Rank 0] step:441/10000 train_time:30852ms step_avg:69.96ms +[2025-09-03 05:35:25] [Rank 0] step:461/10000 train_time:32255ms step_avg:69.97ms +[2025-09-03 05:35:25] [Rank 0] step:461/10000 train_time:32255ms step_avg:69.97ms +[2025-09-03 05:35:26] [Rank 0] step:481/10000 train_time:33656ms step_avg:69.97ms +[2025-09-03 05:35:26] [Rank 0] step:481/10000 train_time:33656ms step_avg:69.97ms +[2025-09-03 05:35:28] [Rank 0] step:501/10000 train_time:35060ms step_avg:69.98ms +[2025-09-03 05:35:28] [Rank 0] step:501/10000 train_time:35060ms step_avg:69.98ms +[2025-09-03 05:35:29] [Rank 0] step:521/10000 train_time:36463ms step_avg:69.99ms +[2025-09-03 05:35:29] [Rank 0] step:521/10000 train_time:36463ms step_avg:69.99ms +[2025-09-03 05:35:30] [Rank 0] step:541/10000 
train_time:37866ms step_avg:69.99ms +[2025-09-03 05:35:30] [Rank 0] step:541/10000 train_time:37866ms step_avg:69.99ms +[2025-09-03 05:35:32] [Rank 0] step:561/10000 train_time:39270ms step_avg:70.00ms +[2025-09-03 05:35:32] [Rank 0] step:561/10000 train_time:39270ms step_avg:70.00ms +[2025-09-03 05:35:33] [Rank 0] step:581/10000 train_time:40674ms step_avg:70.01ms +[2025-09-03 05:35:33] [Rank 0] step:581/10000 train_time:40674ms step_avg:70.01ms +[2025-09-03 05:35:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:35:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:35:46] [Rank 0] PRINT: step:600/10000 val_loss:5.6906 svd_entropy: attn_qk:H=0.5390,top10E=0.62,eRank=50.7,q75/q25=14.81 attn_vo:H=0.6109,top10E=0.46,eRank=94.3,q75/q25=29.19 mlp_w1:H=0.4743,top10E=0.66,eRank=36.0,q75/q25=3.59 mlp_w2:H=0.6258,top10E=0.47,eRank=66.1,q75/q25=12.54 vo_prod:H=0.4726,top10E=0.69,eRank=24.3,q75/q25=238.56 train_time:42218ms step_avg:70.36ms +[2025-09-03 05:35:46] [Rank 0] PRINT: step:600/10000 val_loss:5.6906 svd_entropy: attn_qk:H=0.5390,top10E=0.62,eRank=50.7,q75/q25=14.81 attn_vo:H=0.6109,top10E=0.46,eRank=94.3,q75/q25=29.19 mlp_w1:H=0.4743,top10E=0.66,eRank=36.0,q75/q25=3.59 mlp_w2:H=0.6258,top10E=0.47,eRank=66.1,q75/q25=12.54 vo_prod:H=0.4726,top10E=0.69,eRank=24.3,q75/q25=238.56 train_time:42218ms step_avg:70.36ms +[2025-09-03 05:35:46] [Rank 0] step:601/10000 train_time:42228ms step_avg:70.26ms +[2025-09-03 05:35:46] [Rank 0] step:601/10000 train_time:42228ms step_avg:70.26ms +[2025-09-03 05:35:48] [Rank 0] step:621/10000 train_time:43500ms step_avg:70.05ms +[2025-09-03 05:35:48] [Rank 0] step:621/10000 train_time:43500ms step_avg:70.05ms +[2025-09-03 05:35:49] [Rank 0] step:641/10000 train_time:44902ms step_avg:70.05ms +[2025-09-03 05:35:49] [Rank 0] step:641/10000 
train_time:44902ms step_avg:70.05ms +[2025-09-03 05:35:51] [Rank 0] step:661/10000 train_time:46305ms step_avg:70.05ms +[2025-09-03 05:35:51] [Rank 0] step:661/10000 train_time:46305ms step_avg:70.05ms +[2025-09-03 05:35:52] [Rank 0] step:681/10000 train_time:47718ms step_avg:70.07ms +[2025-09-03 05:35:52] [Rank 0] step:681/10000 train_time:47718ms step_avg:70.07ms +[2025-09-03 05:35:53] [Rank 0] step:701/10000 train_time:49122ms step_avg:70.07ms +[2025-09-03 05:35:53] [Rank 0] step:701/10000 train_time:49122ms step_avg:70.07ms +[2025-09-03 05:35:55] [Rank 0] step:721/10000 train_time:50525ms step_avg:70.08ms +[2025-09-03 05:35:55] [Rank 0] step:721/10000 train_time:50525ms step_avg:70.08ms +[2025-09-03 05:35:56] [Rank 0] step:741/10000 train_time:51928ms step_avg:70.08ms +[2025-09-03 05:35:56] [Rank 0] step:741/10000 train_time:51928ms step_avg:70.08ms +[2025-09-03 05:35:58] [Rank 0] step:761/10000 train_time:53344ms step_avg:70.10ms +[2025-09-03 05:35:58] [Rank 0] step:761/10000 train_time:53344ms step_avg:70.10ms +[2025-09-03 05:35:59] [Rank 0] step:781/10000 train_time:54760ms step_avg:70.12ms +[2025-09-03 05:35:59] [Rank 0] step:781/10000 train_time:54760ms step_avg:70.12ms +[2025-09-03 05:36:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:36:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:36:12] [Rank 0] PRINT: step:800/10000 val_loss:5.4647 svd_entropy: attn_qk:H=0.5663,top10E=0.57,eRank=57.1,q75/q25=16.81 attn_vo:H=0.6401,top10E=0.40,eRank=103.8,q75/q25=28.17 mlp_w1:H=0.5071,top10E=0.62,eRank=44.6,q75/q25=3.88 mlp_w2:H=0.6768,top10E=0.38,eRank=91.7,q75/q25=11.07 vo_prod:H=0.5181,top10E=0.60,eRank=32.8,q75/q25=311.58 train_time:56320ms step_avg:70.40ms +[2025-09-03 05:36:12] [Rank 0] PRINT: step:800/10000 val_loss:5.4647 svd_entropy: attn_qk:H=0.5663,top10E=0.57,eRank=57.1,q75/q25=16.81 attn_vo:H=0.6401,top10E=0.40,eRank=103.8,q75/q25=28.17 mlp_w1:H=0.5071,top10E=0.62,eRank=44.6,q75/q25=3.88 mlp_w2:H=0.6768,top10E=0.38,eRank=91.7,q75/q25=11.07 vo_prod:H=0.5181,top10E=0.60,eRank=32.8,q75/q25=311.58 train_time:56320ms step_avg:70.40ms +[2025-09-03 05:36:13] [Rank 0] step:801/10000 train_time:56330ms step_avg:70.32ms +[2025-09-03 05:36:13] [Rank 0] step:801/10000 train_time:56330ms step_avg:70.32ms +[2025-09-03 05:36:14] [Rank 0] step:821/10000 train_time:57613ms step_avg:70.17ms +[2025-09-03 05:36:14] [Rank 0] step:821/10000 train_time:57613ms step_avg:70.17ms +[2025-09-03 05:36:15] [Rank 0] step:841/10000 train_time:59028ms step_avg:70.19ms +[2025-09-03 05:36:15] [Rank 0] step:841/10000 train_time:59028ms step_avg:70.19ms +[2025-09-03 05:36:17] [Rank 0] step:861/10000 train_time:60443ms step_avg:70.20ms +[2025-09-03 05:36:17] [Rank 0] step:861/10000 train_time:60443ms step_avg:70.20ms +[2025-09-03 05:36:18] [Rank 0] step:881/10000 train_time:61859ms step_avg:70.21ms +[2025-09-03 05:36:18] [Rank 0] step:881/10000 train_time:61859ms step_avg:70.21ms +[2025-09-03 05:36:20] [Rank 0] step:901/10000 train_time:63276ms step_avg:70.23ms +[2025-09-03 05:36:20] [Rank 0] step:901/10000 train_time:63276ms step_avg:70.23ms +[2025-09-03 05:36:21] [Rank 0] step:921/10000 train_time:64692ms step_avg:70.24ms +[2025-09-03 05:36:21] [Rank 0] step:921/10000 train_time:64692ms step_avg:70.24ms +[2025-09-03 05:36:22] [Rank 0] step:941/10000 
train_time:66109ms step_avg:70.25ms +[2025-09-03 05:36:22] [Rank 0] step:941/10000 train_time:66109ms step_avg:70.25ms +[2025-09-03 05:36:24] [Rank 0] step:961/10000 train_time:67525ms step_avg:70.27ms +[2025-09-03 05:36:24] [Rank 0] step:961/10000 train_time:67525ms step_avg:70.27ms +[2025-09-03 05:36:25] [Rank 0] step:981/10000 train_time:68942ms step_avg:70.28ms +[2025-09-03 05:36:25] [Rank 0] step:981/10000 train_time:68942ms step_avg:70.28ms +[2025-09-03 05:36:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:36:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:36:38] [Rank 0] PRINT: step:1000/10000 val_loss:5.2944 svd_entropy: attn_qk:H=0.5887,top10E=0.52,eRank=63.2,q75/q25=19.30 attn_vo:H=0.6634,top10E=0.36,eRank=113.7,q75/q25=33.57 mlp_w1:H=0.5338,top10E=0.59,eRank=52.0,q75/q25=4.19 mlp_w2:H=0.7092,top10E=0.33,eRank=113.1,q75/q25=11.46 vo_prod:H=0.5499,top10E=0.53,eRank=40.4,q75/q25=609.33 train_time:70501ms step_avg:70.50ms +[2025-09-03 05:36:38] [Rank 0] PRINT: step:1000/10000 val_loss:5.2944 svd_entropy: attn_qk:H=0.5887,top10E=0.52,eRank=63.2,q75/q25=19.30 attn_vo:H=0.6634,top10E=0.36,eRank=113.7,q75/q25=33.57 mlp_w1:H=0.5338,top10E=0.59,eRank=52.0,q75/q25=4.19 mlp_w2:H=0.7092,top10E=0.33,eRank=113.1,q75/q25=11.46 vo_prod:H=0.5499,top10E=0.53,eRank=40.4,q75/q25=609.33 train_time:70501ms step_avg:70.50ms +[2025-09-03 05:36:39] [Rank 0] step:1001/10000 train_time:70513ms step_avg:70.44ms +[2025-09-03 05:36:39] [Rank 0] step:1001/10000 train_time:70513ms step_avg:70.44ms +[2025-09-03 05:36:40] [Rank 0] step:1021/10000 train_time:71792ms step_avg:70.31ms +[2025-09-03 05:36:40] [Rank 0] step:1021/10000 train_time:71792ms step_avg:70.31ms +[2025-09-03 05:36:41] [Rank 0] step:1041/10000 train_time:73256ms step_avg:70.37ms +[2025-09-03 05:36:41] [Rank 0] step:1041/10000 
train_time:73256ms step_avg:70.37ms +[2025-09-03 05:36:43] [Rank 0] step:1061/10000 train_time:74674ms step_avg:70.38ms +[2025-09-03 05:36:43] [Rank 0] step:1061/10000 train_time:74674ms step_avg:70.38ms +[2025-09-03 05:36:44] [Rank 0] step:1081/10000 train_time:76091ms step_avg:70.39ms +[2025-09-03 05:36:44] [Rank 0] step:1081/10000 train_time:76091ms step_avg:70.39ms +[2025-09-03 05:36:46] [Rank 0] step:1101/10000 train_time:77509ms step_avg:70.40ms +[2025-09-03 05:36:46] [Rank 0] step:1101/10000 train_time:77509ms step_avg:70.40ms +[2025-09-03 05:36:47] [Rank 0] step:1121/10000 train_time:78926ms step_avg:70.41ms +[2025-09-03 05:36:47] [Rank 0] step:1121/10000 train_time:78926ms step_avg:70.41ms +[2025-09-03 05:36:49] [Rank 0] step:1141/10000 train_time:80344ms step_avg:70.42ms +[2025-09-03 05:36:49] [Rank 0] step:1141/10000 train_time:80344ms step_avg:70.42ms +[2025-09-03 05:36:50] [Rank 0] step:1161/10000 train_time:81763ms step_avg:70.42ms +[2025-09-03 05:36:50] [Rank 0] step:1161/10000 train_time:81763ms step_avg:70.42ms +[2025-09-03 05:36:51] [Rank 0] step:1181/10000 train_time:83181ms step_avg:70.43ms +[2025-09-03 05:36:51] [Rank 0] step:1181/10000 train_time:83181ms step_avg:70.43ms +[2025-09-03 05:36:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:36:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:37:04] [Rank 0] PRINT: step:1200/10000 val_loss:5.1366 svd_entropy: attn_qk:H=0.6067,top10E=0.48,eRank=69.1,q75/q25=22.40 attn_vo:H=0.6837,top10E=0.33,eRank=124.2,q75/q25=44.10 mlp_w1:H=0.5558,top10E=0.56,eRank=58.8,q75/q25=4.55 mlp_w2:H=0.7305,top10E=0.30,eRank=130.3,q75/q25=12.97 vo_prod:H=0.5740,top10E=0.49,eRank=47.4,q75/q25=1359.19 train_time:84742ms step_avg:70.62ms +[2025-09-03 05:37:04] [Rank 0] PRINT: step:1200/10000 val_loss:5.1366 svd_entropy: attn_qk:H=0.6067,top10E=0.48,eRank=69.1,q75/q25=22.40 attn_vo:H=0.6837,top10E=0.33,eRank=124.2,q75/q25=44.10 mlp_w1:H=0.5558,top10E=0.56,eRank=58.8,q75/q25=4.55 mlp_w2:H=0.7305,top10E=0.30,eRank=130.3,q75/q25=12.97 vo_prod:H=0.5740,top10E=0.49,eRank=47.4,q75/q25=1359.19 train_time:84742ms step_avg:70.62ms +[2025-09-03 05:37:04] [Rank 0] step:1201/10000 train_time:84754ms step_avg:70.57ms +[2025-09-03 05:37:04] [Rank 0] step:1201/10000 train_time:84754ms step_avg:70.57ms +[2025-09-03 05:37:06] [Rank 0] step:1221/10000 train_time:86061ms step_avg:70.48ms +[2025-09-03 05:37:06] [Rank 0] step:1221/10000 train_time:86061ms step_avg:70.48ms +[2025-09-03 05:37:07] [Rank 0] step:1241/10000 train_time:87478ms step_avg:70.49ms +[2025-09-03 05:37:07] [Rank 0] step:1241/10000 train_time:87478ms step_avg:70.49ms +[2025-09-03 05:37:09] [Rank 0] step:1261/10000 train_time:88895ms step_avg:70.50ms +[2025-09-03 05:37:09] [Rank 0] step:1261/10000 train_time:88895ms step_avg:70.50ms +[2025-09-03 05:37:10] [Rank 0] step:1281/10000 train_time:90313ms step_avg:70.50ms +[2025-09-03 05:37:10] [Rank 0] step:1281/10000 train_time:90313ms step_avg:70.50ms +[2025-09-03 05:37:12] [Rank 0] step:1301/10000 train_time:91731ms step_avg:70.51ms +[2025-09-03 05:37:12] [Rank 0] step:1301/10000 train_time:91731ms step_avg:70.51ms +[2025-09-03 05:37:13] [Rank 0] step:1321/10000 train_time:93149ms step_avg:70.51ms +[2025-09-03 05:37:13] [Rank 0] step:1321/10000 train_time:93149ms step_avg:70.51ms +[2025-09-03 05:37:14] [Rank 0] 
step:1341/10000 train_time:94567ms step_avg:70.52ms +[2025-09-03 05:37:14] [Rank 0] step:1341/10000 train_time:94567ms step_avg:70.52ms +[2025-09-03 05:37:16] [Rank 0] step:1361/10000 train_time:95986ms step_avg:70.53ms +[2025-09-03 05:37:16] [Rank 0] step:1361/10000 train_time:95986ms step_avg:70.53ms +[2025-09-03 05:37:17] [Rank 0] step:1381/10000 train_time:97405ms step_avg:70.53ms +[2025-09-03 05:37:17] [Rank 0] step:1381/10000 train_time:97405ms step_avg:70.53ms +[2025-09-03 05:37:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:37:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:37:30] [Rank 0] PRINT: step:1400/10000 val_loss:5.0104 svd_entropy: attn_qk:H=0.6215,top10E=0.46,eRank=74.6,q75/q25=26.60 attn_vo:H=0.7009,top10E=0.30,eRank=134.5,q75/q25=57.01 mlp_w1:H=0.5761,top10E=0.54,eRank=65.8,q75/q25=4.97 mlp_w2:H=0.7492,top10E=0.27,eRank=147.5,q75/q25=14.58 vo_prod:H=0.5937,top10E=0.45,eRank=54.0,q75/q25=2587.12 train_time:98966ms step_avg:70.69ms +[2025-09-03 05:37:30] [Rank 0] PRINT: step:1400/10000 val_loss:5.0104 svd_entropy: attn_qk:H=0.6215,top10E=0.46,eRank=74.6,q75/q25=26.60 attn_vo:H=0.7009,top10E=0.30,eRank=134.5,q75/q25=57.01 mlp_w1:H=0.5761,top10E=0.54,eRank=65.8,q75/q25=4.97 mlp_w2:H=0.7492,top10E=0.27,eRank=147.5,q75/q25=14.58 vo_prod:H=0.5937,top10E=0.45,eRank=54.0,q75/q25=2587.12 train_time:98966ms step_avg:70.69ms +[2025-09-03 05:37:30] [Rank 0] step:1401/10000 train_time:98978ms step_avg:70.65ms +[2025-09-03 05:37:30] [Rank 0] step:1401/10000 train_time:98978ms step_avg:70.65ms +[2025-09-03 05:37:32] [Rank 0] step:1421/10000 train_time:100267ms step_avg:70.56ms +[2025-09-03 05:37:32] [Rank 0] step:1421/10000 train_time:100267ms step_avg:70.56ms +[2025-09-03 05:37:33] [Rank 0] step:1441/10000 train_time:101684ms step_avg:70.56ms +[2025-09-03 05:37:33] 
[Rank 0] step:1441/10000 train_time:101684ms step_avg:70.56ms +[2025-09-03 05:37:35] [Rank 0] step:1461/10000 train_time:103101ms step_avg:70.57ms +[2025-09-03 05:37:35] [Rank 0] step:1461/10000 train_time:103101ms step_avg:70.57ms +[2025-09-03 05:37:36] [Rank 0] step:1481/10000 train_time:104518ms step_avg:70.57ms +[2025-09-03 05:37:36] [Rank 0] step:1481/10000 train_time:104518ms step_avg:70.57ms +[2025-09-03 05:37:37] [Rank 0] step:1501/10000 train_time:105943ms step_avg:70.58ms +[2025-09-03 05:37:37] [Rank 0] step:1501/10000 train_time:105943ms step_avg:70.58ms +[2025-09-03 05:37:39] [Rank 0] step:1521/10000 train_time:107371ms step_avg:70.59ms +[2025-09-03 05:37:39] [Rank 0] step:1521/10000 train_time:107371ms step_avg:70.59ms +[2025-09-03 05:37:40] [Rank 0] step:1541/10000 train_time:108799ms step_avg:70.60ms +[2025-09-03 05:37:40] [Rank 0] step:1541/10000 train_time:108799ms step_avg:70.60ms +[2025-09-03 05:37:42] [Rank 0] step:1561/10000 train_time:110228ms step_avg:70.61ms +[2025-09-03 05:37:42] [Rank 0] step:1561/10000 train_time:110228ms step_avg:70.61ms +[2025-09-03 05:37:43] [Rank 0] step:1581/10000 train_time:111656ms step_avg:70.62ms +[2025-09-03 05:37:43] [Rank 0] step:1581/10000 train_time:111656ms step_avg:70.62ms +[2025-09-03 05:37:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:37:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:37:56] [Rank 0] PRINT: step:1600/10000 val_loss:4.8753 svd_entropy: attn_qk:H=0.6338,top10E=0.43,eRank=79.2,q75/q25=31.65 attn_vo:H=0.7153,top10E=0.28,eRank=144.3,q75/q25=69.62 mlp_w1:H=0.5938,top10E=0.51,eRank=72.5,q75/q25=5.45 mlp_w2:H=0.7620,top10E=0.25,eRank=160.8,q75/q25=16.55 vo_prod:H=0.6105,top10E=0.42,eRank=60.3,q75/q25=4266.03 train_time:113253ms step_avg:70.78ms +[2025-09-03 05:37:56] [Rank 0] PRINT: step:1600/10000 val_loss:4.8753 svd_entropy: attn_qk:H=0.6338,top10E=0.43,eRank=79.2,q75/q25=31.65 attn_vo:H=0.7153,top10E=0.28,eRank=144.3,q75/q25=69.62 mlp_w1:H=0.5938,top10E=0.51,eRank=72.5,q75/q25=5.45 mlp_w2:H=0.7620,top10E=0.25,eRank=160.8,q75/q25=16.55 vo_prod:H=0.6105,top10E=0.42,eRank=60.3,q75/q25=4266.03 train_time:113253ms step_avg:70.78ms +[2025-09-03 05:37:56] [Rank 0] step:1601/10000 train_time:113265ms step_avg:70.75ms +[2025-09-03 05:37:56] [Rank 0] step:1601/10000 train_time:113265ms step_avg:70.75ms +[2025-09-03 05:37:58] [Rank 0] step:1621/10000 train_time:114572ms step_avg:70.68ms +[2025-09-03 05:37:58] [Rank 0] step:1621/10000 train_time:114572ms step_avg:70.68ms +[2025-09-03 05:37:59] [Rank 0] step:1641/10000 train_time:115999ms step_avg:70.69ms +[2025-09-03 05:37:59] [Rank 0] step:1641/10000 train_time:115999ms step_avg:70.69ms +[2025-09-03 05:38:01] [Rank 0] step:1661/10000 train_time:117427ms step_avg:70.70ms +[2025-09-03 05:38:01] [Rank 0] step:1661/10000 train_time:117427ms step_avg:70.70ms +[2025-09-03 05:38:02] [Rank 0] step:1681/10000 train_time:118858ms step_avg:70.71ms +[2025-09-03 05:38:02] [Rank 0] step:1681/10000 train_time:118858ms step_avg:70.71ms +[2025-09-03 05:38:03] [Rank 0] step:1701/10000 train_time:120286ms step_avg:70.71ms +[2025-09-03 05:38:03] [Rank 0] step:1701/10000 train_time:120286ms step_avg:70.71ms +[2025-09-03 05:38:05] [Rank 0] step:1721/10000 train_time:121715ms step_avg:70.72ms +[2025-09-03 05:38:05] [Rank 0] step:1721/10000 train_time:121715ms step_avg:70.72ms +[2025-09-03 05:38:06] 
[Rank 0] step:1741/10000 train_time:123144ms step_avg:70.73ms +[2025-09-03 05:38:06] [Rank 0] step:1741/10000 train_time:123144ms step_avg:70.73ms +[2025-09-03 05:38:08] [Rank 0] step:1761/10000 train_time:124572ms step_avg:70.74ms +[2025-09-03 05:38:08] [Rank 0] step:1761/10000 train_time:124572ms step_avg:70.74ms +[2025-09-03 05:38:09] [Rank 0] step:1781/10000 train_time:126000ms step_avg:70.75ms +[2025-09-03 05:38:09] [Rank 0] step:1781/10000 train_time:126000ms step_avg:70.75ms +[2025-09-03 05:38:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:38:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:38:22] [Rank 0] PRINT: step:1800/10000 val_loss:4.7732 svd_entropy: attn_qk:H=0.6447,top10E=0.42,eRank=83.8,q75/q25=37.38 attn_vo:H=0.7274,top10E=0.26,eRank=153.4,q75/q25=80.85 mlp_w1:H=0.6097,top10E=0.49,eRank=79.1,q75/q25=5.97 mlp_w2:H=0.7721,top10E=0.23,eRank=172.2,q75/q25=18.63 vo_prod:H=0.6238,top10E=0.40,eRank=65.8,q75/q25=6191.99 train_time:127572ms step_avg:70.87ms +[2025-09-03 05:38:22] [Rank 0] PRINT: step:1800/10000 val_loss:4.7732 svd_entropy: attn_qk:H=0.6447,top10E=0.42,eRank=83.8,q75/q25=37.38 attn_vo:H=0.7274,top10E=0.26,eRank=153.4,q75/q25=80.85 mlp_w1:H=0.6097,top10E=0.49,eRank=79.1,q75/q25=5.97 mlp_w2:H=0.7721,top10E=0.23,eRank=172.2,q75/q25=18.63 vo_prod:H=0.6238,top10E=0.40,eRank=65.8,q75/q25=6191.99 train_time:127572ms step_avg:70.87ms +[2025-09-03 05:38:22] [Rank 0] step:1801/10000 train_time:127583ms step_avg:70.84ms +[2025-09-03 05:38:22] [Rank 0] step:1801/10000 train_time:127583ms step_avg:70.84ms +[2025-09-03 05:38:24] [Rank 0] step:1821/10000 train_time:128890ms step_avg:70.78ms +[2025-09-03 05:38:24] [Rank 0] step:1821/10000 train_time:128890ms step_avg:70.78ms +[2025-09-03 05:38:25] [Rank 0] step:1841/10000 train_time:130319ms step_avg:70.79ms 
+[2025-09-03 05:38:25] [Rank 0] step:1841/10000 train_time:130319ms step_avg:70.79ms +[2025-09-03 05:38:27] [Rank 0] step:1861/10000 train_time:131747ms step_avg:70.79ms +[2025-09-03 05:38:27] [Rank 0] step:1861/10000 train_time:131747ms step_avg:70.79ms +[2025-09-03 05:38:28] [Rank 0] step:1881/10000 train_time:133176ms step_avg:70.80ms +[2025-09-03 05:38:28] [Rank 0] step:1881/10000 train_time:133176ms step_avg:70.80ms +[2025-09-03 05:38:29] [Rank 0] step:1901/10000 train_time:134604ms step_avg:70.81ms +[2025-09-03 05:38:29] [Rank 0] step:1901/10000 train_time:134604ms step_avg:70.81ms +[2025-09-03 05:38:31] [Rank 0] step:1921/10000 train_time:136033ms step_avg:70.81ms +[2025-09-03 05:38:31] [Rank 0] step:1921/10000 train_time:136033ms step_avg:70.81ms +[2025-09-03 05:38:32] [Rank 0] step:1941/10000 train_time:137463ms step_avg:70.82ms +[2025-09-03 05:38:32] [Rank 0] step:1941/10000 train_time:137463ms step_avg:70.82ms +[2025-09-03 05:38:34] [Rank 0] step:1961/10000 train_time:138891ms step_avg:70.83ms +[2025-09-03 05:38:34] [Rank 0] step:1961/10000 train_time:138891ms step_avg:70.83ms +[2025-09-03 05:38:35] [Rank 0] step:1981/10000 train_time:140320ms step_avg:70.83ms +[2025-09-03 05:38:35] [Rank 0] step:1981/10000 train_time:140320ms step_avg:70.83ms +[2025-09-03 05:38:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:38:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:38:48] [Rank 0] PRINT: step:2000/10000 val_loss:4.7058 svd_entropy: attn_qk:H=0.6543,top10E=0.40,eRank=88.2,q75/q25=43.53 attn_vo:H=0.7378,top10E=0.25,eRank=161.8,q75/q25=90.05 mlp_w1:H=0.6238,top10E=0.48,eRank=85.3,q75/q25=6.56 mlp_w2:H=0.7804,top10E=0.22,eRank=182.3,q75/q25=20.51 vo_prod:H=0.6356,top10E=0.38,eRank=71.1,q75/q25=8354.84 train_time:141893ms step_avg:70.95ms +[2025-09-03 05:38:48] [Rank 0] PRINT: step:2000/10000 val_loss:4.7058 svd_entropy: attn_qk:H=0.6543,top10E=0.40,eRank=88.2,q75/q25=43.53 attn_vo:H=0.7378,top10E=0.25,eRank=161.8,q75/q25=90.05 mlp_w1:H=0.6238,top10E=0.48,eRank=85.3,q75/q25=6.56 mlp_w2:H=0.7804,top10E=0.22,eRank=182.3,q75/q25=20.51 vo_prod:H=0.6356,top10E=0.38,eRank=71.1,q75/q25=8354.84 train_time:141893ms step_avg:70.95ms +[2025-09-03 05:38:48] [Rank 0] step:2001/10000 train_time:141905ms step_avg:70.92ms +[2025-09-03 05:38:48] [Rank 0] step:2001/10000 train_time:141905ms step_avg:70.92ms +[2025-09-03 05:38:50] [Rank 0] step:2021/10000 train_time:143213ms step_avg:70.86ms +[2025-09-03 05:38:50] [Rank 0] step:2021/10000 train_time:143213ms step_avg:70.86ms +[2025-09-03 05:38:51] [Rank 0] step:2041/10000 train_time:144762ms step_avg:70.93ms +[2025-09-03 05:38:51] [Rank 0] step:2041/10000 train_time:144762ms step_avg:70.93ms +[2025-09-03 05:38:53] [Rank 0] step:2061/10000 train_time:146190ms step_avg:70.93ms +[2025-09-03 05:38:53] [Rank 0] step:2061/10000 train_time:146190ms step_avg:70.93ms +[2025-09-03 05:38:54] [Rank 0] step:2081/10000 train_time:147618ms step_avg:70.94ms +[2025-09-03 05:38:54] [Rank 0] step:2081/10000 train_time:147618ms step_avg:70.94ms +[2025-09-03 05:38:56] [Rank 0] step:2101/10000 train_time:149047ms step_avg:70.94ms +[2025-09-03 05:38:56] [Rank 0] step:2101/10000 train_time:149047ms step_avg:70.94ms +[2025-09-03 05:38:57] [Rank 0] step:2121/10000 train_time:150476ms step_avg:70.95ms +[2025-09-03 05:38:57] [Rank 0] step:2121/10000 train_time:150476ms step_avg:70.95ms +[2025-09-03 05:38:59] 
[Rank 0] step:2141/10000 train_time:151907ms step_avg:70.95ms +[2025-09-03 05:38:59] [Rank 0] step:2141/10000 train_time:151907ms step_avg:70.95ms +[2025-09-03 05:39:00] [Rank 0] step:2161/10000 train_time:153337ms step_avg:70.96ms +[2025-09-03 05:39:00] [Rank 0] step:2161/10000 train_time:153337ms step_avg:70.96ms +[2025-09-03 05:39:01] [Rank 0] step:2181/10000 train_time:154767ms step_avg:70.96ms +[2025-09-03 05:39:01] [Rank 0] step:2181/10000 train_time:154767ms step_avg:70.96ms +[2025-09-03 05:39:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:39:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:39:14] [Rank 0] PRINT: step:2200/10000 val_loss:4.6315 svd_entropy: attn_qk:H=0.6624,top10E=0.38,eRank=92.1,q75/q25=49.44 attn_vo:H=0.7465,top10E=0.24,eRank=169.4,q75/q25=96.30 mlp_w1:H=0.6358,top10E=0.46,eRank=91.1,q75/q25=7.18 mlp_w2:H=0.7867,top10E=0.21,eRank=190.3,q75/q25=22.45 vo_prod:H=0.6455,top10E=0.36,eRank=75.9,q75/q25=10119.42 train_time:156340ms step_avg:71.06ms +[2025-09-03 05:39:14] [Rank 0] PRINT: step:2200/10000 val_loss:4.6315 svd_entropy: attn_qk:H=0.6624,top10E=0.38,eRank=92.1,q75/q25=49.44 attn_vo:H=0.7465,top10E=0.24,eRank=169.4,q75/q25=96.30 mlp_w1:H=0.6358,top10E=0.46,eRank=91.1,q75/q25=7.18 mlp_w2:H=0.7867,top10E=0.21,eRank=190.3,q75/q25=22.45 vo_prod:H=0.6455,top10E=0.36,eRank=75.9,q75/q25=10119.42 train_time:156340ms step_avg:71.06ms +[2025-09-03 05:39:14] [Rank 0] step:2201/10000 train_time:156352ms step_avg:71.04ms +[2025-09-03 05:39:14] [Rank 0] step:2201/10000 train_time:156352ms step_avg:71.04ms +[2025-09-03 05:39:16] [Rank 0] step:2221/10000 train_time:157646ms step_avg:70.98ms +[2025-09-03 05:39:16] [Rank 0] step:2221/10000 train_time:157646ms step_avg:70.98ms +[2025-09-03 05:39:17] [Rank 0] step:2241/10000 train_time:159107ms step_avg:71.00ms 
+[2025-09-03 05:39:17] [Rank 0] step:2241/10000 train_time:159107ms step_avg:71.00ms +[2025-09-03 05:39:19] [Rank 0] step:2261/10000 train_time:160578ms step_avg:71.02ms +[2025-09-03 05:39:19] [Rank 0] step:2261/10000 train_time:160578ms step_avg:71.02ms +[2025-09-03 05:39:20] [Rank 0] step:2281/10000 train_time:162049ms step_avg:71.04ms +[2025-09-03 05:39:20] [Rank 0] step:2281/10000 train_time:162049ms step_avg:71.04ms +[2025-09-03 05:39:22] [Rank 0] step:2301/10000 train_time:163520ms step_avg:71.06ms +[2025-09-03 05:39:22] [Rank 0] step:2301/10000 train_time:163520ms step_avg:71.06ms +[2025-09-03 05:39:23] [Rank 0] step:2321/10000 train_time:164992ms step_avg:71.09ms +[2025-09-03 05:39:23] [Rank 0] step:2321/10000 train_time:164992ms step_avg:71.09ms +[2025-09-03 05:39:25] [Rank 0] step:2341/10000 train_time:166464ms step_avg:71.11ms +[2025-09-03 05:39:25] [Rank 0] step:2341/10000 train_time:166464ms step_avg:71.11ms +[2025-09-03 05:39:26] [Rank 0] step:2361/10000 train_time:167937ms step_avg:71.13ms +[2025-09-03 05:39:26] [Rank 0] step:2361/10000 train_time:167937ms step_avg:71.13ms +[2025-09-03 05:39:28] [Rank 0] step:2381/10000 train_time:169408ms step_avg:71.15ms +[2025-09-03 05:39:28] [Rank 0] step:2381/10000 train_time:169408ms step_avg:71.15ms +[2025-09-03 05:39:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:39:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:39:41] [Rank 0] PRINT: step:2400/10000 val_loss:4.5565 svd_entropy: attn_qk:H=0.6691,top10E=0.37,eRank=95.4,q75/q25=55.75 attn_vo:H=0.7543,top10E=0.23,eRank=176.7,q75/q25=101.79 mlp_w1:H=0.6463,top10E=0.45,eRank=96.5,q75/q25=7.76 mlp_w2:H=0.7925,top10E=0.20,eRank=198.2,q75/q25=24.23 vo_prod:H=0.6546,top10E=0.34,eRank=80.8,q75/q25=11687.62 train_time:171028ms step_avg:71.26ms +[2025-09-03 05:39:41] [Rank 0] PRINT: step:2400/10000 val_loss:4.5565 svd_entropy: attn_qk:H=0.6691,top10E=0.37,eRank=95.4,q75/q25=55.75 attn_vo:H=0.7543,top10E=0.23,eRank=176.7,q75/q25=101.79 mlp_w1:H=0.6463,top10E=0.45,eRank=96.5,q75/q25=7.76 mlp_w2:H=0.7925,top10E=0.20,eRank=198.2,q75/q25=24.23 vo_prod:H=0.6546,top10E=0.34,eRank=80.8,q75/q25=11687.62 train_time:171028ms step_avg:71.26ms +[2025-09-03 05:39:41] [Rank 0] step:2401/10000 train_time:171039ms step_avg:71.24ms +[2025-09-03 05:39:41] [Rank 0] step:2401/10000 train_time:171039ms step_avg:71.24ms +[2025-09-03 05:39:42] [Rank 0] step:2421/10000 train_time:172391ms step_avg:71.21ms +[2025-09-03 05:39:42] [Rank 0] step:2421/10000 train_time:172391ms step_avg:71.21ms +[2025-09-03 05:39:44] [Rank 0] step:2441/10000 train_time:173860ms step_avg:71.22ms +[2025-09-03 05:39:44] [Rank 0] step:2441/10000 train_time:173860ms step_avg:71.22ms +[2025-09-03 05:39:45] [Rank 0] step:2461/10000 train_time:175331ms step_avg:71.24ms +[2025-09-03 05:39:45] [Rank 0] step:2461/10000 train_time:175331ms step_avg:71.24ms +[2025-09-03 05:39:47] [Rank 0] step:2481/10000 train_time:176801ms step_avg:71.26ms +[2025-09-03 05:39:47] [Rank 0] step:2481/10000 train_time:176801ms step_avg:71.26ms +[2025-09-03 05:39:48] [Rank 0] step:2501/10000 train_time:178273ms step_avg:71.28ms +[2025-09-03 05:39:48] [Rank 0] step:2501/10000 train_time:178273ms step_avg:71.28ms +[2025-09-03 05:39:50] [Rank 0] step:2521/10000 train_time:179744ms step_avg:71.30ms +[2025-09-03 05:39:50] [Rank 0] step:2521/10000 train_time:179744ms step_avg:71.30ms +[2025-09-03 
05:39:51] [Rank 0] step:2541/10000 train_time:181218ms step_avg:71.32ms +[2025-09-03 05:39:51] [Rank 0] step:2541/10000 train_time:181218ms step_avg:71.32ms +[2025-09-03 05:39:53] [Rank 0] step:2561/10000 train_time:182690ms step_avg:71.34ms +[2025-09-03 05:39:53] [Rank 0] step:2561/10000 train_time:182690ms step_avg:71.34ms +[2025-09-03 05:39:54] [Rank 0] step:2581/10000 train_time:184163ms step_avg:71.35ms +[2025-09-03 05:39:54] [Rank 0] step:2581/10000 train_time:184163ms step_avg:71.35ms +[2025-09-03 05:39:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:39:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:40:07] [Rank 0] PRINT: step:2600/10000 val_loss:4.4993 svd_entropy: attn_qk:H=0.6757,top10E=0.36,eRank=98.9,q75/q25=61.60 attn_vo:H=0.7612,top10E=0.22,eRank=183.3,q75/q25=104.95 mlp_w1:H=0.6555,top10E=0.43,eRank=101.5,q75/q25=8.37 mlp_w2:H=0.7974,top10E=0.19,eRank=205.1,q75/q25=26.05 vo_prod:H=0.6624,top10E=0.33,eRank=85.2,q75/q25=12942.96 train_time:185783ms step_avg:71.46ms +[2025-09-03 05:40:07] [Rank 0] PRINT: step:2600/10000 val_loss:4.4993 svd_entropy: attn_qk:H=0.6757,top10E=0.36,eRank=98.9,q75/q25=61.60 attn_vo:H=0.7612,top10E=0.22,eRank=183.3,q75/q25=104.95 mlp_w1:H=0.6555,top10E=0.43,eRank=101.5,q75/q25=8.37 mlp_w2:H=0.7974,top10E=0.19,eRank=205.1,q75/q25=26.05 vo_prod:H=0.6624,top10E=0.33,eRank=85.2,q75/q25=12942.96 train_time:185783ms step_avg:71.46ms +[2025-09-03 05:40:07] [Rank 0] step:2601/10000 train_time:185795ms step_avg:71.43ms +[2025-09-03 05:40:07] [Rank 0] step:2601/10000 train_time:185795ms step_avg:71.43ms +[2025-09-03 05:40:09] [Rank 0] step:2621/10000 train_time:187147ms step_avg:71.40ms +[2025-09-03 05:40:09] [Rank 0] step:2621/10000 train_time:187147ms step_avg:71.40ms +[2025-09-03 05:40:10] [Rank 0] step:2641/10000 train_time:188618ms 
step_avg:71.42ms +[2025-09-03 05:40:10] [Rank 0] step:2641/10000 train_time:188618ms step_avg:71.42ms +[2025-09-03 05:40:12] [Rank 0] step:2661/10000 train_time:190088ms step_avg:71.43ms +[2025-09-03 05:40:12] [Rank 0] step:2661/10000 train_time:190088ms step_avg:71.43ms +[2025-09-03 05:40:13] [Rank 0] step:2681/10000 train_time:191559ms step_avg:71.45ms +[2025-09-03 05:40:13] [Rank 0] step:2681/10000 train_time:191559ms step_avg:71.45ms +[2025-09-03 05:40:15] [Rank 0] step:2701/10000 train_time:193032ms step_avg:71.47ms +[2025-09-03 05:40:15] [Rank 0] step:2701/10000 train_time:193032ms step_avg:71.47ms +[2025-09-03 05:40:16] [Rank 0] step:2721/10000 train_time:194504ms step_avg:71.48ms +[2025-09-03 05:40:16] [Rank 0] step:2721/10000 train_time:194504ms step_avg:71.48ms +[2025-09-03 05:40:18] [Rank 0] step:2741/10000 train_time:195976ms step_avg:71.50ms +[2025-09-03 05:40:18] [Rank 0] step:2741/10000 train_time:195976ms step_avg:71.50ms +[2025-09-03 05:40:19] [Rank 0] step:2761/10000 train_time:197449ms step_avg:71.51ms +[2025-09-03 05:40:19] [Rank 0] step:2761/10000 train_time:197449ms step_avg:71.51ms +[2025-09-03 05:40:21] [Rank 0] step:2781/10000 train_time:198921ms step_avg:71.53ms +[2025-09-03 05:40:21] [Rank 0] step:2781/10000 train_time:198921ms step_avg:71.53ms +[2025-09-03 05:40:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:40:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:40:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.4616 svd_entropy: attn_qk:H=0.6818,top10E=0.35,eRank=102.2,q75/q25=67.94 attn_vo:H=0.7676,top10E=0.21,eRank=189.9,q75/q25=107.05 mlp_w1:H=0.6639,top10E=0.42,eRank=106.2,q75/q25=8.97 mlp_w2:H=0.8018,top10E=0.19,eRank=211.5,q75/q25=27.51 vo_prod:H=0.6697,top10E=0.32,eRank=89.4,q75/q25=13771.18 train_time:200543ms step_avg:71.62ms +[2025-09-03 05:40:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.4616 svd_entropy: attn_qk:H=0.6818,top10E=0.35,eRank=102.2,q75/q25=67.94 attn_vo:H=0.7676,top10E=0.21,eRank=189.9,q75/q25=107.05 mlp_w1:H=0.6639,top10E=0.42,eRank=106.2,q75/q25=8.97 mlp_w2:H=0.8018,top10E=0.19,eRank=211.5,q75/q25=27.51 vo_prod:H=0.6697,top10E=0.32,eRank=89.4,q75/q25=13771.18 train_time:200543ms step_avg:71.62ms +[2025-09-03 05:40:34] [Rank 0] step:2801/10000 train_time:200554ms step_avg:71.60ms +[2025-09-03 05:40:34] [Rank 0] step:2801/10000 train_time:200554ms step_avg:71.60ms +[2025-09-03 05:40:35] [Rank 0] step:2821/10000 train_time:201901ms step_avg:71.57ms +[2025-09-03 05:40:35] [Rank 0] step:2821/10000 train_time:201901ms step_avg:71.57ms +[2025-09-03 05:40:37] [Rank 0] step:2841/10000 train_time:203371ms step_avg:71.58ms +[2025-09-03 05:40:37] [Rank 0] step:2841/10000 train_time:203371ms step_avg:71.58ms +[2025-09-03 05:40:38] [Rank 0] step:2861/10000 train_time:204843ms step_avg:71.60ms +[2025-09-03 05:40:38] [Rank 0] step:2861/10000 train_time:204843ms step_avg:71.60ms +[2025-09-03 05:40:40] [Rank 0] step:2881/10000 train_time:206315ms step_avg:71.61ms +[2025-09-03 05:40:40] [Rank 0] step:2881/10000 train_time:206315ms step_avg:71.61ms +[2025-09-03 05:40:41] [Rank 0] step:2901/10000 train_time:207787ms step_avg:71.63ms +[2025-09-03 05:40:41] [Rank 0] step:2901/10000 train_time:207787ms step_avg:71.63ms +[2025-09-03 05:40:43] [Rank 0] step:2921/10000 train_time:209260ms step_avg:71.64ms +[2025-09-03 05:40:43] [Rank 0] step:2921/10000 train_time:209260ms step_avg:71.64ms +[2025-09-03 
05:40:44] [Rank 0] step:2941/10000 train_time:210733ms step_avg:71.65ms +[2025-09-03 05:40:44] [Rank 0] step:2941/10000 train_time:210733ms step_avg:71.65ms +[2025-09-03 05:40:45] [Rank 0] step:2961/10000 train_time:212207ms step_avg:71.67ms +[2025-09-03 05:40:45] [Rank 0] step:2961/10000 train_time:212207ms step_avg:71.67ms +[2025-09-03 05:40:47] [Rank 0] step:2981/10000 train_time:213688ms step_avg:71.68ms +[2025-09-03 05:40:47] [Rank 0] step:2981/10000 train_time:213688ms step_avg:71.68ms +[2025-09-03 05:40:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:40:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:41:00] [Rank 0] PRINT: step:3000/10000 val_loss:4.4179 svd_entropy: attn_qk:H=0.6872,top10E=0.34,eRank=105.3,q75/q25=72.00 attn_vo:H=0.7732,top10E=0.21,eRank=195.8,q75/q25=107.85 mlp_w1:H=0.6716,top10E=0.41,eRank=110.8,q75/q25=9.61 mlp_w2:H=0.8056,top10E=0.18,eRank=217.2,q75/q25=28.92 vo_prod:H=0.6757,top10E=0.31,eRank=93.1,q75/q25=14230.74 train_time:215319ms step_avg:71.77ms +[2025-09-03 05:41:00] [Rank 0] PRINT: step:3000/10000 val_loss:4.4179 svd_entropy: attn_qk:H=0.6872,top10E=0.34,eRank=105.3,q75/q25=72.00 attn_vo:H=0.7732,top10E=0.21,eRank=195.8,q75/q25=107.85 mlp_w1:H=0.6716,top10E=0.41,eRank=110.8,q75/q25=9.61 mlp_w2:H=0.8056,top10E=0.18,eRank=217.2,q75/q25=28.92 vo_prod:H=0.6757,top10E=0.31,eRank=93.1,q75/q25=14230.74 train_time:215319ms step_avg:71.77ms +[2025-09-03 05:41:00] [Rank 0] step:3001/10000 train_time:215330ms step_avg:71.75ms +[2025-09-03 05:41:00] [Rank 0] step:3001/10000 train_time:215330ms step_avg:71.75ms +[2025-09-03 05:41:02] [Rank 0] step:3021/10000 train_time:216680ms step_avg:71.72ms +[2025-09-03 05:41:02] [Rank 0] step:3021/10000 train_time:216680ms step_avg:71.72ms +[2025-09-03 05:41:03] [Rank 0] step:3041/10000 train_time:218159ms 
step_avg:71.74ms +[2025-09-03 05:41:03] [Rank 0] step:3041/10000 train_time:218159ms step_avg:71.74ms +[2025-09-03 05:41:05] [Rank 0] step:3061/10000 train_time:219639ms step_avg:71.75ms +[2025-09-03 05:41:05] [Rank 0] step:3061/10000 train_time:219639ms step_avg:71.75ms +[2025-09-03 05:41:06] [Rank 0] step:3081/10000 train_time:221118ms step_avg:71.77ms +[2025-09-03 05:41:06] [Rank 0] step:3081/10000 train_time:221118ms step_avg:71.77ms +[2025-09-03 05:41:08] [Rank 0] step:3101/10000 train_time:222598ms step_avg:71.78ms +[2025-09-03 05:41:08] [Rank 0] step:3101/10000 train_time:222598ms step_avg:71.78ms +[2025-09-03 05:41:09] [Rank 0] step:3121/10000 train_time:224079ms step_avg:71.80ms +[2025-09-03 05:41:09] [Rank 0] step:3121/10000 train_time:224079ms step_avg:71.80ms +[2025-09-03 05:41:11] [Rank 0] step:3141/10000 train_time:225560ms step_avg:71.81ms +[2025-09-03 05:41:11] [Rank 0] step:3141/10000 train_time:225560ms step_avg:71.81ms +[2025-09-03 05:41:12] [Rank 0] step:3161/10000 train_time:227042ms step_avg:71.83ms +[2025-09-03 05:41:12] [Rank 0] step:3161/10000 train_time:227042ms step_avg:71.83ms +[2025-09-03 05:41:14] [Rank 0] step:3181/10000 train_time:228522ms step_avg:71.84ms +[2025-09-03 05:41:14] [Rank 0] step:3181/10000 train_time:228522ms step_avg:71.84ms +[2025-09-03 05:41:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:41:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:41:27] [Rank 0] PRINT: step:3200/10000 val_loss:4.3817 svd_entropy: attn_qk:H=0.6924,top10E=0.34,eRank=108.3,q75/q25=77.03 attn_vo:H=0.7782,top10E=0.20,eRank=201.3,q75/q25=107.67 mlp_w1:H=0.6784,top10E=0.40,eRank=114.9,q75/q25=10.24 mlp_w2:H=0.8091,top10E=0.18,eRank=222.6,q75/q25=30.13 vo_prod:H=0.6815,top10E=0.31,eRank=96.9,q75/q25=14394.48 train_time:230152ms step_avg:71.92ms +[2025-09-03 05:41:27] [Rank 0] PRINT: step:3200/10000 val_loss:4.3817 svd_entropy: attn_qk:H=0.6924,top10E=0.34,eRank=108.3,q75/q25=77.03 attn_vo:H=0.7782,top10E=0.20,eRank=201.3,q75/q25=107.67 mlp_w1:H=0.6784,top10E=0.40,eRank=114.9,q75/q25=10.24 mlp_w2:H=0.8091,top10E=0.18,eRank=222.6,q75/q25=30.13 vo_prod:H=0.6815,top10E=0.31,eRank=96.9,q75/q25=14394.48 train_time:230152ms step_avg:71.92ms +[2025-09-03 05:41:27] [Rank 0] step:3201/10000 train_time:230163ms step_avg:71.90ms +[2025-09-03 05:41:27] [Rank 0] step:3201/10000 train_time:230163ms step_avg:71.90ms +[2025-09-03 05:41:28] [Rank 0] step:3221/10000 train_time:231501ms step_avg:71.87ms +[2025-09-03 05:41:28] [Rank 0] step:3221/10000 train_time:231501ms step_avg:71.87ms +[2025-09-03 05:41:30] [Rank 0] step:3241/10000 train_time:232979ms step_avg:71.88ms +[2025-09-03 05:41:30] [Rank 0] step:3241/10000 train_time:232979ms step_avg:71.88ms +[2025-09-03 05:41:31] [Rank 0] step:3261/10000 train_time:234456ms step_avg:71.90ms +[2025-09-03 05:41:31] [Rank 0] step:3261/10000 train_time:234456ms step_avg:71.90ms +[2025-09-03 05:41:33] [Rank 0] step:3281/10000 train_time:235937ms step_avg:71.91ms +[2025-09-03 05:41:33] [Rank 0] step:3281/10000 train_time:235937ms step_avg:71.91ms +[2025-09-03 05:41:34] [Rank 0] step:3301/10000 train_time:237415ms step_avg:71.92ms +[2025-09-03 05:41:34] [Rank 0] step:3301/10000 train_time:237415ms step_avg:71.92ms +[2025-09-03 05:41:36] [Rank 0] step:3321/10000 train_time:238896ms step_avg:71.93ms +[2025-09-03 05:41:36] [Rank 0] step:3321/10000 train_time:238896ms step_avg:71.93ms +[2025-09-03 
05:41:37] [Rank 0] step:3341/10000 train_time:240376ms step_avg:71.95ms +[2025-09-03 05:41:37] [Rank 0] step:3341/10000 train_time:240376ms step_avg:71.95ms +[2025-09-03 05:41:39] [Rank 0] step:3361/10000 train_time:241857ms step_avg:71.96ms +[2025-09-03 05:41:39] [Rank 0] step:3361/10000 train_time:241857ms step_avg:71.96ms +[2025-09-03 05:41:40] [Rank 0] step:3381/10000 train_time:243338ms step_avg:71.97ms +[2025-09-03 05:41:40] [Rank 0] step:3381/10000 train_time:243338ms step_avg:71.97ms +[2025-09-03 05:41:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:41:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:41:53] [Rank 0] PRINT: step:3400/10000 val_loss:4.3439 svd_entropy: attn_qk:H=0.6975,top10E=0.33,eRank=111.6,q75/q25=82.27 attn_vo:H=0.7831,top10E=0.20,eRank=206.9,q75/q25=106.72 mlp_w1:H=0.6847,top10E=0.39,eRank=119.1,q75/q25=10.82 mlp_w2:H=0.8123,top10E=0.17,eRank=227.7,q75/q25=31.54 vo_prod:H=0.6870,top10E=0.30,eRank=100.5,q75/q25=14580.32 train_time:244967ms step_avg:72.05ms +[2025-09-03 05:41:53] [Rank 0] PRINT: step:3400/10000 val_loss:4.3439 svd_entropy: attn_qk:H=0.6975,top10E=0.33,eRank=111.6,q75/q25=82.27 attn_vo:H=0.7831,top10E=0.20,eRank=206.9,q75/q25=106.72 mlp_w1:H=0.6847,top10E=0.39,eRank=119.1,q75/q25=10.82 mlp_w2:H=0.8123,top10E=0.17,eRank=227.7,q75/q25=31.54 vo_prod:H=0.6870,top10E=0.30,eRank=100.5,q75/q25=14580.32 train_time:244967ms step_avg:72.05ms +[2025-09-03 05:41:53] [Rank 0] step:3401/10000 train_time:244978ms step_avg:72.03ms +[2025-09-03 05:41:53] [Rank 0] step:3401/10000 train_time:244978ms step_avg:72.03ms +[2025-09-03 05:41:55] [Rank 0] step:3421/10000 train_time:246333ms step_avg:72.01ms +[2025-09-03 05:41:55] [Rank 0] step:3421/10000 train_time:246333ms step_avg:72.01ms +[2025-09-03 05:41:56] [Rank 0] step:3441/10000 
train_time:247809ms step_avg:72.02ms +[2025-09-03 05:41:56] [Rank 0] step:3441/10000 train_time:247809ms step_avg:72.02ms +[2025-09-03 05:41:58] [Rank 0] step:3461/10000 train_time:249288ms step_avg:72.03ms +[2025-09-03 05:41:58] [Rank 0] step:3461/10000 train_time:249288ms step_avg:72.03ms +[2025-09-03 05:41:59] [Rank 0] step:3481/10000 train_time:250766ms step_avg:72.04ms +[2025-09-03 05:41:59] [Rank 0] step:3481/10000 train_time:250766ms step_avg:72.04ms +[2025-09-03 05:42:01] [Rank 0] step:3501/10000 train_time:252247ms step_avg:72.05ms +[2025-09-03 05:42:01] [Rank 0] step:3501/10000 train_time:252247ms step_avg:72.05ms +[2025-09-03 05:42:02] [Rank 0] step:3521/10000 train_time:253727ms step_avg:72.06ms +[2025-09-03 05:42:02] [Rank 0] step:3521/10000 train_time:253727ms step_avg:72.06ms +[2025-09-03 05:42:04] [Rank 0] step:3541/10000 train_time:255209ms step_avg:72.07ms +[2025-09-03 05:42:04] [Rank 0] step:3541/10000 train_time:255209ms step_avg:72.07ms +[2025-09-03 05:42:05] [Rank 0] step:3561/10000 train_time:256690ms step_avg:72.08ms +[2025-09-03 05:42:05] [Rank 0] step:3561/10000 train_time:256690ms step_avg:72.08ms +[2025-09-03 05:42:07] [Rank 0] step:3581/10000 train_time:258171ms step_avg:72.09ms +[2025-09-03 05:42:07] [Rank 0] step:3581/10000 train_time:258171ms step_avg:72.09ms +[2025-09-03 05:42:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:42:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:42:20] [Rank 0] PRINT: step:3600/10000 val_loss:4.3362 svd_entropy: attn_qk:H=0.7019,top10E=0.32,eRank=114.4,q75/q25=86.52 attn_vo:H=0.7874,top10E=0.19,eRank=212.0,q75/q25=105.58 mlp_w1:H=0.6905,top10E=0.39,eRank=123.0,q75/q25=11.46 mlp_w2:H=0.8151,top10E=0.17,eRank=232.2,q75/q25=32.65 vo_prod:H=0.6918,top10E=0.29,eRank=104.0,q75/q25=14219.34 train_time:259800ms step_avg:72.17ms +[2025-09-03 05:42:20] [Rank 0] PRINT: step:3600/10000 val_loss:4.3362 svd_entropy: attn_qk:H=0.7019,top10E=0.32,eRank=114.4,q75/q25=86.52 attn_vo:H=0.7874,top10E=0.19,eRank=212.0,q75/q25=105.58 mlp_w1:H=0.6905,top10E=0.39,eRank=123.0,q75/q25=11.46 mlp_w2:H=0.8151,top10E=0.17,eRank=232.2,q75/q25=32.65 vo_prod:H=0.6918,top10E=0.29,eRank=104.0,q75/q25=14219.34 train_time:259800ms step_avg:72.17ms +[2025-09-03 05:42:20] [Rank 0] step:3601/10000 train_time:259811ms step_avg:72.15ms +[2025-09-03 05:42:20] [Rank 0] step:3601/10000 train_time:259811ms step_avg:72.15ms +[2025-09-03 05:42:21] [Rank 0] step:3621/10000 train_time:261164ms step_avg:72.12ms +[2025-09-03 05:42:21] [Rank 0] step:3621/10000 train_time:261164ms step_avg:72.12ms +[2025-09-03 05:42:23] [Rank 0] step:3641/10000 train_time:262643ms step_avg:72.13ms +[2025-09-03 05:42:23] [Rank 0] step:3641/10000 train_time:262643ms step_avg:72.13ms +[2025-09-03 05:42:24] [Rank 0] step:3661/10000 train_time:264123ms step_avg:72.15ms +[2025-09-03 05:42:24] [Rank 0] step:3661/10000 train_time:264123ms step_avg:72.15ms +[2025-09-03 05:42:26] [Rank 0] step:3681/10000 train_time:265605ms step_avg:72.16ms +[2025-09-03 05:42:26] [Rank 0] step:3681/10000 train_time:265605ms step_avg:72.16ms +[2025-09-03 05:42:27] [Rank 0] step:3701/10000 train_time:267086ms step_avg:72.17ms +[2025-09-03 05:42:27] [Rank 0] step:3701/10000 train_time:267086ms step_avg:72.17ms +[2025-09-03 05:42:29] [Rank 0] step:3721/10000 train_time:268594ms step_avg:72.18ms +[2025-09-03 05:42:29] [Rank 0] step:3721/10000 train_time:268594ms step_avg:72.18ms +[2025-09-03 
05:42:30] [Rank 0] step:3741/10000 train_time:270111ms step_avg:72.20ms +[2025-09-03 05:42:30] [Rank 0] step:3741/10000 train_time:270111ms step_avg:72.20ms +[2025-09-03 05:42:32] [Rank 0] step:3761/10000 train_time:271627ms step_avg:72.22ms +[2025-09-03 05:42:32] [Rank 0] step:3761/10000 train_time:271627ms step_avg:72.22ms +[2025-09-03 05:42:33] [Rank 0] step:3781/10000 train_time:273146ms step_avg:72.24ms +[2025-09-03 05:42:33] [Rank 0] step:3781/10000 train_time:273146ms step_avg:72.24ms +[2025-09-03 05:42:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:42:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:42:46] [Rank 0] PRINT: step:3800/10000 val_loss:4.2737 svd_entropy: attn_qk:H=0.7059,top10E=0.32,eRank=117.0,q75/q25=90.50 attn_vo:H=0.7914,top10E=0.19,eRank=216.9,q75/q25=104.87 mlp_w1:H=0.6956,top10E=0.38,eRank=126.5,q75/q25=12.05 mlp_w2:H=0.8175,top10E=0.17,eRank=236.2,q75/q25=34.07 vo_prod:H=0.6965,top10E=0.29,eRank=107.3,q75/q25=13804.04 train_time:274816ms step_avg:72.32ms +[2025-09-03 05:42:46] [Rank 0] PRINT: step:3800/10000 val_loss:4.2737 svd_entropy: attn_qk:H=0.7059,top10E=0.32,eRank=117.0,q75/q25=90.50 attn_vo:H=0.7914,top10E=0.19,eRank=216.9,q75/q25=104.87 mlp_w1:H=0.6956,top10E=0.38,eRank=126.5,q75/q25=12.05 mlp_w2:H=0.8175,top10E=0.17,eRank=236.2,q75/q25=34.07 vo_prod:H=0.6965,top10E=0.29,eRank=107.3,q75/q25=13804.04 train_time:274816ms step_avg:72.32ms +[2025-09-03 05:42:46] [Rank 0] step:3801/10000 train_time:274827ms step_avg:72.30ms +[2025-09-03 05:42:46] [Rank 0] step:3801/10000 train_time:274827ms step_avg:72.30ms +[2025-09-03 05:42:48] [Rank 0] step:3821/10000 train_time:276200ms step_avg:72.28ms +[2025-09-03 05:42:48] [Rank 0] step:3821/10000 train_time:276200ms step_avg:72.28ms +[2025-09-03 05:42:49] [Rank 0] step:3841/10000 
train_time:277719ms step_avg:72.30ms +[2025-09-03 05:42:49] [Rank 0] step:3841/10000 train_time:277719ms step_avg:72.30ms +[2025-09-03 05:42:51] [Rank 0] step:3861/10000 train_time:279234ms step_avg:72.32ms +[2025-09-03 05:42:51] [Rank 0] step:3861/10000 train_time:279234ms step_avg:72.32ms +[2025-09-03 05:42:52] [Rank 0] step:3881/10000 train_time:280749ms step_avg:72.34ms +[2025-09-03 05:42:52] [Rank 0] step:3881/10000 train_time:280749ms step_avg:72.34ms +[2025-09-03 05:42:54] [Rank 0] step:3901/10000 train_time:282266ms step_avg:72.36ms +[2025-09-03 05:42:54] [Rank 0] step:3901/10000 train_time:282266ms step_avg:72.36ms +[2025-09-03 05:42:55] [Rank 0] step:3921/10000 train_time:283781ms step_avg:72.37ms +[2025-09-03 05:42:55] [Rank 0] step:3921/10000 train_time:283781ms step_avg:72.37ms +[2025-09-03 05:42:57] [Rank 0] step:3941/10000 train_time:285298ms step_avg:72.39ms +[2025-09-03 05:42:57] [Rank 0] step:3941/10000 train_time:285298ms step_avg:72.39ms +[2025-09-03 05:42:59] [Rank 0] step:3961/10000 train_time:286815ms step_avg:72.41ms +[2025-09-03 05:42:59] [Rank 0] step:3961/10000 train_time:286815ms step_avg:72.41ms +[2025-09-03 05:43:00] [Rank 0] step:3981/10000 train_time:288329ms step_avg:72.43ms +[2025-09-03 05:43:00] [Rank 0] step:3981/10000 train_time:288329ms step_avg:72.43ms +[2025-09-03 05:43:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:43:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:43:13] [Rank 0] PRINT: step:4000/10000 val_loss:4.2469 svd_entropy: attn_qk:H=0.7098,top10E=0.31,eRank=119.6,q75/q25=93.18 attn_vo:H=0.7951,top10E=0.18,eRank=221.5,q75/q25=102.01 mlp_w1:H=0.7007,top10E=0.37,eRank=130.1,q75/q25=12.71 mlp_w2:H=0.8198,top10E=0.16,eRank=240.3,q75/q25=35.34 vo_prod:H=0.7006,top10E=0.28,eRank=110.4,q75/q25=13436.08 train_time:289997ms step_avg:72.50ms +[2025-09-03 05:43:13] [Rank 0] PRINT: step:4000/10000 val_loss:4.2469 svd_entropy: attn_qk:H=0.7098,top10E=0.31,eRank=119.6,q75/q25=93.18 attn_vo:H=0.7951,top10E=0.18,eRank=221.5,q75/q25=102.01 mlp_w1:H=0.7007,top10E=0.37,eRank=130.1,q75/q25=12.71 mlp_w2:H=0.8198,top10E=0.16,eRank=240.3,q75/q25=35.34 vo_prod:H=0.7006,top10E=0.28,eRank=110.4,q75/q25=13436.08 train_time:289997ms step_avg:72.50ms +[2025-09-03 05:43:13] [Rank 0] step:4001/10000 train_time:290008ms step_avg:72.48ms +[2025-09-03 05:43:13] [Rank 0] step:4001/10000 train_time:290008ms step_avg:72.48ms +[2025-09-03 05:43:15] [Rank 0] step:4021/10000 train_time:291384ms step_avg:72.47ms +[2025-09-03 05:43:15] [Rank 0] step:4021/10000 train_time:291384ms step_avg:72.47ms +[2025-09-03 05:43:16] [Rank 0] step:4041/10000 train_time:292899ms step_avg:72.48ms +[2025-09-03 05:43:16] [Rank 0] step:4041/10000 train_time:292899ms step_avg:72.48ms +[2025-09-03 05:43:18] [Rank 0] step:4061/10000 train_time:294414ms step_avg:72.50ms +[2025-09-03 05:43:18] [Rank 0] step:4061/10000 train_time:294414ms step_avg:72.50ms +[2025-09-03 05:43:19] [Rank 0] step:4081/10000 train_time:296038ms step_avg:72.54ms +[2025-09-03 05:43:19] [Rank 0] step:4081/10000 train_time:296038ms step_avg:72.54ms +[2025-09-03 05:43:21] [Rank 0] step:4101/10000 train_time:297553ms step_avg:72.56ms +[2025-09-03 05:43:21] [Rank 0] step:4101/10000 train_time:297553ms step_avg:72.56ms +[2025-09-03 05:43:22] [Rank 0] step:4121/10000 train_time:299069ms step_avg:72.57ms +[2025-09-03 05:43:22] [Rank 0] step:4121/10000 train_time:299069ms step_avg:72.57ms +[2025-09-03 
05:43:24] [Rank 0] step:4141/10000 train_time:300585ms step_avg:72.59ms +[2025-09-03 05:43:24] [Rank 0] step:4141/10000 train_time:300585ms step_avg:72.59ms +[2025-09-03 05:43:26] [Rank 0] step:4161/10000 train_time:302101ms step_avg:72.60ms +[2025-09-03 05:43:26] [Rank 0] step:4161/10000 train_time:302101ms step_avg:72.60ms +[2025-09-03 05:43:27] [Rank 0] step:4181/10000 train_time:303621ms step_avg:72.62ms +[2025-09-03 05:43:27] [Rank 0] step:4181/10000 train_time:303621ms step_avg:72.62ms +[2025-09-03 05:43:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:43:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:43:40] [Rank 0] PRINT: step:4200/10000 val_loss:4.2277 svd_entropy: attn_qk:H=0.7134,top10E=0.31,eRank=122.1,q75/q25=96.08 attn_vo:H=0.7986,top10E=0.18,eRank=226.0,q75/q25=100.37 mlp_w1:H=0.7053,top10E=0.37,eRank=133.5,q75/q25=13.35 mlp_w2:H=0.8221,top10E=0.16,eRank=244.1,q75/q25=36.09 vo_prod:H=0.7044,top10E=0.28,eRank=113.4,q75/q25=12881.89 train_time:305290ms step_avg:72.69ms +[2025-09-03 05:43:40] [Rank 0] PRINT: step:4200/10000 val_loss:4.2277 svd_entropy: attn_qk:H=0.7134,top10E=0.31,eRank=122.1,q75/q25=96.08 attn_vo:H=0.7986,top10E=0.18,eRank=226.0,q75/q25=100.37 mlp_w1:H=0.7053,top10E=0.37,eRank=133.5,q75/q25=13.35 mlp_w2:H=0.8221,top10E=0.16,eRank=244.1,q75/q25=36.09 vo_prod:H=0.7044,top10E=0.28,eRank=113.4,q75/q25=12881.89 train_time:305290ms step_avg:72.69ms +[2025-09-03 05:43:40] [Rank 0] step:4201/10000 train_time:305301ms step_avg:72.67ms +[2025-09-03 05:43:40] [Rank 0] step:4201/10000 train_time:305301ms step_avg:72.67ms +[2025-09-03 05:43:42] [Rank 0] step:4221/10000 train_time:306671ms step_avg:72.65ms +[2025-09-03 05:43:42] [Rank 0] step:4221/10000 train_time:306671ms step_avg:72.65ms +[2025-09-03 05:43:43] [Rank 0] step:4241/10000 
train_time:308187ms step_avg:72.67ms +[2025-09-03 05:43:43] [Rank 0] step:4241/10000 train_time:308187ms step_avg:72.67ms +[2025-09-03 05:43:45] [Rank 0] step:4261/10000 train_time:309703ms step_avg:72.68ms +[2025-09-03 05:43:45] [Rank 0] step:4261/10000 train_time:309703ms step_avg:72.68ms +[2025-09-03 05:43:46] [Rank 0] step:4281/10000 train_time:311217ms step_avg:72.70ms +[2025-09-03 05:43:46] [Rank 0] step:4281/10000 train_time:311217ms step_avg:72.70ms +[2025-09-03 05:43:48] [Rank 0] step:4301/10000 train_time:312736ms step_avg:72.71ms +[2025-09-03 05:43:48] [Rank 0] step:4301/10000 train_time:312736ms step_avg:72.71ms +[2025-09-03 05:43:49] [Rank 0] step:4321/10000 train_time:314254ms step_avg:72.73ms +[2025-09-03 05:43:49] [Rank 0] step:4321/10000 train_time:314254ms step_avg:72.73ms +[2025-09-03 05:43:51] [Rank 0] step:4341/10000 train_time:315769ms step_avg:72.74ms +[2025-09-03 05:43:51] [Rank 0] step:4341/10000 train_time:315769ms step_avg:72.74ms +[2025-09-03 05:43:52] [Rank 0] step:4361/10000 train_time:317285ms step_avg:72.76ms +[2025-09-03 05:43:52] [Rank 0] step:4361/10000 train_time:317285ms step_avg:72.76ms +[2025-09-03 05:43:54] [Rank 0] step:4381/10000 train_time:318800ms step_avg:72.77ms +[2025-09-03 05:43:54] [Rank 0] step:4381/10000 train_time:318800ms step_avg:72.77ms +[2025-09-03 05:43:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:43:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:44:07] [Rank 0] PRINT: step:4400/10000 val_loss:4.2015 svd_entropy: attn_qk:H=0.7169,top10E=0.30,eRank=124.7,q75/q25=99.31 attn_vo:H=0.8018,top10E=0.17,eRank=230.2,q75/q25=97.56 mlp_w1:H=0.7099,top10E=0.36,eRank=137.0,q75/q25=13.97 mlp_w2:H=0.8240,top10E=0.16,eRank=247.5,q75/q25=37.26 vo_prod:H=0.7081,top10E=0.27,eRank=116.3,q75/q25=12151.34 train_time:320469ms step_avg:72.83ms +[2025-09-03 05:44:07] [Rank 0] PRINT: step:4400/10000 val_loss:4.2015 svd_entropy: attn_qk:H=0.7169,top10E=0.30,eRank=124.7,q75/q25=99.31 attn_vo:H=0.8018,top10E=0.17,eRank=230.2,q75/q25=97.56 mlp_w1:H=0.7099,top10E=0.36,eRank=137.0,q75/q25=13.97 mlp_w2:H=0.8240,top10E=0.16,eRank=247.5,q75/q25=37.26 vo_prod:H=0.7081,top10E=0.27,eRank=116.3,q75/q25=12151.34 train_time:320469ms step_avg:72.83ms +[2025-09-03 05:44:07] [Rank 0] step:4401/10000 train_time:320480ms step_avg:72.82ms +[2025-09-03 05:44:07] [Rank 0] step:4401/10000 train_time:320480ms step_avg:72.82ms +[2025-09-03 05:44:09] [Rank 0] step:4421/10000 train_time:321864ms step_avg:72.80ms +[2025-09-03 05:44:09] [Rank 0] step:4421/10000 train_time:321864ms step_avg:72.80ms +[2025-09-03 05:44:10] [Rank 0] step:4441/10000 train_time:323378ms step_avg:72.82ms +[2025-09-03 05:44:10] [Rank 0] step:4441/10000 train_time:323378ms step_avg:72.82ms +[2025-09-03 05:44:12] [Rank 0] step:4461/10000 train_time:324898ms step_avg:72.83ms +[2025-09-03 05:44:12] [Rank 0] step:4461/10000 train_time:324898ms step_avg:72.83ms +[2025-09-03 05:44:13] [Rank 0] step:4481/10000 train_time:326421ms step_avg:72.85ms +[2025-09-03 05:44:13] [Rank 0] step:4481/10000 train_time:326421ms step_avg:72.85ms +[2025-09-03 05:44:15] [Rank 0] step:4501/10000 train_time:327944ms step_avg:72.86ms +[2025-09-03 05:44:15] [Rank 0] step:4501/10000 train_time:327944ms step_avg:72.86ms +[2025-09-03 05:44:16] [Rank 0] step:4521/10000 train_time:329464ms step_avg:72.87ms +[2025-09-03 05:44:16] [Rank 0] step:4521/10000 train_time:329464ms step_avg:72.87ms +[2025-09-03 
05:44:18] [Rank 0] step:4541/10000 train_time:330987ms step_avg:72.89ms +[2025-09-03 05:44:18] [Rank 0] step:4541/10000 train_time:330987ms step_avg:72.89ms +[2025-09-03 05:44:19] [Rank 0] step:4561/10000 train_time:332514ms step_avg:72.90ms +[2025-09-03 05:44:19] [Rank 0] step:4561/10000 train_time:332514ms step_avg:72.90ms +[2025-09-03 05:44:21] [Rank 0] step:4581/10000 train_time:334038ms step_avg:72.92ms +[2025-09-03 05:44:21] [Rank 0] step:4581/10000 train_time:334038ms step_avg:72.92ms +[2025-09-03 05:44:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:44:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:44:34] [Rank 0] PRINT: step:4600/10000 val_loss:4.1729 svd_entropy: attn_qk:H=0.7203,top10E=0.30,eRank=127.1,q75/q25=101.31 attn_vo:H=0.8050,top10E=0.17,eRank=234.4,q75/q25=94.96 mlp_w1:H=0.7140,top10E=0.35,eRank=140.2,q75/q25=14.60 mlp_w2:H=0.8259,top10E=0.15,eRank=250.9,q75/q25=38.30 vo_prod:H=0.7117,top10E=0.27,eRank=119.2,q75/q25=11540.19 train_time:335716ms step_avg:72.98ms +[2025-09-03 05:44:34] [Rank 0] PRINT: step:4600/10000 val_loss:4.1729 svd_entropy: attn_qk:H=0.7203,top10E=0.30,eRank=127.1,q75/q25=101.31 attn_vo:H=0.8050,top10E=0.17,eRank=234.4,q75/q25=94.96 mlp_w1:H=0.7140,top10E=0.35,eRank=140.2,q75/q25=14.60 mlp_w2:H=0.8259,top10E=0.15,eRank=250.9,q75/q25=38.30 vo_prod:H=0.7117,top10E=0.27,eRank=119.2,q75/q25=11540.19 train_time:335716ms step_avg:72.98ms +[2025-09-03 05:44:34] [Rank 0] step:4601/10000 train_time:335728ms step_avg:72.97ms +[2025-09-03 05:44:34] [Rank 0] step:4601/10000 train_time:335728ms step_avg:72.97ms +[2025-09-03 05:44:36] [Rank 0] step:4621/10000 train_time:337121ms step_avg:72.95ms +[2025-09-03 05:44:36] [Rank 0] step:4621/10000 train_time:337121ms step_avg:72.95ms +[2025-09-03 05:44:37] [Rank 0] step:4641/10000 
train_time:338644ms step_avg:72.97ms +[2025-09-03 05:44:37] [Rank 0] step:4641/10000 train_time:338644ms step_avg:72.97ms +[2025-09-03 05:44:39] [Rank 0] step:4661/10000 train_time:340166ms step_avg:72.98ms +[2025-09-03 05:44:39] [Rank 0] step:4661/10000 train_time:340166ms step_avg:72.98ms +[2025-09-03 05:44:40] [Rank 0] step:4681/10000 train_time:341693ms step_avg:73.00ms +[2025-09-03 05:44:40] [Rank 0] step:4681/10000 train_time:341693ms step_avg:73.00ms +[2025-09-03 05:44:42] [Rank 0] step:4701/10000 train_time:343217ms step_avg:73.01ms +[2025-09-03 05:44:42] [Rank 0] step:4701/10000 train_time:343217ms step_avg:73.01ms +[2025-09-03 05:44:43] [Rank 0] step:4721/10000 train_time:344737ms step_avg:73.02ms +[2025-09-03 05:44:43] [Rank 0] step:4721/10000 train_time:344737ms step_avg:73.02ms +[2025-09-03 05:44:45] [Rank 0] step:4741/10000 train_time:346261ms step_avg:73.04ms +[2025-09-03 05:44:45] [Rank 0] step:4741/10000 train_time:346261ms step_avg:73.04ms +[2025-09-03 05:44:46] [Rank 0] step:4761/10000 train_time:347784ms step_avg:73.05ms +[2025-09-03 05:44:46] [Rank 0] step:4761/10000 train_time:347784ms step_avg:73.05ms +[2025-09-03 05:44:48] [Rank 0] step:4781/10000 train_time:349308ms step_avg:73.06ms +[2025-09-03 05:44:48] [Rank 0] step:4781/10000 train_time:349308ms step_avg:73.06ms +[2025-09-03 05:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:45:01] [Rank 0] PRINT: step:4800/10000 val_loss:4.1602 svd_entropy: attn_qk:H=0.7233,top10E=0.29,eRank=129.4,q75/q25=103.79 attn_vo:H=0.8079,top10E=0.17,eRank=238.4,q75/q25=92.91 mlp_w1:H=0.7177,top10E=0.35,eRank=143.2,q75/q25=15.07 mlp_w2:H=0.8276,top10E=0.15,eRank=254.1,q75/q25=39.04 vo_prod:H=0.7152,top10E=0.26,eRank=122.1,q75/q25=10794.61 train_time:350985ms step_avg:73.12ms +[2025-09-03 05:45:01] [Rank 0] PRINT: step:4800/10000 val_loss:4.1602 svd_entropy: attn_qk:H=0.7233,top10E=0.29,eRank=129.4,q75/q25=103.79 attn_vo:H=0.8079,top10E=0.17,eRank=238.4,q75/q25=92.91 mlp_w1:H=0.7177,top10E=0.35,eRank=143.2,q75/q25=15.07 mlp_w2:H=0.8276,top10E=0.15,eRank=254.1,q75/q25=39.04 vo_prod:H=0.7152,top10E=0.26,eRank=122.1,q75/q25=10794.61 train_time:350985ms step_avg:73.12ms +[2025-09-03 05:45:01] [Rank 0] step:4801/10000 train_time:350997ms step_avg:73.11ms +[2025-09-03 05:45:01] [Rank 0] step:4801/10000 train_time:350997ms step_avg:73.11ms +[2025-09-03 05:45:03] [Rank 0] step:4821/10000 train_time:352384ms step_avg:73.09ms +[2025-09-03 05:45:03] [Rank 0] step:4821/10000 train_time:352384ms step_avg:73.09ms +[2025-09-03 05:45:04] [Rank 0] step:4841/10000 train_time:353903ms step_avg:73.11ms +[2025-09-03 05:45:04] [Rank 0] step:4841/10000 train_time:353903ms step_avg:73.11ms +[2025-09-03 05:45:06] [Rank 0] step:4861/10000 train_time:355429ms step_avg:73.12ms +[2025-09-03 05:45:06] [Rank 0] step:4861/10000 train_time:355429ms step_avg:73.12ms +[2025-09-03 05:45:07] [Rank 0] step:4881/10000 train_time:356951ms step_avg:73.13ms +[2025-09-03 05:45:07] [Rank 0] step:4881/10000 train_time:356951ms step_avg:73.13ms +[2025-09-03 05:45:09] [Rank 0] step:4901/10000 train_time:358470ms step_avg:73.14ms +[2025-09-03 05:45:09] [Rank 0] step:4901/10000 train_time:358470ms step_avg:73.14ms +[2025-09-03 05:45:10] [Rank 0] step:4921/10000 train_time:359994ms step_avg:73.15ms +[2025-09-03 05:45:10] [Rank 0] step:4921/10000 train_time:359994ms step_avg:73.15ms +[2025-09-03 
05:45:12] [Rank 0] step:4941/10000 train_time:361520ms step_avg:73.17ms +[2025-09-03 05:45:12] [Rank 0] step:4941/10000 train_time:361520ms step_avg:73.17ms +[2025-09-03 05:45:13] [Rank 0] step:4961/10000 train_time:363041ms step_avg:73.18ms +[2025-09-03 05:45:13] [Rank 0] step:4961/10000 train_time:363041ms step_avg:73.18ms +[2025-09-03 05:45:15] [Rank 0] step:4981/10000 train_time:364564ms step_avg:73.19ms +[2025-09-03 05:45:15] [Rank 0] step:4981/10000 train_time:364564ms step_avg:73.19ms +[2025-09-03 05:45:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:45:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:45:28] [Rank 0] PRINT: step:5000/10000 val_loss:4.1376 svd_entropy: attn_qk:H=0.7263,top10E=0.29,eRank=131.7,q75/q25=105.90 attn_vo:H=0.8106,top10E=0.17,eRank=242.2,q75/q25=90.47 mlp_w1:H=0.7213,top10E=0.34,eRank=146.0,q75/q25=15.70 mlp_w2:H=0.8292,top10E=0.15,eRank=257.0,q75/q25=39.99 vo_prod:H=0.7184,top10E=0.26,eRank=124.8,q75/q25=10120.43 train_time:366238ms step_avg:73.25ms +[2025-09-03 05:45:28] [Rank 0] PRINT: step:5000/10000 val_loss:4.1376 svd_entropy: attn_qk:H=0.7263,top10E=0.29,eRank=131.7,q75/q25=105.90 attn_vo:H=0.8106,top10E=0.17,eRank=242.2,q75/q25=90.47 mlp_w1:H=0.7213,top10E=0.34,eRank=146.0,q75/q25=15.70 mlp_w2:H=0.8292,top10E=0.15,eRank=257.0,q75/q25=39.99 vo_prod:H=0.7184,top10E=0.26,eRank=124.8,q75/q25=10120.43 train_time:366238ms step_avg:73.25ms +[2025-09-03 05:45:28] [Rank 0] step:5001/10000 train_time:366249ms step_avg:73.24ms +[2025-09-03 05:45:28] [Rank 0] step:5001/10000 train_time:366249ms step_avg:73.24ms +[2025-09-03 05:45:30] [Rank 0] step:5021/10000 train_time:367626ms step_avg:73.22ms +[2025-09-03 05:45:30] [Rank 0] step:5021/10000 train_time:367626ms step_avg:73.22ms +[2025-09-03 05:45:31] [Rank 0] step:5041/10000 
train_time:369149ms step_avg:73.23ms +[2025-09-03 05:45:31] [Rank 0] step:5041/10000 train_time:369149ms step_avg:73.23ms +[2025-09-03 05:45:33] [Rank 0] step:5061/10000 train_time:370671ms step_avg:73.24ms +[2025-09-03 05:45:33] [Rank 0] step:5061/10000 train_time:370671ms step_avg:73.24ms +[2025-09-03 05:45:34] [Rank 0] step:5081/10000 train_time:372194ms step_avg:73.25ms +[2025-09-03 05:45:34] [Rank 0] step:5081/10000 train_time:372194ms step_avg:73.25ms +[2025-09-03 05:45:36] [Rank 0] step:5101/10000 train_time:373717ms step_avg:73.26ms +[2025-09-03 05:45:36] [Rank 0] step:5101/10000 train_time:373717ms step_avg:73.26ms +[2025-09-03 05:45:37] [Rank 0] step:5121/10000 train_time:375239ms step_avg:73.27ms +[2025-09-03 05:45:37] [Rank 0] step:5121/10000 train_time:375239ms step_avg:73.27ms +[2025-09-03 05:45:39] [Rank 0] step:5141/10000 train_time:376763ms step_avg:73.29ms +[2025-09-03 05:45:39] [Rank 0] step:5141/10000 train_time:376763ms step_avg:73.29ms +[2025-09-03 05:45:40] [Rank 0] step:5161/10000 train_time:378286ms step_avg:73.30ms +[2025-09-03 05:45:40] [Rank 0] step:5161/10000 train_time:378286ms step_avg:73.30ms +[2025-09-03 05:45:42] [Rank 0] step:5181/10000 train_time:379812ms step_avg:73.31ms +[2025-09-03 05:45:42] [Rank 0] step:5181/10000 train_time:379812ms step_avg:73.31ms +[2025-09-03 05:45:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:45:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:45:55] [Rank 0] PRINT: step:5200/10000 val_loss:4.1170 svd_entropy: attn_qk:H=0.7290,top10E=0.28,eRank=133.8,q75/q25=106.58 attn_vo:H=0.8131,top10E=0.16,eRank=245.8,q75/q25=87.69 mlp_w1:H=0.7248,top10E=0.34,eRank=149.0,q75/q25=16.40 mlp_w2:H=0.8306,top10E=0.15,eRank=259.7,q75/q25=41.08 vo_prod:H=0.7213,top10E=0.26,eRank=127.4,q75/q25=9412.95 train_time:381511ms step_avg:73.37ms +[2025-09-03 05:45:55] [Rank 0] PRINT: step:5200/10000 val_loss:4.1170 svd_entropy: attn_qk:H=0.7290,top10E=0.28,eRank=133.8,q75/q25=106.58 attn_vo:H=0.8131,top10E=0.16,eRank=245.8,q75/q25=87.69 mlp_w1:H=0.7248,top10E=0.34,eRank=149.0,q75/q25=16.40 mlp_w2:H=0.8306,top10E=0.15,eRank=259.7,q75/q25=41.08 vo_prod:H=0.7213,top10E=0.26,eRank=127.4,q75/q25=9412.95 train_time:381511ms step_avg:73.37ms +[2025-09-03 05:45:55] [Rank 0] step:5201/10000 train_time:381522ms step_avg:73.36ms +[2025-09-03 05:45:55] [Rank 0] step:5201/10000 train_time:381522ms step_avg:73.36ms +[2025-09-03 05:45:57] [Rank 0] step:5221/10000 train_time:382926ms step_avg:73.34ms +[2025-09-03 05:45:57] [Rank 0] step:5221/10000 train_time:382926ms step_avg:73.34ms +[2025-09-03 05:45:58] [Rank 0] step:5241/10000 train_time:384479ms step_avg:73.36ms +[2025-09-03 05:45:58] [Rank 0] step:5241/10000 train_time:384479ms step_avg:73.36ms +[2025-09-03 05:46:00] [Rank 0] step:5261/10000 train_time:386031ms step_avg:73.38ms +[2025-09-03 05:46:00] [Rank 0] step:5261/10000 train_time:386031ms step_avg:73.38ms +[2025-09-03 05:46:01] [Rank 0] step:5281/10000 train_time:387587ms step_avg:73.39ms +[2025-09-03 05:46:01] [Rank 0] step:5281/10000 train_time:387587ms step_avg:73.39ms +[2025-09-03 05:46:03] [Rank 0] step:5301/10000 train_time:389150ms step_avg:73.41ms +[2025-09-03 05:46:03] [Rank 0] step:5301/10000 train_time:389150ms step_avg:73.41ms +[2025-09-03 05:46:05] [Rank 0] step:5321/10000 train_time:390702ms step_avg:73.43ms +[2025-09-03 05:46:05] [Rank 0] step:5321/10000 train_time:390702ms step_avg:73.43ms +[2025-09-03 
05:46:06] [Rank 0] step:5341/10000 train_time:392255ms step_avg:73.44ms +[2025-09-03 05:46:06] [Rank 0] step:5341/10000 train_time:392255ms step_avg:73.44ms +[2025-09-03 05:46:08] [Rank 0] step:5361/10000 train_time:393814ms step_avg:73.46ms +[2025-09-03 05:46:08] [Rank 0] step:5361/10000 train_time:393814ms step_avg:73.46ms +[2025-09-03 05:46:09] [Rank 0] step:5381/10000 train_time:395370ms step_avg:73.48ms +[2025-09-03 05:46:09] [Rank 0] step:5381/10000 train_time:395370ms step_avg:73.48ms +[2025-09-03 05:46:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:46:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:46:22] [Rank 0] PRINT: step:5400/10000 val_loss:4.0976 svd_entropy: attn_qk:H=0.7315,top10E=0.28,eRank=135.7,q75/q25=107.41 attn_vo:H=0.8155,top10E=0.16,eRank=249.2,q75/q25=84.90 mlp_w1:H=0.7281,top10E=0.33,eRank=151.8,q75/q25=17.01 mlp_w2:H=0.8320,top10E=0.15,eRank=262.3,q75/q25=42.18 vo_prod:H=0.7241,top10E=0.25,eRank=129.8,q75/q25=8688.92 train_time:397080ms step_avg:73.53ms +[2025-09-03 05:46:22] [Rank 0] PRINT: step:5400/10000 val_loss:4.0976 svd_entropy: attn_qk:H=0.7315,top10E=0.28,eRank=135.7,q75/q25=107.41 attn_vo:H=0.8155,top10E=0.16,eRank=249.2,q75/q25=84.90 mlp_w1:H=0.7281,top10E=0.33,eRank=151.8,q75/q25=17.01 mlp_w2:H=0.8320,top10E=0.15,eRank=262.3,q75/q25=42.18 vo_prod:H=0.7241,top10E=0.25,eRank=129.8,q75/q25=8688.92 train_time:397080ms step_avg:73.53ms +[2025-09-03 05:46:23] [Rank 0] step:5401/10000 train_time:397090ms step_avg:73.52ms +[2025-09-03 05:46:23] [Rank 0] step:5401/10000 train_time:397090ms step_avg:73.52ms +[2025-09-03 05:46:24] [Rank 0] step:5421/10000 train_time:398515ms step_avg:73.51ms +[2025-09-03 05:46:24] [Rank 0] step:5421/10000 train_time:398515ms step_avg:73.51ms +[2025-09-03 05:46:26] [Rank 0] step:5441/10000 train_time:400064ms 
step_avg:73.53ms +[2025-09-03 05:46:26] [Rank 0] step:5441/10000 train_time:400064ms step_avg:73.53ms +[2025-09-03 05:46:27] [Rank 0] step:5461/10000 train_time:401617ms step_avg:73.54ms +[2025-09-03 05:46:27] [Rank 0] step:5461/10000 train_time:401617ms step_avg:73.54ms +[2025-09-03 05:46:29] [Rank 0] step:5481/10000 train_time:403173ms step_avg:73.56ms +[2025-09-03 05:46:29] [Rank 0] step:5481/10000 train_time:403173ms step_avg:73.56ms +[2025-09-03 05:46:30] [Rank 0] step:5501/10000 train_time:404734ms step_avg:73.57ms +[2025-09-03 05:46:30] [Rank 0] step:5501/10000 train_time:404734ms step_avg:73.57ms +[2025-09-03 05:46:32] [Rank 0] step:5521/10000 train_time:406293ms step_avg:73.59ms +[2025-09-03 05:46:32] [Rank 0] step:5521/10000 train_time:406293ms step_avg:73.59ms +[2025-09-03 05:46:33] [Rank 0] step:5541/10000 train_time:407848ms step_avg:73.61ms +[2025-09-03 05:46:33] [Rank 0] step:5541/10000 train_time:407848ms step_avg:73.61ms +[2025-09-03 05:46:35] [Rank 0] step:5561/10000 train_time:409404ms step_avg:73.62ms +[2025-09-03 05:46:35] [Rank 0] step:5561/10000 train_time:409404ms step_avg:73.62ms +[2025-09-03 05:46:37] [Rank 0] step:5581/10000 train_time:410961ms step_avg:73.64ms +[2025-09-03 05:46:37] [Rank 0] step:5581/10000 train_time:410961ms step_avg:73.64ms +[2025-09-03 05:46:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:46:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:46:50] [Rank 0] PRINT: step:5600/10000 val_loss:4.0855 svd_entropy: attn_qk:H=0.7339,top10E=0.28,eRank=137.8,q75/q25=108.88 attn_vo:H=0.8177,top10E=0.16,eRank=252.5,q75/q25=83.34 mlp_w1:H=0.7312,top10E=0.33,eRank=154.4,q75/q25=17.56 mlp_w2:H=0.8332,top10E=0.14,eRank=264.7,q75/q25=43.09 vo_prod:H=0.7268,top10E=0.25,eRank=132.3,q75/q25=8300.17 train_time:412674ms step_avg:73.69ms +[2025-09-03 05:46:50] [Rank 0] PRINT: step:5600/10000 val_loss:4.0855 svd_entropy: attn_qk:H=0.7339,top10E=0.28,eRank=137.8,q75/q25=108.88 attn_vo:H=0.8177,top10E=0.16,eRank=252.5,q75/q25=83.34 mlp_w1:H=0.7312,top10E=0.33,eRank=154.4,q75/q25=17.56 mlp_w2:H=0.8332,top10E=0.14,eRank=264.7,q75/q25=43.09 vo_prod:H=0.7268,top10E=0.25,eRank=132.3,q75/q25=8300.17 train_time:412674ms step_avg:73.69ms +[2025-09-03 05:46:50] [Rank 0] step:5601/10000 train_time:412685ms step_avg:73.68ms +[2025-09-03 05:46:50] [Rank 0] step:5601/10000 train_time:412685ms step_avg:73.68ms +[2025-09-03 05:46:51] [Rank 0] step:5621/10000 train_time:414094ms step_avg:73.67ms +[2025-09-03 05:46:51] [Rank 0] step:5621/10000 train_time:414094ms step_avg:73.67ms +[2025-09-03 05:46:53] [Rank 0] step:5641/10000 train_time:415650ms step_avg:73.68ms +[2025-09-03 05:46:53] [Rank 0] step:5641/10000 train_time:415650ms step_avg:73.68ms +[2025-09-03 05:46:55] [Rank 0] step:5661/10000 train_time:417203ms step_avg:73.70ms +[2025-09-03 05:46:55] [Rank 0] step:5661/10000 train_time:417203ms step_avg:73.70ms +[2025-09-03 05:46:56] [Rank 0] step:5681/10000 train_time:418761ms step_avg:73.71ms +[2025-09-03 05:46:56] [Rank 0] step:5681/10000 train_time:418761ms step_avg:73.71ms +[2025-09-03 05:46:58] [Rank 0] step:5701/10000 train_time:420317ms step_avg:73.73ms +[2025-09-03 05:46:58] [Rank 0] step:5701/10000 train_time:420317ms step_avg:73.73ms +[2025-09-03 05:46:59] [Rank 0] step:5721/10000 train_time:421875ms step_avg:73.74ms +[2025-09-03 05:46:59] [Rank 0] step:5721/10000 train_time:421875ms step_avg:73.74ms +[2025-09-03 
05:47:01] [Rank 0] step:5741/10000 train_time:423429ms step_avg:73.76ms +[2025-09-03 05:47:01] [Rank 0] step:5741/10000 train_time:423429ms step_avg:73.76ms +[2025-09-03 05:47:02] [Rank 0] step:5761/10000 train_time:424986ms step_avg:73.77ms +[2025-09-03 05:47:02] [Rank 0] step:5761/10000 train_time:424986ms step_avg:73.77ms +[2025-09-03 05:47:04] [Rank 0] step:5781/10000 train_time:426545ms step_avg:73.78ms +[2025-09-03 05:47:04] [Rank 0] step:5781/10000 train_time:426545ms step_avg:73.78ms +[2025-09-03 05:47:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:47:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:47:17] [Rank 0] PRINT: step:5800/10000 val_loss:4.0738 svd_entropy: attn_qk:H=0.7363,top10E=0.27,eRank=139.7,q75/q25=110.08 attn_vo:H=0.8199,top10E=0.16,eRank=255.7,q75/q25=80.32 mlp_w1:H=0.7342,top10E=0.32,eRank=157.1,q75/q25=18.27 mlp_w2:H=0.8344,top10E=0.14,eRank=267.0,q75/q25=43.91 vo_prod:H=0.7294,top10E=0.25,eRank=134.7,q75/q25=7675.64 train_time:428259ms step_avg:73.84ms +[2025-09-03 05:47:17] [Rank 0] PRINT: step:5800/10000 val_loss:4.0738 svd_entropy: attn_qk:H=0.7363,top10E=0.27,eRank=139.7,q75/q25=110.08 attn_vo:H=0.8199,top10E=0.16,eRank=255.7,q75/q25=80.32 mlp_w1:H=0.7342,top10E=0.32,eRank=157.1,q75/q25=18.27 mlp_w2:H=0.8344,top10E=0.14,eRank=267.0,q75/q25=43.91 vo_prod:H=0.7294,top10E=0.25,eRank=134.7,q75/q25=7675.64 train_time:428259ms step_avg:73.84ms +[2025-09-03 05:47:17] [Rank 0] step:5801/10000 train_time:428270ms step_avg:73.83ms +[2025-09-03 05:47:17] [Rank 0] step:5801/10000 train_time:428270ms step_avg:73.83ms +[2025-09-03 05:47:19] [Rank 0] step:5821/10000 train_time:429668ms step_avg:73.81ms +[2025-09-03 05:47:19] [Rank 0] step:5821/10000 train_time:429668ms step_avg:73.81ms +[2025-09-03 05:47:20] [Rank 0] step:5841/10000 train_time:431220ms 
step_avg:73.83ms +[2025-09-03 05:47:20] [Rank 0] step:5841/10000 train_time:431220ms step_avg:73.83ms +[2025-09-03 05:47:22] [Rank 0] step:5861/10000 train_time:432778ms step_avg:73.84ms +[2025-09-03 05:47:22] [Rank 0] step:5861/10000 train_time:432778ms step_avg:73.84ms +[2025-09-03 05:47:23] [Rank 0] step:5881/10000 train_time:434333ms step_avg:73.85ms +[2025-09-03 05:47:23] [Rank 0] step:5881/10000 train_time:434333ms step_avg:73.85ms +[2025-09-03 05:47:25] [Rank 0] step:5901/10000 train_time:435891ms step_avg:73.87ms +[2025-09-03 05:47:25] [Rank 0] step:5901/10000 train_time:435891ms step_avg:73.87ms +[2025-09-03 05:47:26] [Rank 0] step:5921/10000 train_time:437447ms step_avg:73.88ms +[2025-09-03 05:47:26] [Rank 0] step:5921/10000 train_time:437447ms step_avg:73.88ms +[2025-09-03 05:47:28] [Rank 0] step:5941/10000 train_time:439005ms step_avg:73.89ms +[2025-09-03 05:47:28] [Rank 0] step:5941/10000 train_time:439005ms step_avg:73.89ms +[2025-09-03 05:47:29] [Rank 0] step:5961/10000 train_time:440566ms step_avg:73.91ms +[2025-09-03 05:47:29] [Rank 0] step:5961/10000 train_time:440566ms step_avg:73.91ms +[2025-09-03 05:47:31] [Rank 0] step:5981/10000 train_time:442126ms step_avg:73.92ms +[2025-09-03 05:47:31] [Rank 0] step:5981/10000 train_time:442126ms step_avg:73.92ms +[2025-09-03 05:47:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:47:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:47:44] [Rank 0] PRINT: step:6000/10000 val_loss:4.0499 svd_entropy: attn_qk:H=0.7386,top10E=0.27,eRank=141.7,q75/q25=110.85 attn_vo:H=0.8220,top10E=0.15,eRank=258.9,q75/q25=78.48 mlp_w1:H=0.7370,top10E=0.32,eRank=159.6,q75/q25=18.85 mlp_w2:H=0.8356,top10E=0.14,eRank=269.3,q75/q25=44.75 vo_prod:H=0.7320,top10E=0.24,eRank=137.0,q75/q25=7071.73 train_time:443838ms step_avg:73.97ms +[2025-09-03 05:47:44] [Rank 0] PRINT: step:6000/10000 val_loss:4.0499 svd_entropy: attn_qk:H=0.7386,top10E=0.27,eRank=141.7,q75/q25=110.85 attn_vo:H=0.8220,top10E=0.15,eRank=258.9,q75/q25=78.48 mlp_w1:H=0.7370,top10E=0.32,eRank=159.6,q75/q25=18.85 mlp_w2:H=0.8356,top10E=0.14,eRank=269.3,q75/q25=44.75 vo_prod:H=0.7320,top10E=0.24,eRank=137.0,q75/q25=7071.73 train_time:443838ms step_avg:73.97ms +[2025-09-03 05:47:44] [Rank 0] step:6001/10000 train_time:443849ms step_avg:73.96ms +[2025-09-03 05:47:44] [Rank 0] step:6001/10000 train_time:443849ms step_avg:73.96ms +[2025-09-03 05:47:46] [Rank 0] step:6021/10000 train_time:445272ms step_avg:73.95ms +[2025-09-03 05:47:46] [Rank 0] step:6021/10000 train_time:445272ms step_avg:73.95ms +[2025-09-03 05:47:48] [Rank 0] step:6041/10000 train_time:446829ms step_avg:73.97ms +[2025-09-03 05:47:48] [Rank 0] step:6041/10000 train_time:446829ms step_avg:73.97ms +[2025-09-03 05:47:49] [Rank 0] step:6061/10000 train_time:448391ms step_avg:73.98ms +[2025-09-03 05:47:49] [Rank 0] step:6061/10000 train_time:448391ms step_avg:73.98ms +[2025-09-03 05:47:51] [Rank 0] step:6081/10000 train_time:449952ms step_avg:73.99ms +[2025-09-03 05:47:51] [Rank 0] step:6081/10000 train_time:449952ms step_avg:73.99ms +[2025-09-03 05:47:52] [Rank 0] step:6101/10000 train_time:451512ms step_avg:74.01ms +[2025-09-03 05:47:52] [Rank 0] step:6101/10000 train_time:451512ms step_avg:74.01ms +[2025-09-03 05:47:54] [Rank 0] step:6121/10000 train_time:453337ms step_avg:74.06ms +[2025-09-03 05:47:54] [Rank 0] step:6121/10000 train_time:453337ms step_avg:74.06ms +[2025-09-03 
05:47:56] [Rank 0] step:6141/10000 train_time:454903ms step_avg:74.08ms +[2025-09-03 05:47:56] [Rank 0] step:6141/10000 train_time:454903ms step_avg:74.08ms +[2025-09-03 05:47:57] [Rank 0] step:6161/10000 train_time:456461ms step_avg:74.09ms +[2025-09-03 05:47:57] [Rank 0] step:6161/10000 train_time:456461ms step_avg:74.09ms +[2025-09-03 05:47:59] [Rank 0] step:6181/10000 train_time:458019ms step_avg:74.10ms +[2025-09-03 05:47:59] [Rank 0] step:6181/10000 train_time:458019ms step_avg:74.10ms +[2025-09-03 05:48:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:48:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:48:12] [Rank 0] PRINT: step:6200/10000 val_loss:4.0344 svd_entropy: attn_qk:H=0.7408,top10E=0.27,eRank=143.5,q75/q25=111.06 attn_vo:H=0.8239,top10E=0.15,eRank=261.8,q75/q25=75.94 mlp_w1:H=0.7396,top10E=0.32,eRank=161.9,q75/q25=19.47 mlp_w2:H=0.8366,top10E=0.14,eRank=271.4,q75/q25=45.41 vo_prod:H=0.7343,top10E=0.24,eRank=139.1,q75/q25=6405.89 train_time:459735ms step_avg:74.15ms +[2025-09-03 05:48:12] [Rank 0] PRINT: step:6200/10000 val_loss:4.0344 svd_entropy: attn_qk:H=0.7408,top10E=0.27,eRank=143.5,q75/q25=111.06 attn_vo:H=0.8239,top10E=0.15,eRank=261.8,q75/q25=75.94 mlp_w1:H=0.7396,top10E=0.32,eRank=161.9,q75/q25=19.47 mlp_w2:H=0.8366,top10E=0.14,eRank=271.4,q75/q25=45.41 vo_prod:H=0.7343,top10E=0.24,eRank=139.1,q75/q25=6405.89 train_time:459735ms step_avg:74.15ms +[2025-09-03 05:48:12] [Rank 0] step:6201/10000 train_time:459746ms step_avg:74.14ms +[2025-09-03 05:48:12] [Rank 0] step:6201/10000 train_time:459746ms step_avg:74.14ms +[2025-09-03 05:48:14] [Rank 0] step:6221/10000 train_time:461166ms step_avg:74.13ms +[2025-09-03 05:48:14] [Rank 0] step:6221/10000 train_time:461166ms step_avg:74.13ms +[2025-09-03 05:48:15] [Rank 0] step:6241/10000 train_time:462721ms 
step_avg:74.14ms +[2025-09-03 05:48:15] [Rank 0] step:6241/10000 train_time:462721ms step_avg:74.14ms +[2025-09-03 05:48:17] [Rank 0] step:6261/10000 train_time:464370ms step_avg:74.17ms +[2025-09-03 05:48:17] [Rank 0] step:6261/10000 train_time:464370ms step_avg:74.17ms +[2025-09-03 05:48:18] [Rank 0] step:6281/10000 train_time:465933ms step_avg:74.18ms +[2025-09-03 05:48:18] [Rank 0] step:6281/10000 train_time:465933ms step_avg:74.18ms +[2025-09-03 05:48:20] [Rank 0] step:6301/10000 train_time:467493ms step_avg:74.19ms +[2025-09-03 05:48:20] [Rank 0] step:6301/10000 train_time:467493ms step_avg:74.19ms +[2025-09-03 05:48:21] [Rank 0] step:6321/10000 train_time:469051ms step_avg:74.21ms +[2025-09-03 05:48:21] [Rank 0] step:6321/10000 train_time:469051ms step_avg:74.21ms +[2025-09-03 05:48:23] [Rank 0] step:6341/10000 train_time:470613ms step_avg:74.22ms +[2025-09-03 05:48:23] [Rank 0] step:6341/10000 train_time:470613ms step_avg:74.22ms +[2025-09-03 05:48:25] [Rank 0] step:6361/10000 train_time:472177ms step_avg:74.23ms +[2025-09-03 05:48:25] [Rank 0] step:6361/10000 train_time:472177ms step_avg:74.23ms +[2025-09-03 05:48:26] [Rank 0] step:6381/10000 train_time:473746ms step_avg:74.24ms +[2025-09-03 05:48:26] [Rank 0] step:6381/10000 train_time:473746ms step_avg:74.24ms +[2025-09-03 05:48:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:48:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:48:39] [Rank 0] PRINT: step:6400/10000 val_loss:4.0188 svd_entropy: attn_qk:H=0.7427,top10E=0.26,eRank=145.1,q75/q25=111.68 attn_vo:H=0.8257,top10E=0.15,eRank=264.6,q75/q25=74.02 mlp_w1:H=0.7419,top10E=0.31,eRank=164.1,q75/q25=19.96 mlp_w2:H=0.8376,top10E=0.14,eRank=273.4,q75/q25=45.89 vo_prod:H=0.7365,top10E=0.24,eRank=141.3,q75/q25=6106.49 train_time:475465ms step_avg:74.29ms +[2025-09-03 05:48:39] [Rank 0] PRINT: step:6400/10000 val_loss:4.0188 svd_entropy: attn_qk:H=0.7427,top10E=0.26,eRank=145.1,q75/q25=111.68 attn_vo:H=0.8257,top10E=0.15,eRank=264.6,q75/q25=74.02 mlp_w1:H=0.7419,top10E=0.31,eRank=164.1,q75/q25=19.96 mlp_w2:H=0.8376,top10E=0.14,eRank=273.4,q75/q25=45.89 vo_prod:H=0.7365,top10E=0.24,eRank=141.3,q75/q25=6106.49 train_time:475465ms step_avg:74.29ms +[2025-09-03 05:48:39] [Rank 0] step:6401/10000 train_time:475476ms step_avg:74.28ms +[2025-09-03 05:48:39] [Rank 0] step:6401/10000 train_time:475476ms step_avg:74.28ms +[2025-09-03 05:48:41] [Rank 0] step:6421/10000 train_time:476908ms step_avg:74.27ms +[2025-09-03 05:48:41] [Rank 0] step:6421/10000 train_time:476908ms step_avg:74.27ms +[2025-09-03 05:48:43] [Rank 0] step:6441/10000 train_time:478476ms step_avg:74.29ms +[2025-09-03 05:48:43] [Rank 0] step:6441/10000 train_time:478476ms step_avg:74.29ms +[2025-09-03 05:48:44] [Rank 0] step:6461/10000 train_time:480041ms step_avg:74.30ms +[2025-09-03 05:48:44] [Rank 0] step:6461/10000 train_time:480041ms step_avg:74.30ms +[2025-09-03 05:48:46] [Rank 0] step:6481/10000 train_time:481611ms step_avg:74.31ms +[2025-09-03 05:48:46] [Rank 0] step:6481/10000 train_time:481611ms step_avg:74.31ms +[2025-09-03 05:48:47] [Rank 0] step:6501/10000 train_time:483171ms step_avg:74.32ms +[2025-09-03 05:48:47] [Rank 0] step:6501/10000 train_time:483171ms step_avg:74.32ms +[2025-09-03 05:48:49] [Rank 0] step:6521/10000 train_time:484728ms step_avg:74.33ms +[2025-09-03 05:48:49] [Rank 0] step:6521/10000 train_time:484728ms step_avg:74.33ms +[2025-09-03 
05:48:50] [Rank 0] step:6541/10000 train_time:486292ms step_avg:74.35ms +[2025-09-03 05:48:50] [Rank 0] step:6541/10000 train_time:486292ms step_avg:74.35ms +[2025-09-03 05:48:52] [Rank 0] step:6561/10000 train_time:487859ms step_avg:74.36ms +[2025-09-03 05:48:52] [Rank 0] step:6561/10000 train_time:487859ms step_avg:74.36ms +[2025-09-03 05:48:54] [Rank 0] step:6581/10000 train_time:489420ms step_avg:74.37ms +[2025-09-03 05:48:54] [Rank 0] step:6581/10000 train_time:489420ms step_avg:74.37ms +[2025-09-03 05:48:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:48:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:49:07] [Rank 0] PRINT: step:6600/10000 val_loss:4.0056 svd_entropy: attn_qk:H=0.7445,top10E=0.26,eRank=146.8,q75/q25=112.03 attn_vo:H=0.8273,top10E=0.15,eRank=267.0,q75/q25=72.43 mlp_w1:H=0.7441,top10E=0.31,eRank=166.2,q75/q25=20.54 mlp_w2:H=0.8386,top10E=0.14,eRank=275.3,q75/q25=46.43 vo_prod:H=0.7385,top10E=0.24,eRank=143.3,q75/q25=5757.26 train_time:491141ms step_avg:74.42ms +[2025-09-03 05:49:07] [Rank 0] PRINT: step:6600/10000 val_loss:4.0056 svd_entropy: attn_qk:H=0.7445,top10E=0.26,eRank=146.8,q75/q25=112.03 attn_vo:H=0.8273,top10E=0.15,eRank=267.0,q75/q25=72.43 mlp_w1:H=0.7441,top10E=0.31,eRank=166.2,q75/q25=20.54 mlp_w2:H=0.8386,top10E=0.14,eRank=275.3,q75/q25=46.43 vo_prod:H=0.7385,top10E=0.24,eRank=143.3,q75/q25=5757.26 train_time:491141ms step_avg:74.42ms +[2025-09-03 05:49:07] [Rank 0] step:6601/10000 train_time:491152ms step_avg:74.41ms +[2025-09-03 05:49:07] [Rank 0] step:6601/10000 train_time:491152ms step_avg:74.41ms +[2025-09-03 05:49:09] [Rank 0] step:6621/10000 train_time:492575ms step_avg:74.40ms +[2025-09-03 05:49:09] [Rank 0] step:6621/10000 train_time:492575ms step_avg:74.40ms +[2025-09-03 05:49:10] [Rank 0] step:6641/10000 train_time:494136ms 
step_avg:74.41ms +[2025-09-03 05:49:10] [Rank 0] step:6641/10000 train_time:494136ms step_avg:74.41ms +[2025-09-03 05:49:12] [Rank 0] step:6661/10000 train_time:495699ms step_avg:74.42ms +[2025-09-03 05:49:12] [Rank 0] step:6661/10000 train_time:495699ms step_avg:74.42ms +[2025-09-03 05:49:13] [Rank 0] step:6681/10000 train_time:497279ms step_avg:74.43ms +[2025-09-03 05:49:13] [Rank 0] step:6681/10000 train_time:497279ms step_avg:74.43ms +[2025-09-03 05:49:15] [Rank 0] step:6701/10000 train_time:498875ms step_avg:74.45ms +[2025-09-03 05:49:15] [Rank 0] step:6701/10000 train_time:498875ms step_avg:74.45ms +[2025-09-03 05:49:16] [Rank 0] step:6721/10000 train_time:500464ms step_avg:74.46ms +[2025-09-03 05:49:16] [Rank 0] step:6721/10000 train_time:500464ms step_avg:74.46ms +[2025-09-03 05:49:18] [Rank 0] step:6741/10000 train_time:502057ms step_avg:74.48ms +[2025-09-03 05:49:18] [Rank 0] step:6741/10000 train_time:502057ms step_avg:74.48ms +[2025-09-03 05:49:20] [Rank 0] step:6761/10000 train_time:503645ms step_avg:74.49ms +[2025-09-03 05:49:20] [Rank 0] step:6761/10000 train_time:503645ms step_avg:74.49ms +[2025-09-03 05:49:21] [Rank 0] step:6781/10000 train_time:505240ms step_avg:74.51ms +[2025-09-03 05:49:21] [Rank 0] step:6781/10000 train_time:505240ms step_avg:74.51ms +[2025-09-03 05:49:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:49:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:49:34] [Rank 0] PRINT: step:6800/10000 val_loss:3.9913 svd_entropy: attn_qk:H=0.7461,top10E=0.26,eRank=148.2,q75/q25=112.65 attn_vo:H=0.8288,top10E=0.15,eRank=269.4,q75/q25=70.57 mlp_w1:H=0.7462,top10E=0.31,eRank=168.2,q75/q25=20.93 mlp_w2:H=0.8394,top10E=0.14,eRank=276.9,q75/q25=46.87 vo_prod:H=0.7404,top10E=0.24,eRank=145.1,q75/q25=5382.47 train_time:506994ms step_avg:74.56ms +[2025-09-03 05:49:34] [Rank 0] PRINT: step:6800/10000 val_loss:3.9913 svd_entropy: attn_qk:H=0.7461,top10E=0.26,eRank=148.2,q75/q25=112.65 attn_vo:H=0.8288,top10E=0.15,eRank=269.4,q75/q25=70.57 mlp_w1:H=0.7462,top10E=0.31,eRank=168.2,q75/q25=20.93 mlp_w2:H=0.8394,top10E=0.14,eRank=276.9,q75/q25=46.87 vo_prod:H=0.7404,top10E=0.24,eRank=145.1,q75/q25=5382.47 train_time:506994ms step_avg:74.56ms +[2025-09-03 05:49:34] [Rank 0] step:6801/10000 train_time:507004ms step_avg:74.55ms +[2025-09-03 05:49:34] [Rank 0] step:6801/10000 train_time:507004ms step_avg:74.55ms +[2025-09-03 05:49:36] [Rank 0] step:6821/10000 train_time:508440ms step_avg:74.54ms +[2025-09-03 05:49:36] [Rank 0] step:6821/10000 train_time:508440ms step_avg:74.54ms +[2025-09-03 05:49:38] [Rank 0] step:6841/10000 train_time:510024ms step_avg:74.55ms +[2025-09-03 05:49:38] [Rank 0] step:6841/10000 train_time:510024ms step_avg:74.55ms +[2025-09-03 05:49:39] [Rank 0] step:6861/10000 train_time:511613ms step_avg:74.57ms +[2025-09-03 05:49:39] [Rank 0] step:6861/10000 train_time:511613ms step_avg:74.57ms +[2025-09-03 05:49:41] [Rank 0] step:6881/10000 train_time:513203ms step_avg:74.58ms +[2025-09-03 05:49:41] [Rank 0] step:6881/10000 train_time:513203ms step_avg:74.58ms +[2025-09-03 05:49:42] [Rank 0] step:6901/10000 train_time:514789ms step_avg:74.60ms +[2025-09-03 05:49:42] [Rank 0] step:6901/10000 train_time:514789ms step_avg:74.60ms +[2025-09-03 05:49:44] [Rank 0] step:6921/10000 train_time:516373ms step_avg:74.61ms +[2025-09-03 05:49:44] [Rank 0] step:6921/10000 train_time:516373ms step_avg:74.61ms +[2025-09-03 
05:49:46] [Rank 0] step:6941/10000 train_time:517970ms step_avg:74.62ms +[2025-09-03 05:49:46] [Rank 0] step:6941/10000 train_time:517970ms step_avg:74.62ms +[2025-09-03 05:49:47] [Rank 0] step:6961/10000 train_time:519573ms step_avg:74.64ms +[2025-09-03 05:49:47] [Rank 0] step:6961/10000 train_time:519573ms step_avg:74.64ms +[2025-09-03 05:49:49] [Rank 0] step:6981/10000 train_time:521164ms step_avg:74.65ms +[2025-09-03 05:49:49] [Rank 0] step:6981/10000 train_time:521164ms step_avg:74.65ms +[2025-09-03 05:49:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:49:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:50:02] [Rank 0] PRINT: step:7000/10000 val_loss:3.9731 svd_entropy: attn_qk:H=0.7476,top10E=0.26,eRank=149.5,q75/q25=112.73 attn_vo:H=0.8301,top10E=0.15,eRank=271.5,q75/q25=68.86 mlp_w1:H=0.7481,top10E=0.30,eRank=170.1,q75/q25=21.33 mlp_w2:H=0.8402,top10E=0.13,eRank=278.6,q75/q25=47.25 vo_prod:H=0.7422,top10E=0.23,eRank=146.9,q75/q25=5003.42 train_time:522919ms step_avg:74.70ms +[2025-09-03 05:50:02] [Rank 0] PRINT: step:7000/10000 val_loss:3.9731 svd_entropy: attn_qk:H=0.7476,top10E=0.26,eRank=149.5,q75/q25=112.73 attn_vo:H=0.8301,top10E=0.15,eRank=271.5,q75/q25=68.86 mlp_w1:H=0.7481,top10E=0.30,eRank=170.1,q75/q25=21.33 mlp_w2:H=0.8402,top10E=0.13,eRank=278.6,q75/q25=47.25 vo_prod:H=0.7422,top10E=0.23,eRank=146.9,q75/q25=5003.42 train_time:522919ms step_avg:74.70ms +[2025-09-03 05:50:02] [Rank 0] step:7001/10000 train_time:522930ms step_avg:74.69ms +[2025-09-03 05:50:02] [Rank 0] step:7001/10000 train_time:522930ms step_avg:74.69ms +[2025-09-03 05:50:04] [Rank 0] step:7021/10000 train_time:524361ms step_avg:74.68ms +[2025-09-03 05:50:04] [Rank 0] step:7021/10000 train_time:524361ms step_avg:74.68ms +[2025-09-03 05:50:05] [Rank 0] step:7041/10000 train_time:525951ms 
step_avg:74.70ms +[2025-09-03 05:50:05] [Rank 0] step:7041/10000 train_time:525951ms step_avg:74.70ms +[2025-09-03 05:50:07] [Rank 0] step:7061/10000 train_time:527536ms step_avg:74.71ms +[2025-09-03 05:50:07] [Rank 0] step:7061/10000 train_time:527536ms step_avg:74.71ms +[2025-09-03 05:50:09] [Rank 0] step:7081/10000 train_time:529124ms step_avg:74.72ms +[2025-09-03 05:50:09] [Rank 0] step:7081/10000 train_time:529124ms step_avg:74.72ms +[2025-09-03 05:50:10] [Rank 0] step:7101/10000 train_time:530714ms step_avg:74.74ms +[2025-09-03 05:50:10] [Rank 0] step:7101/10000 train_time:530714ms step_avg:74.74ms +[2025-09-03 05:50:12] [Rank 0] step:7121/10000 train_time:532303ms step_avg:74.75ms +[2025-09-03 05:50:12] [Rank 0] step:7121/10000 train_time:532303ms step_avg:74.75ms +[2025-09-03 05:50:13] [Rank 0] step:7141/10000 train_time:533896ms step_avg:74.76ms +[2025-09-03 05:50:13] [Rank 0] step:7141/10000 train_time:533896ms step_avg:74.76ms +[2025-09-03 05:50:15] [Rank 0] step:7161/10000 train_time:535485ms step_avg:74.78ms +[2025-09-03 05:50:15] [Rank 0] step:7161/10000 train_time:535485ms step_avg:74.78ms +[2025-09-03 05:50:16] [Rank 0] step:7181/10000 train_time:537077ms step_avg:74.79ms +[2025-09-03 05:50:16] [Rank 0] step:7181/10000 train_time:537077ms step_avg:74.79ms +[2025-09-03 05:50:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:50:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:50:30] [Rank 0] PRINT: step:7200/10000 val_loss:3.9632 svd_entropy: attn_qk:H=0.7490,top10E=0.26,eRank=150.8,q75/q25=112.60 attn_vo:H=0.8314,top10E=0.14,eRank=273.5,q75/q25=67.25 mlp_w1:H=0.7499,top10E=0.30,eRank=171.8,q75/q25=21.60 mlp_w2:H=0.8409,top10E=0.13,eRank=280.1,q75/q25=47.79 vo_prod:H=0.7439,top10E=0.23,eRank=148.6,q75/q25=4726.44 train_time:538832ms step_avg:74.84ms +[2025-09-03 05:50:30] [Rank 0] PRINT: step:7200/10000 val_loss:3.9632 svd_entropy: attn_qk:H=0.7490,top10E=0.26,eRank=150.8,q75/q25=112.60 attn_vo:H=0.8314,top10E=0.14,eRank=273.5,q75/q25=67.25 mlp_w1:H=0.7499,top10E=0.30,eRank=171.8,q75/q25=21.60 mlp_w2:H=0.8409,top10E=0.13,eRank=280.1,q75/q25=47.79 vo_prod:H=0.7439,top10E=0.23,eRank=148.6,q75/q25=4726.44 train_time:538832ms step_avg:74.84ms +[2025-09-03 05:50:30] [Rank 0] step:7201/10000 train_time:538844ms step_avg:74.83ms +[2025-09-03 05:50:30] [Rank 0] step:7201/10000 train_time:538844ms step_avg:74.83ms +[2025-09-03 05:50:31] [Rank 0] step:7221/10000 train_time:540303ms step_avg:74.82ms +[2025-09-03 05:50:31] [Rank 0] step:7221/10000 train_time:540303ms step_avg:74.82ms +[2025-09-03 05:50:33] [Rank 0] step:7241/10000 train_time:541886ms step_avg:74.84ms +[2025-09-03 05:50:33] [Rank 0] step:7241/10000 train_time:541886ms step_avg:74.84ms +[2025-09-03 05:50:35] [Rank 0] step:7261/10000 train_time:543474ms step_avg:74.85ms +[2025-09-03 05:50:35] [Rank 0] step:7261/10000 train_time:543474ms step_avg:74.85ms +[2025-09-03 05:50:36] [Rank 0] step:7281/10000 train_time:545071ms step_avg:74.86ms +[2025-09-03 05:50:36] [Rank 0] step:7281/10000 train_time:545071ms step_avg:74.86ms +[2025-09-03 05:50:38] [Rank 0] step:7301/10000 train_time:546659ms step_avg:74.87ms +[2025-09-03 05:50:38] [Rank 0] step:7301/10000 train_time:546659ms step_avg:74.87ms +[2025-09-03 05:50:39] [Rank 0] step:7321/10000 train_time:548257ms step_avg:74.89ms +[2025-09-03 05:50:39] [Rank 0] step:7321/10000 train_time:548257ms step_avg:74.89ms +[2025-09-03 
05:50:41] [Rank 0] step:7341/10000 train_time:549851ms step_avg:74.90ms +[2025-09-03 05:50:41] [Rank 0] step:7341/10000 train_time:549851ms step_avg:74.90ms +[2025-09-03 05:50:43] [Rank 0] step:7361/10000 train_time:551447ms step_avg:74.91ms +[2025-09-03 05:50:43] [Rank 0] step:7361/10000 train_time:551447ms step_avg:74.91ms +[2025-09-03 05:50:44] [Rank 0] step:7381/10000 train_time:553044ms step_avg:74.93ms +[2025-09-03 05:50:44] [Rank 0] step:7381/10000 train_time:553044ms step_avg:74.93ms +[2025-09-03 05:50:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:50:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:50:57] [Rank 0] PRINT: step:7400/10000 val_loss:3.9447 svd_entropy: attn_qk:H=0.7502,top10E=0.26,eRank=152.0,q75/q25=112.95 attn_vo:H=0.8325,top10E=0.14,eRank=275.2,q75/q25=65.88 mlp_w1:H=0.7513,top10E=0.30,eRank=173.3,q75/q25=21.92 mlp_w2:H=0.8416,top10E=0.13,eRank=281.4,q75/q25=48.05 vo_prod:H=0.7454,top10E=0.23,eRank=150.1,q75/q25=4586.88 train_time:554781ms step_avg:74.97ms +[2025-09-03 05:50:57] [Rank 0] PRINT: step:7400/10000 val_loss:3.9447 svd_entropy: attn_qk:H=0.7502,top10E=0.26,eRank=152.0,q75/q25=112.95 attn_vo:H=0.8325,top10E=0.14,eRank=275.2,q75/q25=65.88 mlp_w1:H=0.7513,top10E=0.30,eRank=173.3,q75/q25=21.92 mlp_w2:H=0.8416,top10E=0.13,eRank=281.4,q75/q25=48.05 vo_prod:H=0.7454,top10E=0.23,eRank=150.1,q75/q25=4586.88 train_time:554781ms step_avg:74.97ms +[2025-09-03 05:50:58] [Rank 0] step:7401/10000 train_time:554792ms step_avg:74.96ms +[2025-09-03 05:50:58] [Rank 0] step:7401/10000 train_time:554792ms step_avg:74.96ms +[2025-09-03 05:50:59] [Rank 0] step:7421/10000 train_time:556239ms step_avg:74.95ms +[2025-09-03 05:50:59] [Rank 0] step:7421/10000 train_time:556239ms step_avg:74.95ms +[2025-09-03 05:51:01] [Rank 0] step:7441/10000 train_time:557826ms 
step_avg:74.97ms +[2025-09-03 05:51:01] [Rank 0] step:7441/10000 train_time:557826ms step_avg:74.97ms +[2025-09-03 05:51:02] [Rank 0] step:7461/10000 train_time:559419ms step_avg:74.98ms +[2025-09-03 05:51:02] [Rank 0] step:7461/10000 train_time:559419ms step_avg:74.98ms +[2025-09-03 05:51:04] [Rank 0] step:7481/10000 train_time:561015ms step_avg:74.99ms +[2025-09-03 05:51:04] [Rank 0] step:7481/10000 train_time:561015ms step_avg:74.99ms +[2025-09-03 05:51:06] [Rank 0] step:7501/10000 train_time:562612ms step_avg:75.00ms +[2025-09-03 05:51:06] [Rank 0] step:7501/10000 train_time:562612ms step_avg:75.00ms +[2025-09-03 05:51:07] [Rank 0] step:7521/10000 train_time:564210ms step_avg:75.02ms +[2025-09-03 05:51:07] [Rank 0] step:7521/10000 train_time:564210ms step_avg:75.02ms +[2025-09-03 05:51:09] [Rank 0] step:7541/10000 train_time:565818ms step_avg:75.03ms +[2025-09-03 05:51:09] [Rank 0] step:7541/10000 train_time:565818ms step_avg:75.03ms +[2025-09-03 05:51:10] [Rank 0] step:7561/10000 train_time:567401ms step_avg:75.04ms +[2025-09-03 05:51:10] [Rank 0] step:7561/10000 train_time:567401ms step_avg:75.04ms +[2025-09-03 05:51:12] [Rank 0] step:7581/10000 train_time:569008ms step_avg:75.06ms +[2025-09-03 05:51:12] [Rank 0] step:7581/10000 train_time:569008ms step_avg:75.06ms +[2025-09-03 05:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:51:25] [Rank 0] PRINT: step:7600/10000 val_loss:3.9397 svd_entropy: attn_qk:H=0.7514,top10E=0.25,eRank=153.1,q75/q25=111.03 attn_vo:H=0.8334,top10E=0.14,eRank=276.6,q75/q25=64.35 mlp_w1:H=0.7526,top10E=0.30,eRank=174.7,q75/q25=22.06 mlp_w2:H=0.8422,top10E=0.13,eRank=282.7,q75/q25=48.25 vo_prod:H=0.7465,top10E=0.23,eRank=151.2,q75/q25=4278.22 train_time:570773ms step_avg:75.10ms +[2025-09-03 05:51:25] [Rank 0] PRINT: step:7600/10000 val_loss:3.9397 svd_entropy: attn_qk:H=0.7514,top10E=0.25,eRank=153.1,q75/q25=111.03 attn_vo:H=0.8334,top10E=0.14,eRank=276.6,q75/q25=64.35 mlp_w1:H=0.7526,top10E=0.30,eRank=174.7,q75/q25=22.06 mlp_w2:H=0.8422,top10E=0.13,eRank=282.7,q75/q25=48.25 vo_prod:H=0.7465,top10E=0.23,eRank=151.2,q75/q25=4278.22 train_time:570773ms step_avg:75.10ms +[2025-09-03 05:51:25] [Rank 0] step:7601/10000 train_time:570784ms step_avg:75.09ms +[2025-09-03 05:51:25] [Rank 0] step:7601/10000 train_time:570784ms step_avg:75.09ms +[2025-09-03 05:51:27] [Rank 0] step:7621/10000 train_time:572224ms step_avg:75.09ms +[2025-09-03 05:51:27] [Rank 0] step:7621/10000 train_time:572224ms step_avg:75.09ms +[2025-09-03 05:51:28] [Rank 0] step:7641/10000 train_time:573815ms step_avg:75.10ms +[2025-09-03 05:51:28] [Rank 0] step:7641/10000 train_time:573815ms step_avg:75.10ms +[2025-09-03 05:51:30] [Rank 0] step:7661/10000 train_time:575410ms step_avg:75.11ms +[2025-09-03 05:51:30] [Rank 0] step:7661/10000 train_time:575410ms step_avg:75.11ms +[2025-09-03 05:51:32] [Rank 0] step:7681/10000 train_time:576997ms step_avg:75.12ms +[2025-09-03 05:51:32] [Rank 0] step:7681/10000 train_time:576997ms step_avg:75.12ms +[2025-09-03 05:51:33] [Rank 0] step:7701/10000 train_time:578587ms step_avg:75.13ms +[2025-09-03 05:51:33] [Rank 0] step:7701/10000 train_time:578587ms step_avg:75.13ms +[2025-09-03 05:51:35] [Rank 0] step:7721/10000 train_time:580193ms step_avg:75.14ms +[2025-09-03 05:51:35] [Rank 0] step:7721/10000 train_time:580193ms step_avg:75.14ms +[2025-09-03 
05:51:36] [Rank 0] step:7741/10000 train_time:581787ms step_avg:75.16ms +[2025-09-03 05:51:36] [Rank 0] step:7741/10000 train_time:581787ms step_avg:75.16ms +[2025-09-03 05:51:38] [Rank 0] step:7761/10000 train_time:583388ms step_avg:75.17ms +[2025-09-03 05:51:38] [Rank 0] step:7761/10000 train_time:583388ms step_avg:75.17ms +[2025-09-03 05:51:40] [Rank 0] step:7781/10000 train_time:584990ms step_avg:75.18ms +[2025-09-03 05:51:40] [Rank 0] step:7781/10000 train_time:584990ms step_avg:75.18ms +[2025-09-03 05:51:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:51:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:51:53] [Rank 0] PRINT: step:7800/10000 val_loss:3.9261 svd_entropy: attn_qk:H=0.7525,top10E=0.25,eRank=154.1,q75/q25=111.05 attn_vo:H=0.8343,top10E=0.14,eRank=278.1,q75/q25=63.21 mlp_w1:H=0.7540,top10E=0.30,eRank=176.1,q75/q25=22.39 mlp_w2:H=0.8427,top10E=0.13,eRank=283.9,q75/q25=48.58 vo_prod:H=0.7478,top10E=0.23,eRank=152.6,q75/q25=4005.63 train_time:586756ms step_avg:75.23ms +[2025-09-03 05:51:53] [Rank 0] PRINT: step:7800/10000 val_loss:3.9261 svd_entropy: attn_qk:H=0.7525,top10E=0.25,eRank=154.1,q75/q25=111.05 attn_vo:H=0.8343,top10E=0.14,eRank=278.1,q75/q25=63.21 mlp_w1:H=0.7540,top10E=0.30,eRank=176.1,q75/q25=22.39 mlp_w2:H=0.8427,top10E=0.13,eRank=283.9,q75/q25=48.58 vo_prod:H=0.7478,top10E=0.23,eRank=152.6,q75/q25=4005.63 train_time:586756ms step_avg:75.23ms +[2025-09-03 05:51:53] [Rank 0] step:7801/10000 train_time:586767ms step_avg:75.22ms +[2025-09-03 05:51:53] [Rank 0] step:7801/10000 train_time:586767ms step_avg:75.22ms +[2025-09-03 05:51:55] [Rank 0] step:7821/10000 train_time:588213ms step_avg:75.21ms +[2025-09-03 05:51:55] [Rank 0] step:7821/10000 train_time:588213ms step_avg:75.21ms +[2025-09-03 05:51:56] [Rank 0] step:7841/10000 train_time:589801ms 
step_avg:75.22ms +[2025-09-03 05:51:56] [Rank 0] step:7841/10000 train_time:589801ms step_avg:75.22ms +[2025-09-03 05:51:58] [Rank 0] step:7861/10000 train_time:591398ms step_avg:75.23ms +[2025-09-03 05:51:58] [Rank 0] step:7861/10000 train_time:591398ms step_avg:75.23ms +[2025-09-03 05:51:59] [Rank 0] step:7881/10000 train_time:593000ms step_avg:75.24ms +[2025-09-03 05:51:59] [Rank 0] step:7881/10000 train_time:593000ms step_avg:75.24ms +[2025-09-03 05:52:01] [Rank 0] step:7901/10000 train_time:594593ms step_avg:75.26ms +[2025-09-03 05:52:01] [Rank 0] step:7901/10000 train_time:594593ms step_avg:75.26ms +[2025-09-03 05:52:03] [Rank 0] step:7921/10000 train_time:596187ms step_avg:75.27ms +[2025-09-03 05:52:03] [Rank 0] step:7921/10000 train_time:596187ms step_avg:75.27ms +[2025-09-03 05:52:04] [Rank 0] step:7941/10000 train_time:597789ms step_avg:75.28ms +[2025-09-03 05:52:04] [Rank 0] step:7941/10000 train_time:597789ms step_avg:75.28ms +[2025-09-03 05:52:06] [Rank 0] step:7961/10000 train_time:599388ms step_avg:75.29ms +[2025-09-03 05:52:06] [Rank 0] step:7961/10000 train_time:599388ms step_avg:75.29ms +[2025-09-03 05:52:07] [Rank 0] step:7981/10000 train_time:600981ms step_avg:75.30ms +[2025-09-03 05:52:07] [Rank 0] step:7981/10000 train_time:600981ms step_avg:75.30ms +[2025-09-03 05:52:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:52:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:52:21] [Rank 0] PRINT: step:8000/10000 val_loss:3.9101 svd_entropy: attn_qk:H=0.7535,top10E=0.25,eRank=155.0,q75/q25=111.15 attn_vo:H=0.8351,top10E=0.14,eRank=279.4,q75/q25=62.17 mlp_w1:H=0.7552,top10E=0.29,eRank=177.4,q75/q25=22.68 mlp_w2:H=0.8433,top10E=0.13,eRank=285.0,q75/q25=48.68 vo_prod:H=0.7491,top10E=0.23,eRank=153.9,q75/q25=3855.18 train_time:602735ms step_avg:75.34ms +[2025-09-03 05:52:21] [Rank 0] PRINT: step:8000/10000 val_loss:3.9101 svd_entropy: attn_qk:H=0.7535,top10E=0.25,eRank=155.0,q75/q25=111.15 attn_vo:H=0.8351,top10E=0.14,eRank=279.4,q75/q25=62.17 mlp_w1:H=0.7552,top10E=0.29,eRank=177.4,q75/q25=22.68 mlp_w2:H=0.8433,top10E=0.13,eRank=285.0,q75/q25=48.68 vo_prod:H=0.7491,top10E=0.23,eRank=153.9,q75/q25=3855.18 train_time:602735ms step_avg:75.34ms +[2025-09-03 05:52:21] [Rank 0] step:8001/10000 train_time:602746ms step_avg:75.33ms +[2025-09-03 05:52:21] [Rank 0] step:8001/10000 train_time:602746ms step_avg:75.33ms +[2025-09-03 05:52:22] [Rank 0] step:8021/10000 train_time:604176ms step_avg:75.32ms +[2025-09-03 05:52:22] [Rank 0] step:8021/10000 train_time:604176ms step_avg:75.32ms +[2025-09-03 05:52:24] [Rank 0] step:8041/10000 train_time:605776ms step_avg:75.34ms +[2025-09-03 05:52:24] [Rank 0] step:8041/10000 train_time:605776ms step_avg:75.34ms +[2025-09-03 05:52:26] [Rank 0] step:8061/10000 train_time:607367ms step_avg:75.35ms +[2025-09-03 05:52:26] [Rank 0] step:8061/10000 train_time:607367ms step_avg:75.35ms +[2025-09-03 05:52:27] [Rank 0] step:8081/10000 train_time:608952ms step_avg:75.36ms +[2025-09-03 05:52:27] [Rank 0] step:8081/10000 train_time:608952ms step_avg:75.36ms +[2025-09-03 05:52:29] [Rank 0] step:8101/10000 train_time:610553ms step_avg:75.37ms +[2025-09-03 05:52:29] [Rank 0] step:8101/10000 train_time:610553ms step_avg:75.37ms +[2025-09-03 05:52:30] [Rank 0] step:8121/10000 train_time:612142ms step_avg:75.38ms +[2025-09-03 05:52:30] [Rank 0] step:8121/10000 train_time:612142ms step_avg:75.38ms +[2025-09-03 
05:52:32] [Rank 0] step:8141/10000 train_time:613831ms step_avg:75.40ms +[2025-09-03 05:52:32] [Rank 0] step:8141/10000 train_time:613831ms step_avg:75.40ms +[2025-09-03 05:52:34] [Rank 0] step:8161/10000 train_time:615437ms step_avg:75.41ms +[2025-09-03 05:52:34] [Rank 0] step:8161/10000 train_time:615437ms step_avg:75.41ms +[2025-09-03 05:52:35] [Rank 0] step:8181/10000 train_time:617065ms step_avg:75.43ms +[2025-09-03 05:52:35] [Rank 0] step:8181/10000 train_time:617065ms step_avg:75.43ms +[2025-09-03 05:52:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:52:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:52:49] [Rank 0] PRINT: step:8200/10000 val_loss:3.9007 svd_entropy: attn_qk:H=0.7543,top10E=0.25,eRank=155.8,q75/q25=111.60 attn_vo:H=0.8359,top10E=0.14,eRank=280.7,q75/q25=61.34 mlp_w1:H=0.7562,top10E=0.29,eRank=178.5,q75/q25=22.86 mlp_w2:H=0.8438,top10E=0.13,eRank=286.0,q75/q25=48.75 vo_prod:H=0.7502,top10E=0.23,eRank=155.0,q75/q25=3674.31 train_time:618876ms step_avg:75.47ms +[2025-09-03 05:52:49] [Rank 0] PRINT: step:8200/10000 val_loss:3.9007 svd_entropy: attn_qk:H=0.7543,top10E=0.25,eRank=155.8,q75/q25=111.60 attn_vo:H=0.8359,top10E=0.14,eRank=280.7,q75/q25=61.34 mlp_w1:H=0.7562,top10E=0.29,eRank=178.5,q75/q25=22.86 mlp_w2:H=0.8438,top10E=0.13,eRank=286.0,q75/q25=48.75 vo_prod:H=0.7502,top10E=0.23,eRank=155.0,q75/q25=3674.31 train_time:618876ms step_avg:75.47ms +[2025-09-03 05:52:49] [Rank 0] step:8201/10000 train_time:618888ms step_avg:75.46ms +[2025-09-03 05:52:49] [Rank 0] step:8201/10000 train_time:618888ms step_avg:75.46ms +[2025-09-03 05:52:50] [Rank 0] step:8221/10000 train_time:620377ms step_avg:75.46ms +[2025-09-03 05:52:50] [Rank 0] step:8221/10000 train_time:620377ms step_avg:75.46ms +[2025-09-03 05:52:52] [Rank 0] step:8241/10000 train_time:622021ms 
step_avg:75.48ms +[2025-09-03 05:52:52] [Rank 0] step:8241/10000 train_time:622021ms step_avg:75.48ms +[2025-09-03 05:52:54] [Rank 0] step:8261/10000 train_time:623646ms step_avg:75.49ms +[2025-09-03 05:52:54] [Rank 0] step:8261/10000 train_time:623646ms step_avg:75.49ms +[2025-09-03 05:52:55] [Rank 0] step:8281/10000 train_time:625270ms step_avg:75.51ms +[2025-09-03 05:52:55] [Rank 0] step:8281/10000 train_time:625270ms step_avg:75.51ms +[2025-09-03 05:52:57] [Rank 0] step:8301/10000 train_time:626891ms step_avg:75.52ms +[2025-09-03 05:52:57] [Rank 0] step:8301/10000 train_time:626891ms step_avg:75.52ms +[2025-09-03 05:52:58] [Rank 0] step:8321/10000 train_time:628503ms step_avg:75.53ms +[2025-09-03 05:52:58] [Rank 0] step:8321/10000 train_time:628503ms step_avg:75.53ms +[2025-09-03 05:53:00] [Rank 0] step:8341/10000 train_time:630129ms step_avg:75.55ms +[2025-09-03 05:53:00] [Rank 0] step:8341/10000 train_time:630129ms step_avg:75.55ms +[2025-09-03 05:53:02] [Rank 0] step:8361/10000 train_time:631758ms step_avg:75.56ms +[2025-09-03 05:53:02] [Rank 0] step:8361/10000 train_time:631758ms step_avg:75.56ms +[2025-09-03 05:53:03] [Rank 0] step:8381/10000 train_time:633380ms step_avg:75.57ms +[2025-09-03 05:53:03] [Rank 0] step:8381/10000 train_time:633380ms step_avg:75.57ms +[2025-09-03 05:53:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:53:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:53:17] [Rank 0] PRINT: step:8400/10000 val_loss:3.8901 svd_entropy: attn_qk:H=0.7551,top10E=0.25,eRank=156.6,q75/q25=111.34 attn_vo:H=0.8366,top10E=0.14,eRank=281.8,q75/q25=60.50 mlp_w1:H=0.7572,top10E=0.29,eRank=179.5,q75/q25=23.01 mlp_w2:H=0.8442,top10E=0.13,eRank=286.9,q75/q25=48.84 vo_prod:H=0.7512,top10E=0.22,eRank=156.1,q75/q25=3508.44 train_time:635160ms step_avg:75.61ms +[2025-09-03 05:53:17] [Rank 0] PRINT: step:8400/10000 val_loss:3.8901 svd_entropy: attn_qk:H=0.7551,top10E=0.25,eRank=156.6,q75/q25=111.34 attn_vo:H=0.8366,top10E=0.14,eRank=281.8,q75/q25=60.50 mlp_w1:H=0.7572,top10E=0.29,eRank=179.5,q75/q25=23.01 mlp_w2:H=0.8442,top10E=0.13,eRank=286.9,q75/q25=48.84 vo_prod:H=0.7512,top10E=0.22,eRank=156.1,q75/q25=3508.44 train_time:635160ms step_avg:75.61ms +[2025-09-03 05:53:17] [Rank 0] step:8401/10000 train_time:635171ms step_avg:75.61ms +[2025-09-03 05:53:17] [Rank 0] step:8401/10000 train_time:635171ms step_avg:75.61ms +[2025-09-03 05:53:18] [Rank 0] step:8421/10000 train_time:636645ms step_avg:75.60ms +[2025-09-03 05:53:18] [Rank 0] step:8421/10000 train_time:636645ms step_avg:75.60ms +[2025-09-03 05:53:20] [Rank 0] step:8441/10000 train_time:638264ms step_avg:75.61ms +[2025-09-03 05:53:20] [Rank 0] step:8441/10000 train_time:638264ms step_avg:75.61ms +[2025-09-03 05:53:22] [Rank 0] step:8461/10000 train_time:639883ms step_avg:75.63ms +[2025-09-03 05:53:22] [Rank 0] step:8461/10000 train_time:639883ms step_avg:75.63ms +[2025-09-03 05:53:23] [Rank 0] step:8481/10000 train_time:641508ms step_avg:75.64ms +[2025-09-03 05:53:23] [Rank 0] step:8481/10000 train_time:641508ms step_avg:75.64ms +[2025-09-03 05:53:25] [Rank 0] step:8501/10000 train_time:643154ms step_avg:75.66ms +[2025-09-03 05:53:25] [Rank 0] step:8501/10000 train_time:643154ms step_avg:75.66ms +[2025-09-03 05:53:27] [Rank 0] step:8521/10000 train_time:644782ms step_avg:75.67ms +[2025-09-03 05:53:27] [Rank 0] step:8521/10000 train_time:644782ms step_avg:75.67ms +[2025-09-03 
05:53:28] [Rank 0] step:8541/10000 train_time:646418ms step_avg:75.68ms +[2025-09-03 05:53:28] [Rank 0] step:8541/10000 train_time:646418ms step_avg:75.68ms +[2025-09-03 05:53:30] [Rank 0] step:8561/10000 train_time:648045ms step_avg:75.70ms +[2025-09-03 05:53:30] [Rank 0] step:8561/10000 train_time:648045ms step_avg:75.70ms +[2025-09-03 05:53:31] [Rank 0] step:8581/10000 train_time:649668ms step_avg:75.71ms +[2025-09-03 05:53:31] [Rank 0] step:8581/10000 train_time:649668ms step_avg:75.71ms +[2025-09-03 05:53:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:53:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:53:45] [Rank 0] PRINT: step:8600/10000 val_loss:3.8820 svd_entropy: attn_qk:H=0.7558,top10E=0.25,eRank=157.2,q75/q25=111.34 attn_vo:H=0.8371,top10E=0.14,eRank=282.7,q75/q25=59.55 mlp_w1:H=0.7582,top10E=0.29,eRank=180.5,q75/q25=23.19 mlp_w2:H=0.8446,top10E=0.13,eRank=287.7,q75/q25=49.11 vo_prod:H=0.7520,top10E=0.22,eRank=156.9,q75/q25=3437.21 train_time:651472ms step_avg:75.75ms +[2025-09-03 05:53:45] [Rank 0] PRINT: step:8600/10000 val_loss:3.8820 svd_entropy: attn_qk:H=0.7558,top10E=0.25,eRank=157.2,q75/q25=111.34 attn_vo:H=0.8371,top10E=0.14,eRank=282.7,q75/q25=59.55 mlp_w1:H=0.7582,top10E=0.29,eRank=180.5,q75/q25=23.19 mlp_w2:H=0.8446,top10E=0.13,eRank=287.7,q75/q25=49.11 vo_prod:H=0.7520,top10E=0.22,eRank=156.9,q75/q25=3437.21 train_time:651472ms step_avg:75.75ms +[2025-09-03 05:53:45] [Rank 0] step:8601/10000 train_time:651483ms step_avg:75.75ms +[2025-09-03 05:53:45] [Rank 0] step:8601/10000 train_time:651483ms step_avg:75.75ms +[2025-09-03 05:53:46] [Rank 0] step:8621/10000 train_time:652973ms step_avg:75.74ms +[2025-09-03 05:53:46] [Rank 0] step:8621/10000 train_time:652973ms step_avg:75.74ms +[2025-09-03 05:53:48] [Rank 0] step:8641/10000 train_time:654597ms 
step_avg:75.75ms +[2025-09-03 05:53:48] [Rank 0] step:8641/10000 train_time:654597ms step_avg:75.75ms +[2025-09-03 05:53:50] [Rank 0] step:8661/10000 train_time:656218ms step_avg:75.77ms +[2025-09-03 05:53:50] [Rank 0] step:8661/10000 train_time:656218ms step_avg:75.77ms +[2025-09-03 05:53:51] [Rank 0] step:8681/10000 train_time:657843ms step_avg:75.78ms +[2025-09-03 05:53:51] [Rank 0] step:8681/10000 train_time:657843ms step_avg:75.78ms +[2025-09-03 05:53:53] [Rank 0] step:8701/10000 train_time:659459ms step_avg:75.79ms +[2025-09-03 05:53:53] [Rank 0] step:8701/10000 train_time:659459ms step_avg:75.79ms +[2025-09-03 05:53:55] [Rank 0] step:8721/10000 train_time:661086ms step_avg:75.80ms +[2025-09-03 05:53:55] [Rank 0] step:8721/10000 train_time:661086ms step_avg:75.80ms +[2025-09-03 05:53:56] [Rank 0] step:8741/10000 train_time:662705ms step_avg:75.82ms +[2025-09-03 05:53:56] [Rank 0] step:8741/10000 train_time:662705ms step_avg:75.82ms +[2025-09-03 05:53:58] [Rank 0] step:8761/10000 train_time:664321ms step_avg:75.83ms +[2025-09-03 05:53:58] [Rank 0] step:8761/10000 train_time:664321ms step_avg:75.83ms +[2025-09-03 05:53:59] [Rank 0] step:8781/10000 train_time:665950ms step_avg:75.84ms +[2025-09-03 05:53:59] [Rank 0] step:8781/10000 train_time:665950ms step_avg:75.84ms +[2025-09-03 05:54:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:54:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:54:13] [Rank 0] PRINT: step:8800/10000 val_loss:3.8725 svd_entropy: attn_qk:H=0.7564,top10E=0.25,eRank=157.8,q75/q25=111.59 attn_vo:H=0.8377,top10E=0.14,eRank=283.6,q75/q25=58.83 mlp_w1:H=0.7590,top10E=0.29,eRank=181.4,q75/q25=23.23 mlp_w2:H=0.8450,top10E=0.13,eRank=288.5,q75/q25=49.15 vo_prod:H=0.7528,top10E=0.22,eRank=157.7,q75/q25=3358.86 train_time:667737ms step_avg:75.88ms +[2025-09-03 05:54:13] [Rank 0] PRINT: step:8800/10000 val_loss:3.8725 svd_entropy: attn_qk:H=0.7564,top10E=0.25,eRank=157.8,q75/q25=111.59 attn_vo:H=0.8377,top10E=0.14,eRank=283.6,q75/q25=58.83 mlp_w1:H=0.7590,top10E=0.29,eRank=181.4,q75/q25=23.23 mlp_w2:H=0.8450,top10E=0.13,eRank=288.5,q75/q25=49.15 vo_prod:H=0.7528,top10E=0.22,eRank=157.7,q75/q25=3358.86 train_time:667737ms step_avg:75.88ms +[2025-09-03 05:54:13] [Rank 0] step:8801/10000 train_time:667748ms step_avg:75.87ms +[2025-09-03 05:54:13] [Rank 0] step:8801/10000 train_time:667748ms step_avg:75.87ms +[2025-09-03 05:54:14] [Rank 0] step:8821/10000 train_time:669207ms step_avg:75.87ms +[2025-09-03 05:54:14] [Rank 0] step:8821/10000 train_time:669207ms step_avg:75.87ms +[2025-09-03 05:54:16] [Rank 0] step:8841/10000 train_time:670850ms step_avg:75.88ms +[2025-09-03 05:54:16] [Rank 0] step:8841/10000 train_time:670850ms step_avg:75.88ms +[2025-09-03 05:54:18] [Rank 0] step:8861/10000 train_time:672467ms step_avg:75.89ms +[2025-09-03 05:54:18] [Rank 0] step:8861/10000 train_time:672467ms step_avg:75.89ms +[2025-09-03 05:54:19] [Rank 0] step:8881/10000 train_time:674087ms step_avg:75.90ms +[2025-09-03 05:54:19] [Rank 0] step:8881/10000 train_time:674087ms step_avg:75.90ms +[2025-09-03 05:54:21] [Rank 0] step:8901/10000 train_time:675717ms step_avg:75.91ms +[2025-09-03 05:54:21] [Rank 0] step:8901/10000 train_time:675717ms step_avg:75.91ms +[2025-09-03 05:54:23] [Rank 0] step:8921/10000 train_time:677348ms step_avg:75.93ms +[2025-09-03 05:54:23] [Rank 0] step:8921/10000 train_time:677348ms step_avg:75.93ms +[2025-09-03 
05:54:24] [Rank 0] step:8941/10000 train_time:678983ms step_avg:75.94ms +[2025-09-03 05:54:24] [Rank 0] step:8941/10000 train_time:678983ms step_avg:75.94ms +[2025-09-03 05:54:26] [Rank 0] step:8961/10000 train_time:680603ms step_avg:75.95ms +[2025-09-03 05:54:26] [Rank 0] step:8961/10000 train_time:680603ms step_avg:75.95ms +[2025-09-03 05:54:27] [Rank 0] step:8981/10000 train_time:682220ms step_avg:75.96ms +[2025-09-03 05:54:27] [Rank 0] step:8981/10000 train_time:682220ms step_avg:75.96ms +[2025-09-03 05:54:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:54:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:54:41] [Rank 0] PRINT: step:9000/10000 val_loss:3.8629 svd_entropy: attn_qk:H=0.7570,top10E=0.25,eRank=158.4,q75/q25=111.26 attn_vo:H=0.8382,top10E=0.14,eRank=284.3,q75/q25=58.23 mlp_w1:H=0.7596,top10E=0.29,eRank=182.1,q75/q25=23.30 mlp_w2:H=0.8453,top10E=0.13,eRank=289.2,q75/q25=49.13 vo_prod:H=0.7536,top10E=0.22,eRank=158.5,q75/q25=3231.67 train_time:684000ms step_avg:76.00ms +[2025-09-03 05:54:41] [Rank 0] PRINT: step:9000/10000 val_loss:3.8629 svd_entropy: attn_qk:H=0.7570,top10E=0.25,eRank=158.4,q75/q25=111.26 attn_vo:H=0.8382,top10E=0.14,eRank=284.3,q75/q25=58.23 mlp_w1:H=0.7596,top10E=0.29,eRank=182.1,q75/q25=23.30 mlp_w2:H=0.8453,top10E=0.13,eRank=289.2,q75/q25=49.13 vo_prod:H=0.7536,top10E=0.22,eRank=158.5,q75/q25=3231.67 train_time:684000ms step_avg:76.00ms +[2025-09-03 05:54:41] [Rank 0] step:9001/10000 train_time:684012ms step_avg:75.99ms +[2025-09-03 05:54:41] [Rank 0] step:9001/10000 train_time:684012ms step_avg:75.99ms +[2025-09-03 05:54:42] [Rank 0] step:9021/10000 train_time:685474ms step_avg:75.99ms +[2025-09-03 05:54:42] [Rank 0] step:9021/10000 train_time:685474ms step_avg:75.99ms +[2025-09-03 05:54:44] [Rank 0] step:9041/10000 train_time:687093ms 
step_avg:76.00ms +[2025-09-03 05:54:44] [Rank 0] step:9041/10000 train_time:687093ms step_avg:76.00ms +[2025-09-03 05:54:46] [Rank 0] step:9061/10000 train_time:688726ms step_avg:76.01ms +[2025-09-03 05:54:46] [Rank 0] step:9061/10000 train_time:688726ms step_avg:76.01ms +[2025-09-03 05:54:47] [Rank 0] step:9081/10000 train_time:690359ms step_avg:76.02ms +[2025-09-03 05:54:47] [Rank 0] step:9081/10000 train_time:690359ms step_avg:76.02ms +[2025-09-03 05:54:49] [Rank 0] step:9101/10000 train_time:692004ms step_avg:76.04ms +[2025-09-03 05:54:49] [Rank 0] step:9101/10000 train_time:692004ms step_avg:76.04ms +[2025-09-03 05:54:51] [Rank 0] step:9121/10000 train_time:693633ms step_avg:76.05ms +[2025-09-03 05:54:51] [Rank 0] step:9121/10000 train_time:693633ms step_avg:76.05ms +[2025-09-03 05:54:52] [Rank 0] step:9141/10000 train_time:695253ms step_avg:76.06ms +[2025-09-03 05:54:52] [Rank 0] step:9141/10000 train_time:695253ms step_avg:76.06ms +[2025-09-03 05:54:54] [Rank 0] step:9161/10000 train_time:696863ms step_avg:76.07ms +[2025-09-03 05:54:54] [Rank 0] step:9161/10000 train_time:696863ms step_avg:76.07ms +[2025-09-03 05:54:55] [Rank 0] step:9181/10000 train_time:698518ms step_avg:76.08ms +[2025-09-03 05:54:55] [Rank 0] step:9181/10000 train_time:698518ms step_avg:76.08ms +[2025-09-03 05:54:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:54:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:55:09] [Rank 0] PRINT: step:9200/10000 val_loss:3.8551 svd_entropy: attn_qk:H=0.7575,top10E=0.25,eRank=158.8,q75/q25=111.38 attn_vo:H=0.8386,top10E=0.14,eRank=285.0,q75/q25=57.85 mlp_w1:H=0.7602,top10E=0.29,eRank=182.8,q75/q25=23.38 mlp_w2:H=0.8456,top10E=0.13,eRank=289.9,q75/q25=49.01 vo_prod:H=0.7542,top10E=0.22,eRank=159.2,q75/q25=3155.32 train_time:700302ms step_avg:76.12ms +[2025-09-03 05:55:09] [Rank 0] PRINT: step:9200/10000 val_loss:3.8551 svd_entropy: attn_qk:H=0.7575,top10E=0.25,eRank=158.8,q75/q25=111.38 attn_vo:H=0.8386,top10E=0.14,eRank=285.0,q75/q25=57.85 mlp_w1:H=0.7602,top10E=0.29,eRank=182.8,q75/q25=23.38 mlp_w2:H=0.8456,top10E=0.13,eRank=289.9,q75/q25=49.01 vo_prod:H=0.7542,top10E=0.22,eRank=159.2,q75/q25=3155.32 train_time:700302ms step_avg:76.12ms +[2025-09-03 05:55:09] [Rank 0] step:9201/10000 train_time:700313ms step_avg:76.11ms +[2025-09-03 05:55:09] [Rank 0] step:9201/10000 train_time:700313ms step_avg:76.11ms +[2025-09-03 05:55:11] [Rank 0] step:9221/10000 train_time:701795ms step_avg:76.11ms +[2025-09-03 05:55:11] [Rank 0] step:9221/10000 train_time:701795ms step_avg:76.11ms +[2025-09-03 05:55:12] [Rank 0] step:9241/10000 train_time:703427ms step_avg:76.12ms +[2025-09-03 05:55:12] [Rank 0] step:9241/10000 train_time:703427ms step_avg:76.12ms +[2025-09-03 05:55:14] [Rank 0] step:9261/10000 train_time:705064ms step_avg:76.13ms +[2025-09-03 05:55:14] [Rank 0] step:9261/10000 train_time:705064ms step_avg:76.13ms +[2025-09-03 05:55:15] [Rank 0] step:9281/10000 train_time:706677ms step_avg:76.14ms +[2025-09-03 05:55:15] [Rank 0] step:9281/10000 train_time:706677ms step_avg:76.14ms +[2025-09-03 05:55:17] [Rank 0] step:9301/10000 train_time:708300ms step_avg:76.15ms +[2025-09-03 05:55:17] [Rank 0] step:9301/10000 train_time:708300ms step_avg:76.15ms +[2025-09-03 05:55:19] [Rank 0] step:9321/10000 train_time:709930ms step_avg:76.16ms +[2025-09-03 05:55:19] [Rank 0] step:9321/10000 train_time:709930ms step_avg:76.16ms +[2025-09-03 
05:55:20] [Rank 0] step:9341/10000 train_time:711557ms step_avg:76.18ms +[2025-09-03 05:55:20] [Rank 0] step:9341/10000 train_time:711557ms step_avg:76.18ms +[2025-09-03 05:55:22] [Rank 0] step:9361/10000 train_time:713188ms step_avg:76.19ms +[2025-09-03 05:55:22] [Rank 0] step:9361/10000 train_time:713188ms step_avg:76.19ms +[2025-09-03 05:55:24] [Rank 0] step:9381/10000 train_time:714832ms step_avg:76.20ms +[2025-09-03 05:55:24] [Rank 0] step:9381/10000 train_time:714832ms step_avg:76.20ms +[2025-09-03 05:55:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:55:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:55:37] [Rank 0] PRINT: step:9400/10000 val_loss:3.8480 svd_entropy: attn_qk:H=0.7578,top10E=0.25,eRank=159.2,q75/q25=111.44 attn_vo:H=0.8389,top10E=0.14,eRank=285.6,q75/q25=57.32 mlp_w1:H=0.7608,top10E=0.29,eRank=183.4,q75/q25=23.36 mlp_w2:H=0.8459,top10E=0.13,eRank=290.4,q75/q25=49.04 vo_prod:H=0.7548,top10E=0.22,eRank=159.8,q75/q25=3077.50 train_time:716627ms step_avg:76.24ms +[2025-09-03 05:55:37] [Rank 0] PRINT: step:9400/10000 val_loss:3.8480 svd_entropy: attn_qk:H=0.7578,top10E=0.25,eRank=159.2,q75/q25=111.44 attn_vo:H=0.8389,top10E=0.14,eRank=285.6,q75/q25=57.32 mlp_w1:H=0.7608,top10E=0.29,eRank=183.4,q75/q25=23.36 mlp_w2:H=0.8459,top10E=0.13,eRank=290.4,q75/q25=49.04 vo_prod:H=0.7548,top10E=0.22,eRank=159.8,q75/q25=3077.50 train_time:716627ms step_avg:76.24ms +[2025-09-03 05:55:37] [Rank 0] step:9401/10000 train_time:716639ms step_avg:76.23ms +[2025-09-03 05:55:37] [Rank 0] step:9401/10000 train_time:716639ms step_avg:76.23ms +[2025-09-03 05:55:39] [Rank 0] step:9421/10000 train_time:718102ms step_avg:76.22ms +[2025-09-03 05:55:39] [Rank 0] step:9421/10000 train_time:718102ms step_avg:76.22ms +[2025-09-03 05:55:40] [Rank 0] step:9441/10000 train_time:719728ms 
step_avg:76.23ms +[2025-09-03 05:55:40] [Rank 0] step:9441/10000 train_time:719728ms step_avg:76.23ms +[2025-09-03 05:55:42] [Rank 0] step:9461/10000 train_time:721360ms step_avg:76.25ms +[2025-09-03 05:55:42] [Rank 0] step:9461/10000 train_time:721360ms step_avg:76.25ms +[2025-09-03 05:55:43] [Rank 0] step:9481/10000 train_time:722992ms step_avg:76.26ms +[2025-09-03 05:55:43] [Rank 0] step:9481/10000 train_time:722992ms step_avg:76.26ms +[2025-09-03 05:55:45] [Rank 0] step:9501/10000 train_time:724635ms step_avg:76.27ms +[2025-09-03 05:55:45] [Rank 0] step:9501/10000 train_time:724635ms step_avg:76.27ms +[2025-09-03 05:55:47] [Rank 0] step:9521/10000 train_time:726255ms step_avg:76.28ms +[2025-09-03 05:55:47] [Rank 0] step:9521/10000 train_time:726255ms step_avg:76.28ms +[2025-09-03 05:55:48] [Rank 0] step:9541/10000 train_time:727884ms step_avg:76.29ms +[2025-09-03 05:55:48] [Rank 0] step:9541/10000 train_time:727884ms step_avg:76.29ms +[2025-09-03 05:55:50] [Rank 0] step:9561/10000 train_time:729505ms step_avg:76.30ms +[2025-09-03 05:55:50] [Rank 0] step:9561/10000 train_time:729505ms step_avg:76.30ms +[2025-09-03 05:55:52] [Rank 0] step:9581/10000 train_time:731136ms step_avg:76.31ms +[2025-09-03 05:55:52] [Rank 0] step:9581/10000 train_time:731136ms step_avg:76.31ms +[2025-09-03 05:55:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:55:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:56:05] [Rank 0] PRINT: step:9600/10000 val_loss:3.8418 svd_entropy: attn_qk:H=0.7582,top10E=0.25,eRank=159.6,q75/q25=111.20 attn_vo:H=0.8392,top10E=0.14,eRank=286.0,q75/q25=57.05 mlp_w1:H=0.7612,top10E=0.29,eRank=183.9,q75/q25=23.40 mlp_w2:H=0.8461,top10E=0.13,eRank=290.8,q75/q25=49.08 vo_prod:H=0.7552,top10E=0.22,eRank=160.2,q75/q25=3027.28 train_time:732933ms step_avg:76.35ms +[2025-09-03 05:56:05] [Rank 0] PRINT: step:9600/10000 val_loss:3.8418 svd_entropy: attn_qk:H=0.7582,top10E=0.25,eRank=159.6,q75/q25=111.20 attn_vo:H=0.8392,top10E=0.14,eRank=286.0,q75/q25=57.05 mlp_w1:H=0.7612,top10E=0.29,eRank=183.9,q75/q25=23.40 mlp_w2:H=0.8461,top10E=0.13,eRank=290.8,q75/q25=49.08 vo_prod:H=0.7552,top10E=0.22,eRank=160.2,q75/q25=3027.28 train_time:732933ms step_avg:76.35ms +[2025-09-03 05:56:05] [Rank 0] step:9601/10000 train_time:732943ms step_avg:76.34ms +[2025-09-03 05:56:05] [Rank 0] step:9601/10000 train_time:732943ms step_avg:76.34ms +[2025-09-03 05:56:07] [Rank 0] step:9621/10000 train_time:734436ms step_avg:76.34ms +[2025-09-03 05:56:07] [Rank 0] step:9621/10000 train_time:734436ms step_avg:76.34ms +[2025-09-03 05:56:08] [Rank 0] step:9641/10000 train_time:736066ms step_avg:76.35ms +[2025-09-03 05:56:08] [Rank 0] step:9641/10000 train_time:736066ms step_avg:76.35ms +[2025-09-03 05:56:10] [Rank 0] step:9661/10000 train_time:737720ms step_avg:76.36ms +[2025-09-03 05:56:10] [Rank 0] step:9661/10000 train_time:737720ms step_avg:76.36ms +[2025-09-03 05:56:12] [Rank 0] step:9681/10000 train_time:739368ms step_avg:76.37ms +[2025-09-03 05:56:12] [Rank 0] step:9681/10000 train_time:739368ms step_avg:76.37ms +[2025-09-03 05:56:13] [Rank 0] step:9701/10000 train_time:741036ms step_avg:76.39ms +[2025-09-03 05:56:13] [Rank 0] step:9701/10000 train_time:741036ms step_avg:76.39ms +[2025-09-03 05:56:15] [Rank 0] step:9721/10000 train_time:742680ms step_avg:76.40ms +[2025-09-03 05:56:15] [Rank 0] step:9721/10000 train_time:742680ms step_avg:76.40ms +[2025-09-03 
05:56:17] [Rank 0] step:9741/10000 train_time:744349ms step_avg:76.41ms +[2025-09-03 05:56:17] [Rank 0] step:9741/10000 train_time:744349ms step_avg:76.41ms +[2025-09-03 05:56:18] [Rank 0] step:9761/10000 train_time:746002ms step_avg:76.43ms +[2025-09-03 05:56:18] [Rank 0] step:9761/10000 train_time:746002ms step_avg:76.43ms +[2025-09-03 05:56:20] [Rank 0] step:9781/10000 train_time:747668ms step_avg:76.44ms +[2025-09-03 05:56:20] [Rank 0] step:9781/10000 train_time:747668ms step_avg:76.44ms +[2025-09-03 05:56:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:56:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:56:33] [Rank 0] PRINT: step:9800/10000 val_loss:3.8361 svd_entropy: attn_qk:H=0.7584,top10E=0.25,eRank=159.8,q75/q25=111.06 attn_vo:H=0.8394,top10E=0.14,eRank=286.4,q75/q25=56.77 mlp_w1:H=0.7615,top10E=0.29,eRank=184.2,q75/q25=23.39 mlp_w2:H=0.8463,top10E=0.13,eRank=291.2,q75/q25=48.99 vo_prod:H=0.7556,top10E=0.22,eRank=160.6,q75/q25=3002.66 train_time:749503ms step_avg:76.48ms +[2025-09-03 05:56:33] [Rank 0] PRINT: step:9800/10000 val_loss:3.8361 svd_entropy: attn_qk:H=0.7584,top10E=0.25,eRank=159.8,q75/q25=111.06 attn_vo:H=0.8394,top10E=0.14,eRank=286.4,q75/q25=56.77 mlp_w1:H=0.7615,top10E=0.29,eRank=184.2,q75/q25=23.39 mlp_w2:H=0.8463,top10E=0.13,eRank=291.2,q75/q25=48.99 vo_prod:H=0.7556,top10E=0.22,eRank=160.6,q75/q25=3002.66 train_time:749503ms step_avg:76.48ms +[2025-09-03 05:56:33] [Rank 0] step:9801/10000 train_time:749514ms step_avg:76.47ms +[2025-09-03 05:56:33] [Rank 0] step:9801/10000 train_time:749514ms step_avg:76.47ms +[2025-09-03 05:56:35] [Rank 0] step:9821/10000 train_time:751013ms step_avg:76.47ms +[2025-09-03 05:56:35] [Rank 0] step:9821/10000 train_time:751013ms step_avg:76.47ms +[2025-09-03 05:56:37] [Rank 0] step:9841/10000 train_time:752682ms 
step_avg:76.48ms +[2025-09-03 05:56:37] [Rank 0] step:9841/10000 train_time:752682ms step_avg:76.48ms +[2025-09-03 05:56:38] [Rank 0] step:9861/10000 train_time:754325ms step_avg:76.50ms +[2025-09-03 05:56:38] [Rank 0] step:9861/10000 train_time:754325ms step_avg:76.50ms +[2025-09-03 05:56:40] [Rank 0] step:9881/10000 train_time:755964ms step_avg:76.51ms +[2025-09-03 05:56:40] [Rank 0] step:9881/10000 train_time:755964ms step_avg:76.51ms +[2025-09-03 05:56:42] [Rank 0] step:9901/10000 train_time:757620ms step_avg:76.52ms +[2025-09-03 05:56:42] [Rank 0] step:9901/10000 train_time:757620ms step_avg:76.52ms +[2025-09-03 05:56:43] [Rank 0] step:9921/10000 train_time:759271ms step_avg:76.53ms +[2025-09-03 05:56:43] [Rank 0] step:9921/10000 train_time:759271ms step_avg:76.53ms +[2025-09-03 05:56:45] [Rank 0] step:9941/10000 train_time:760930ms step_avg:76.54ms +[2025-09-03 05:56:45] [Rank 0] step:9941/10000 train_time:760930ms step_avg:76.54ms +[2025-09-03 05:56:47] [Rank 0] step:9961/10000 train_time:762585ms step_avg:76.56ms +[2025-09-03 05:56:47] [Rank 0] step:9961/10000 train_time:762585ms step_avg:76.56ms +[2025-09-03 05:56:48] [Rank 0] step:9981/10000 train_time:764237ms step_avg:76.57ms +[2025-09-03 05:56:48] [Rank 0] step:9981/10000 train_time:764237ms step_avg:76.57ms +[2025-09-03 05:56:50] [Rank 0] step:10000/10000 train_time:765815ms step_avg:76.58ms +[2025-09-03 05:56:50] [Rank 0] step:10000/10000 train_time:765815ms step_avg:76.58ms +[2025-09-03 05:56:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:56:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:57:02] [Rank 0] PRINT: step:10000/10000 val_loss:3.8305 svd_entropy: attn_qk:H=0.7586,top10E=0.25,eRank=160.0,q75/q25=111.04 attn_vo:H=0.8396,top10E=0.14,eRank=286.7,q75/q25=56.45 mlp_w1:H=0.7618,top10E=0.29,eRank=184.5,q75/q25=23.41 mlp_w2:H=0.8464,top10E=0.13,eRank=291.5,q75/q25=48.97 vo_prod:H=0.7559,top10E=0.22,eRank=160.9,q75/q25=2981.01 train_time:766070ms step_avg:76.61ms +[2025-09-03 05:57:02] [Rank 0] PRINT: step:10000/10000 val_loss:3.8305 svd_entropy: attn_qk:H=0.7586,top10E=0.25,eRank=160.0,q75/q25=111.04 attn_vo:H=0.8396,top10E=0.14,eRank=286.7,q75/q25=56.45 mlp_w1:H=0.7618,top10E=0.29,eRank=184.5,q75/q25=23.41 mlp_w2:H=0.8464,top10E=0.13,eRank=291.5,q75/q25=48.97 vo_prod:H=0.7559,top10E=0.22,eRank=160.9,q75/q25=2981.01 train_time:766070ms step_avg:76.61ms +[2025-09-03 05:57:02] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 05:57:02 2025 --- +[2025-09-03 05:57:02] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 05:57:02 2025 --- +[2025-09-03 05:57:02] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14356 MiB +[2025-09-03 05:57:02] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14356 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_46/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..94bb117abb008b548650f733e45e4806c311c59b --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_46/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "7b82ff61-cdb2-4712-abb9-0066149a19b9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_46/training_log_7b82ff61-cdb2-4712-abb9-0066149a19b9.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_46/training_log_7b82ff61-cdb2-4712-abb9-0066149a19b9.txt new file mode 100644 index 0000000000000000000000000000000000000000..06fc628a95e6c4d82cf0b9103d93629480d4e58c --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_46/training_log_7b82ff61-cdb2-4712-abb9-0066149a19b9.txt @@ -0,0 +1,2984 @@ +[2025-09-02 14:42:52] [Rank 0] PRINT: --- Script Start: Tue Sep 2 14:42:52 2025 --- +[2025-09-02 14:42:52] [Rank 0] PRINT: --- Script Start: Tue Sep 2 14:42:52 2025 --- +[2025-09-02 14:42:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 14:42:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 14:42:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 14:42:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 14:42:52] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-02 14:42:52] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-02 14:42:52] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_46 +[2025-09-02 14:42:52] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_46 +[2025-09-02 14:42:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 14:42:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 14:42:52] [Rank 0] PRINT: Constructing model... +[2025-09-02 14:42:52] [Rank 0] PRINT: Constructing model... +[2025-09-02 14:42:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 14:42:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 14:42:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 14:42:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 14:42:54] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 14:42:54] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 14:42:54] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 14:42:54] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 14:42:54] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 14:42:54] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 14:42:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 14:42:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 14:42:54] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 14:42:54] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 14:42:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 14:42:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 14:42:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 14:42:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 14:42:54] [Rank 0] PRINT: Starting warmup... +[2025-09-02 14:42:54] [Rank 0] PRINT: Starting warmup... +[2025-09-02 14:47:09] [Rank 0] PRINT: Warmup complete. +[2025-09-02 14:47:09] [Rank 0] PRINT: Warmup complete. +[2025-09-02 14:47:09] [Rank 0] PRINT: Starting training... +[2025-09-02 14:47:09] [Rank 0] PRINT: Starting training... 
+[2025-09-02 14:47:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:47:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:48:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 14:48:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.27 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.7,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 14:48:34] [Rank 0] step:21/10000 train_time:1306ms step_avg:62.19ms +[2025-09-02 14:48:34] [Rank 0] step:21/10000 train_time:1306ms step_avg:62.19ms +[2025-09-02 14:48:35] [Rank 0] step:41/10000 train_time:2703ms step_avg:65.94ms +[2025-09-02 14:48:35] [Rank 0] step:41/10000 train_time:2703ms step_avg:65.94ms +[2025-09-02 14:48:37] [Rank 0] step:61/10000 train_time:4108ms step_avg:67.34ms +[2025-09-02 14:48:37] [Rank 0] step:61/10000 train_time:4108ms step_avg:67.34ms +[2025-09-02 14:48:38] [Rank 0] step:81/10000 train_time:5512ms step_avg:68.05ms +[2025-09-02 14:48:38] [Rank 0] step:81/10000 train_time:5512ms step_avg:68.05ms +[2025-09-02 14:48:39] [Rank 0] step:101/10000 train_time:6954ms step_avg:68.86ms +[2025-09-02 14:48:39] [Rank 0] step:101/10000 train_time:6954ms step_avg:68.86ms +[2025-09-02 14:48:41] [Rank 0] step:121/10000 train_time:8360ms step_avg:69.09ms +[2025-09-02 14:48:41] [Rank 0] step:121/10000 
train_time:8360ms step_avg:69.09ms +[2025-09-02 14:48:42] [Rank 0] step:141/10000 train_time:9769ms step_avg:69.28ms +[2025-09-02 14:48:42] [Rank 0] step:141/10000 train_time:9769ms step_avg:69.28ms +[2025-09-02 14:48:44] [Rank 0] step:161/10000 train_time:11180ms step_avg:69.44ms +[2025-09-02 14:48:44] [Rank 0] step:161/10000 train_time:11180ms step_avg:69.44ms +[2025-09-02 14:48:45] [Rank 0] step:181/10000 train_time:12591ms step_avg:69.56ms +[2025-09-02 14:48:45] [Rank 0] step:181/10000 train_time:12591ms step_avg:69.56ms +[2025-09-02 14:48:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:48:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:48:58] [Rank 0] PRINT: step:200/10000 val_loss:6.4395 svd_entropy: attn_qk:H=0.4318,top10E=0.81,eRank=35.3,q75/q25=12.23 attn_vo:H=0.5586,top10E=0.63,eRank=109.7,q75/q25=102.42 mlp_w1:H=0.4242,top10E=0.74,eRank=24.0,q75/q25=2.75 mlp_w2:H=0.1517,top10E=0.95,eRank=4.2,q75/q25=592.66 vo_prod:H=0.2777,top10E=0.96,eRank=7.7,q75/q25=662.76 train_time:14145ms step_avg:70.73ms +[2025-09-02 14:48:58] [Rank 0] PRINT: step:200/10000 val_loss:6.4395 svd_entropy: attn_qk:H=0.4318,top10E=0.81,eRank=35.3,q75/q25=12.23 attn_vo:H=0.5586,top10E=0.63,eRank=109.7,q75/q25=102.42 mlp_w1:H=0.4242,top10E=0.74,eRank=24.0,q75/q25=2.75 mlp_w2:H=0.1517,top10E=0.95,eRank=4.2,q75/q25=592.66 vo_prod:H=0.2777,top10E=0.96,eRank=7.7,q75/q25=662.76 train_time:14145ms step_avg:70.73ms +[2025-09-02 14:48:58] [Rank 0] step:201/10000 train_time:14157ms step_avg:70.43ms +[2025-09-02 14:48:58] [Rank 0] step:201/10000 train_time:14157ms step_avg:70.43ms +[2025-09-02 14:49:00] [Rank 0] step:221/10000 train_time:15432ms step_avg:69.83ms +[2025-09-02 14:49:00] [Rank 0] step:221/10000 train_time:15432ms step_avg:69.83ms +[2025-09-02 14:49:01] [Rank 0] step:241/10000 
train_time:16839ms step_avg:69.87ms +[2025-09-02 14:49:01] [Rank 0] step:241/10000 train_time:16839ms step_avg:69.87ms +[2025-09-02 14:49:03] [Rank 0] step:261/10000 train_time:18248ms step_avg:69.92ms +[2025-09-02 14:49:03] [Rank 0] step:261/10000 train_time:18248ms step_avg:69.92ms +[2025-09-02 14:49:04] [Rank 0] step:281/10000 train_time:19658ms step_avg:69.96ms +[2025-09-02 14:49:04] [Rank 0] step:281/10000 train_time:19658ms step_avg:69.96ms +[2025-09-02 14:49:05] [Rank 0] step:301/10000 train_time:21070ms step_avg:70.00ms +[2025-09-02 14:49:05] [Rank 0] step:301/10000 train_time:21070ms step_avg:70.00ms +[2025-09-02 14:49:07] [Rank 0] step:321/10000 train_time:22482ms step_avg:70.04ms +[2025-09-02 14:49:07] [Rank 0] step:321/10000 train_time:22482ms step_avg:70.04ms +[2025-09-02 14:49:08] [Rank 0] step:341/10000 train_time:23892ms step_avg:70.07ms +[2025-09-02 14:49:08] [Rank 0] step:341/10000 train_time:23892ms step_avg:70.07ms +[2025-09-02 14:49:10] [Rank 0] step:361/10000 train_time:25304ms step_avg:70.09ms +[2025-09-02 14:49:10] [Rank 0] step:361/10000 train_time:25304ms step_avg:70.09ms +[2025-09-02 14:49:11] [Rank 0] step:381/10000 train_time:26715ms step_avg:70.12ms +[2025-09-02 14:49:11] [Rank 0] step:381/10000 train_time:26715ms step_avg:70.12ms +[2025-09-02 14:49:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:49:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:49:24] [Rank 0] PRINT: step:400/10000 val_loss:5.9619 svd_entropy: attn_qk:H=0.4960,top10E=0.71,eRank=43.8,q75/q25=13.64 attn_vo:H=0.5872,top10E=0.52,eRank=90.4,q75/q25=37.88 mlp_w1:H=0.4631,top10E=0.68,eRank=38.3,q75/q25=3.26 mlp_w2:H=0.5632,top10E=0.56,eRank=44.2,q75/q25=11.38 vo_prod:H=0.4174,top10E=0.81,eRank=17.5,q75/q25=275.89 train_time:28268ms step_avg:70.67ms +[2025-09-02 14:49:24] [Rank 0] PRINT: step:400/10000 val_loss:5.9619 svd_entropy: attn_qk:H=0.4960,top10E=0.71,eRank=43.8,q75/q25=13.64 attn_vo:H=0.5872,top10E=0.52,eRank=90.4,q75/q25=37.88 mlp_w1:H=0.4631,top10E=0.68,eRank=38.3,q75/q25=3.26 mlp_w2:H=0.5632,top10E=0.56,eRank=44.2,q75/q25=11.38 vo_prod:H=0.4174,top10E=0.81,eRank=17.5,q75/q25=275.89 train_time:28268ms step_avg:70.67ms +[2025-09-02 14:49:24] [Rank 0] step:401/10000 train_time:28280ms step_avg:70.52ms +[2025-09-02 14:49:24] [Rank 0] step:401/10000 train_time:28280ms step_avg:70.52ms +[2025-09-02 14:49:26] [Rank 0] step:421/10000 train_time:29554ms step_avg:70.20ms +[2025-09-02 14:49:26] [Rank 0] step:421/10000 train_time:29554ms step_avg:70.20ms +[2025-09-02 14:49:27] [Rank 0] step:441/10000 train_time:30962ms step_avg:70.21ms +[2025-09-02 14:49:27] [Rank 0] step:441/10000 train_time:30962ms step_avg:70.21ms +[2025-09-02 14:49:29] [Rank 0] step:461/10000 train_time:32369ms step_avg:70.22ms +[2025-09-02 14:49:29] [Rank 0] step:461/10000 train_time:32369ms step_avg:70.22ms +[2025-09-02 14:49:30] [Rank 0] step:481/10000 train_time:33779ms step_avg:70.23ms +[2025-09-02 14:49:30] [Rank 0] step:481/10000 train_time:33779ms step_avg:70.23ms +[2025-09-02 14:49:31] [Rank 0] step:501/10000 train_time:35187ms step_avg:70.23ms +[2025-09-02 14:49:31] [Rank 0] step:501/10000 train_time:35187ms step_avg:70.23ms +[2025-09-02 14:49:33] [Rank 0] step:521/10000 train_time:36599ms step_avg:70.25ms +[2025-09-02 14:49:33] [Rank 0] step:521/10000 train_time:36599ms step_avg:70.25ms +[2025-09-02 14:49:34] [Rank 0] step:541/10000 
train_time:38012ms step_avg:70.26ms +[2025-09-02 14:49:34] [Rank 0] step:541/10000 train_time:38012ms step_avg:70.26ms +[2025-09-02 14:49:36] [Rank 0] step:561/10000 train_time:39423ms step_avg:70.27ms +[2025-09-02 14:49:36] [Rank 0] step:561/10000 train_time:39423ms step_avg:70.27ms +[2025-09-02 14:49:37] [Rank 0] step:581/10000 train_time:40834ms step_avg:70.28ms +[2025-09-02 14:49:37] [Rank 0] step:581/10000 train_time:40834ms step_avg:70.28ms +[2025-09-02 14:49:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:49:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:49:50] [Rank 0] PRINT: step:600/10000 val_loss:5.6652 svd_entropy: attn_qk:H=0.5358,top10E=0.63,eRank=51.2,q75/q25=15.33 attn_vo:H=0.6192,top10E=0.44,eRank=97.8,q75/q25=28.22 mlp_w1:H=0.4994,top10E=0.63,eRank=48.7,q75/q25=3.59 mlp_w2:H=0.6486,top10E=0.43,eRank=76.7,q75/q25=9.50 vo_prod:H=0.4837,top10E=0.67,eRank=26.6,q75/q25=244.31 train_time:42387ms step_avg:70.65ms +[2025-09-02 14:49:50] [Rank 0] PRINT: step:600/10000 val_loss:5.6652 svd_entropy: attn_qk:H=0.5358,top10E=0.63,eRank=51.2,q75/q25=15.33 attn_vo:H=0.6192,top10E=0.44,eRank=97.8,q75/q25=28.22 mlp_w1:H=0.4994,top10E=0.63,eRank=48.7,q75/q25=3.59 mlp_w2:H=0.6486,top10E=0.43,eRank=76.7,q75/q25=9.50 vo_prod:H=0.4837,top10E=0.67,eRank=26.6,q75/q25=244.31 train_time:42387ms step_avg:70.65ms +[2025-09-02 14:49:50] [Rank 0] step:601/10000 train_time:42399ms step_avg:70.55ms +[2025-09-02 14:49:50] [Rank 0] step:601/10000 train_time:42399ms step_avg:70.55ms +[2025-09-02 14:49:52] [Rank 0] step:621/10000 train_time:43673ms step_avg:70.33ms +[2025-09-02 14:49:52] [Rank 0] step:621/10000 train_time:43673ms step_avg:70.33ms +[2025-09-02 14:49:53] [Rank 0] step:641/10000 train_time:45081ms step_avg:70.33ms +[2025-09-02 14:49:53] [Rank 0] step:641/10000 
train_time:45081ms step_avg:70.33ms +[2025-09-02 14:49:55] [Rank 0] step:661/10000 train_time:46490ms step_avg:70.33ms +[2025-09-02 14:49:55] [Rank 0] step:661/10000 train_time:46490ms step_avg:70.33ms +[2025-09-02 14:49:56] [Rank 0] step:681/10000 train_time:47900ms step_avg:70.34ms +[2025-09-02 14:49:56] [Rank 0] step:681/10000 train_time:47900ms step_avg:70.34ms +[2025-09-02 14:49:58] [Rank 0] step:701/10000 train_time:49312ms step_avg:70.35ms +[2025-09-02 14:49:58] [Rank 0] step:701/10000 train_time:49312ms step_avg:70.35ms +[2025-09-02 14:49:59] [Rank 0] step:721/10000 train_time:50723ms step_avg:70.35ms +[2025-09-02 14:49:59] [Rank 0] step:721/10000 train_time:50723ms step_avg:70.35ms +[2025-09-02 14:50:00] [Rank 0] step:741/10000 train_time:52135ms step_avg:70.36ms +[2025-09-02 14:50:00] [Rank 0] step:741/10000 train_time:52135ms step_avg:70.36ms +[2025-09-02 14:50:02] [Rank 0] step:761/10000 train_time:53560ms step_avg:70.38ms +[2025-09-02 14:50:02] [Rank 0] step:761/10000 train_time:53560ms step_avg:70.38ms +[2025-09-02 14:50:03] [Rank 0] step:781/10000 train_time:54987ms step_avg:70.41ms +[2025-09-02 14:50:03] [Rank 0] step:781/10000 train_time:54987ms step_avg:70.41ms +[2025-09-02 14:50:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:50:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:50:16] [Rank 0] PRINT: step:800/10000 val_loss:5.4458 svd_entropy: attn_qk:H=0.5631,top10E=0.57,eRank=57.2,q75/q25=17.43 attn_vo:H=0.6457,top10E=0.39,eRank=107.1,q75/q25=28.39 mlp_w1:H=0.5326,top10E=0.59,eRank=57.5,q75/q25=3.89 mlp_w2:H=0.6989,top10E=0.35,eRank=105.2,q75/q25=8.86 vo_prod:H=0.5264,top10E=0.59,eRank=34.9,q75/q25=344.71 train_time:56556ms step_avg:70.70ms +[2025-09-02 14:50:16] [Rank 0] PRINT: step:800/10000 val_loss:5.4458 svd_entropy: attn_qk:H=0.5631,top10E=0.57,eRank=57.2,q75/q25=17.43 attn_vo:H=0.6457,top10E=0.39,eRank=107.1,q75/q25=28.39 mlp_w1:H=0.5326,top10E=0.59,eRank=57.5,q75/q25=3.89 mlp_w2:H=0.6989,top10E=0.35,eRank=105.2,q75/q25=8.86 vo_prod:H=0.5264,top10E=0.59,eRank=34.9,q75/q25=344.71 train_time:56556ms step_avg:70.70ms +[2025-09-02 14:50:17] [Rank 0] step:801/10000 train_time:56568ms step_avg:70.62ms +[2025-09-02 14:50:17] [Rank 0] step:801/10000 train_time:56568ms step_avg:70.62ms +[2025-09-02 14:50:18] [Rank 0] step:821/10000 train_time:57880ms step_avg:70.50ms +[2025-09-02 14:50:18] [Rank 0] step:821/10000 train_time:57880ms step_avg:70.50ms +[2025-09-02 14:50:19] [Rank 0] step:841/10000 train_time:59304ms step_avg:70.52ms +[2025-09-02 14:50:19] [Rank 0] step:841/10000 train_time:59304ms step_avg:70.52ms +[2025-09-02 14:50:21] [Rank 0] step:861/10000 train_time:60731ms step_avg:70.54ms +[2025-09-02 14:50:21] [Rank 0] step:861/10000 train_time:60731ms step_avg:70.54ms +[2025-09-02 14:50:22] [Rank 0] step:881/10000 train_time:62158ms step_avg:70.55ms +[2025-09-02 14:50:22] [Rank 0] step:881/10000 train_time:62158ms step_avg:70.55ms +[2025-09-02 14:50:24] [Rank 0] step:901/10000 train_time:63586ms step_avg:70.57ms +[2025-09-02 14:50:24] [Rank 0] step:901/10000 train_time:63586ms step_avg:70.57ms +[2025-09-02 14:50:25] [Rank 0] step:921/10000 train_time:65013ms step_avg:70.59ms +[2025-09-02 14:50:25] [Rank 0] step:921/10000 train_time:65013ms step_avg:70.59ms +[2025-09-02 14:50:27] [Rank 0] step:941/10000 
train_time:66440ms step_avg:70.61ms +[2025-09-02 14:50:27] [Rank 0] step:941/10000 train_time:66440ms step_avg:70.61ms +[2025-09-02 14:50:28] [Rank 0] step:961/10000 train_time:67867ms step_avg:70.62ms +[2025-09-02 14:50:28] [Rank 0] step:961/10000 train_time:67867ms step_avg:70.62ms +[2025-09-02 14:50:29] [Rank 0] step:981/10000 train_time:69294ms step_avg:70.64ms +[2025-09-02 14:50:29] [Rank 0] step:981/10000 train_time:69294ms step_avg:70.64ms +[2025-09-02 14:50:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:50:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:50:43] [Rank 0] PRINT: step:1000/10000 val_loss:5.2850 svd_entropy: attn_qk:H=0.5848,top10E=0.53,eRank=62.8,q75/q25=19.95 attn_vo:H=0.6676,top10E=0.35,eRank=117.1,q75/q25=34.28 mlp_w1:H=0.5565,top10E=0.56,eRank=64.4,q75/q25=4.21 mlp_w2:H=0.7293,top10E=0.30,eRank=127.8,q75/q25=9.55 vo_prod:H=0.5552,top10E=0.52,eRank=42.2,q75/q25=685.29 train_time:70866ms step_avg:70.87ms +[2025-09-02 14:50:43] [Rank 0] PRINT: step:1000/10000 val_loss:5.2850 svd_entropy: attn_qk:H=0.5848,top10E=0.53,eRank=62.8,q75/q25=19.95 attn_vo:H=0.6676,top10E=0.35,eRank=117.1,q75/q25=34.28 mlp_w1:H=0.5565,top10E=0.56,eRank=64.4,q75/q25=4.21 mlp_w2:H=0.7293,top10E=0.30,eRank=127.8,q75/q25=9.55 vo_prod:H=0.5552,top10E=0.52,eRank=42.2,q75/q25=685.29 train_time:70866ms step_avg:70.87ms +[2025-09-02 14:50:43] [Rank 0] step:1001/10000 train_time:70877ms step_avg:70.81ms +[2025-09-02 14:50:43] [Rank 0] step:1001/10000 train_time:70877ms step_avg:70.81ms +[2025-09-02 14:50:44] [Rank 0] step:1021/10000 train_time:72166ms step_avg:70.68ms +[2025-09-02 14:50:44] [Rank 0] step:1021/10000 train_time:72166ms step_avg:70.68ms +[2025-09-02 14:50:46] [Rank 0] step:1041/10000 train_time:73589ms step_avg:70.69ms +[2025-09-02 14:50:46] [Rank 0] step:1041/10000 
train_time:73589ms step_avg:70.69ms +[2025-09-02 14:50:47] [Rank 0] step:1061/10000 train_time:75015ms step_avg:70.70ms +[2025-09-02 14:50:47] [Rank 0] step:1061/10000 train_time:75015ms step_avg:70.70ms +[2025-09-02 14:50:48] [Rank 0] step:1081/10000 train_time:76440ms step_avg:70.71ms +[2025-09-02 14:50:48] [Rank 0] step:1081/10000 train_time:76440ms step_avg:70.71ms +[2025-09-02 14:50:50] [Rank 0] step:1101/10000 train_time:77864ms step_avg:70.72ms +[2025-09-02 14:50:50] [Rank 0] step:1101/10000 train_time:77864ms step_avg:70.72ms +[2025-09-02 14:50:51] [Rank 0] step:1121/10000 train_time:79291ms step_avg:70.73ms +[2025-09-02 14:50:51] [Rank 0] step:1121/10000 train_time:79291ms step_avg:70.73ms +[2025-09-02 14:50:53] [Rank 0] step:1141/10000 train_time:80719ms step_avg:70.74ms +[2025-09-02 14:50:53] [Rank 0] step:1141/10000 train_time:80719ms step_avg:70.74ms +[2025-09-02 14:50:54] [Rank 0] step:1161/10000 train_time:82147ms step_avg:70.75ms +[2025-09-02 14:50:54] [Rank 0] step:1161/10000 train_time:82147ms step_avg:70.75ms +[2025-09-02 14:50:56] [Rank 0] step:1181/10000 train_time:83573ms step_avg:70.76ms +[2025-09-02 14:50:56] [Rank 0] step:1181/10000 train_time:83573ms step_avg:70.76ms +[2025-09-02 14:50:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:50:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:51:09] [Rank 0] PRINT: step:1200/10000 val_loss:5.1448 svd_entropy: attn_qk:H=0.6024,top10E=0.50,eRank=68.3,q75/q25=23.03 attn_vo:H=0.6868,top10E=0.32,eRank=127.3,q75/q25=44.47 mlp_w1:H=0.5785,top10E=0.54,eRank=71.2,q75/q25=4.55 mlp_w2:H=0.7499,top10E=0.27,eRank=146.6,q75/q25=10.94 vo_prod:H=0.5785,top10E=0.48,eRank=49.2,q75/q25=1431.15 train_time:85143ms step_avg:70.95ms +[2025-09-02 14:51:09] [Rank 0] PRINT: step:1200/10000 val_loss:5.1448 svd_entropy: attn_qk:H=0.6024,top10E=0.50,eRank=68.3,q75/q25=23.03 attn_vo:H=0.6868,top10E=0.32,eRank=127.3,q75/q25=44.47 mlp_w1:H=0.5785,top10E=0.54,eRank=71.2,q75/q25=4.55 mlp_w2:H=0.7499,top10E=0.27,eRank=146.6,q75/q25=10.94 vo_prod:H=0.5785,top10E=0.48,eRank=49.2,q75/q25=1431.15 train_time:85143ms step_avg:70.95ms +[2025-09-02 14:51:09] [Rank 0] step:1201/10000 train_time:85154ms step_avg:70.90ms +[2025-09-02 14:51:09] [Rank 0] step:1201/10000 train_time:85154ms step_avg:70.90ms +[2025-09-02 14:51:10] [Rank 0] step:1221/10000 train_time:86461ms step_avg:70.81ms +[2025-09-02 14:51:10] [Rank 0] step:1221/10000 train_time:86461ms step_avg:70.81ms +[2025-09-02 14:51:12] [Rank 0] step:1241/10000 train_time:87884ms step_avg:70.82ms +[2025-09-02 14:51:12] [Rank 0] step:1241/10000 train_time:87884ms step_avg:70.82ms +[2025-09-02 14:51:13] [Rank 0] step:1261/10000 train_time:89309ms step_avg:70.82ms +[2025-09-02 14:51:13] [Rank 0] step:1261/10000 train_time:89309ms step_avg:70.82ms +[2025-09-02 14:51:15] [Rank 0] step:1281/10000 train_time:90734ms step_avg:70.83ms +[2025-09-02 14:51:15] [Rank 0] step:1281/10000 train_time:90734ms step_avg:70.83ms +[2025-09-02 14:51:16] [Rank 0] step:1301/10000 train_time:92160ms step_avg:70.84ms +[2025-09-02 14:51:16] [Rank 0] step:1301/10000 train_time:92160ms step_avg:70.84ms +[2025-09-02 14:51:17] [Rank 0] step:1321/10000 train_time:93587ms step_avg:70.85ms +[2025-09-02 14:51:17] [Rank 0] step:1321/10000 train_time:93587ms step_avg:70.85ms +[2025-09-02 14:51:19] [Rank 0] 
step:1341/10000 train_time:95014ms step_avg:70.85ms +[2025-09-02 14:51:19] [Rank 0] step:1341/10000 train_time:95014ms step_avg:70.85ms +[2025-09-02 14:51:20] [Rank 0] step:1361/10000 train_time:96443ms step_avg:70.86ms +[2025-09-02 14:51:20] [Rank 0] step:1361/10000 train_time:96443ms step_avg:70.86ms +[2025-09-02 14:51:22] [Rank 0] step:1381/10000 train_time:97870ms step_avg:70.87ms +[2025-09-02 14:51:22] [Rank 0] step:1381/10000 train_time:97870ms step_avg:70.87ms +[2025-09-02 14:51:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:51:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:51:35] [Rank 0] PRINT: step:1400/10000 val_loss:5.0187 svd_entropy: attn_qk:H=0.6175,top10E=0.47,eRank=73.7,q75/q25=27.22 attn_vo:H=0.7029,top10E=0.30,eRank=137.3,q75/q25=55.99 mlp_w1:H=0.5977,top10E=0.51,eRank=77.8,q75/q25=4.96 mlp_w2:H=0.7665,top10E=0.25,eRank=163.8,q75/q25=12.68 vo_prod:H=0.5967,top10E=0.44,eRank=55.5,q75/q25=2562.05 train_time:99441ms step_avg:71.03ms +[2025-09-02 14:51:35] [Rank 0] PRINT: step:1400/10000 val_loss:5.0187 svd_entropy: attn_qk:H=0.6175,top10E=0.47,eRank=73.7,q75/q25=27.22 attn_vo:H=0.7029,top10E=0.30,eRank=137.3,q75/q25=55.99 mlp_w1:H=0.5977,top10E=0.51,eRank=77.8,q75/q25=4.96 mlp_w2:H=0.7665,top10E=0.25,eRank=163.8,q75/q25=12.68 vo_prod:H=0.5967,top10E=0.44,eRank=55.5,q75/q25=2562.05 train_time:99441ms step_avg:71.03ms +[2025-09-02 14:51:35] [Rank 0] step:1401/10000 train_time:99453ms step_avg:70.99ms +[2025-09-02 14:51:35] [Rank 0] step:1401/10000 train_time:99453ms step_avg:70.99ms +[2025-09-02 14:51:36] [Rank 0] step:1421/10000 train_time:100756ms step_avg:70.91ms +[2025-09-02 14:51:36] [Rank 0] step:1421/10000 train_time:100756ms step_avg:70.91ms +[2025-09-02 14:51:38] [Rank 0] step:1441/10000 train_time:102180ms step_avg:70.91ms +[2025-09-02 14:51:38] 
[Rank 0] step:1441/10000 train_time:102180ms step_avg:70.91ms +[2025-09-02 14:51:39] [Rank 0] step:1461/10000 train_time:103607ms step_avg:70.92ms +[2025-09-02 14:51:39] [Rank 0] step:1461/10000 train_time:103607ms step_avg:70.92ms +[2025-09-02 14:51:41] [Rank 0] step:1481/10000 train_time:105033ms step_avg:70.92ms +[2025-09-02 14:51:41] [Rank 0] step:1481/10000 train_time:105033ms step_avg:70.92ms +[2025-09-02 14:51:42] [Rank 0] step:1501/10000 train_time:106470ms step_avg:70.93ms +[2025-09-02 14:51:42] [Rank 0] step:1501/10000 train_time:106470ms step_avg:70.93ms +[2025-09-02 14:51:44] [Rank 0] step:1521/10000 train_time:107907ms step_avg:70.94ms +[2025-09-02 14:51:44] [Rank 0] step:1521/10000 train_time:107907ms step_avg:70.94ms +[2025-09-02 14:51:45] [Rank 0] step:1541/10000 train_time:109346ms step_avg:70.96ms +[2025-09-02 14:51:45] [Rank 0] step:1541/10000 train_time:109346ms step_avg:70.96ms +[2025-09-02 14:51:47] [Rank 0] step:1561/10000 train_time:110784ms step_avg:70.97ms +[2025-09-02 14:51:47] [Rank 0] step:1561/10000 train_time:110784ms step_avg:70.97ms +[2025-09-02 14:51:48] [Rank 0] step:1581/10000 train_time:112222ms step_avg:70.98ms +[2025-09-02 14:51:48] [Rank 0] step:1581/10000 train_time:112222ms step_avg:70.98ms +[2025-09-02 14:51:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:51:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:52:01] [Rank 0] PRINT: step:1600/10000 val_loss:4.8829 svd_entropy: attn_qk:H=0.6298,top10E=0.45,eRank=78.2,q75/q25=32.47 attn_vo:H=0.7169,top10E=0.28,eRank=147.0,q75/q25=68.06 mlp_w1:H=0.6144,top10E=0.49,eRank=84.4,q75/q25=5.41 mlp_w2:H=0.7797,top10E=0.23,eRank=178.9,q75/q25=14.29 vo_prod:H=0.6119,top10E=0.42,eRank=61.3,q75/q25=4097.38 train_time:113826ms step_avg:71.14ms +[2025-09-02 14:52:01] [Rank 0] PRINT: step:1600/10000 val_loss:4.8829 svd_entropy: attn_qk:H=0.6298,top10E=0.45,eRank=78.2,q75/q25=32.47 attn_vo:H=0.7169,top10E=0.28,eRank=147.0,q75/q25=68.06 mlp_w1:H=0.6144,top10E=0.49,eRank=84.4,q75/q25=5.41 mlp_w2:H=0.7797,top10E=0.23,eRank=178.9,q75/q25=14.29 vo_prod:H=0.6119,top10E=0.42,eRank=61.3,q75/q25=4097.38 train_time:113826ms step_avg:71.14ms +[2025-09-02 14:52:01] [Rank 0] step:1601/10000 train_time:113838ms step_avg:71.10ms +[2025-09-02 14:52:01] [Rank 0] step:1601/10000 train_time:113838ms step_avg:71.10ms +[2025-09-02 14:52:03] [Rank 0] step:1621/10000 train_time:115156ms step_avg:71.04ms +[2025-09-02 14:52:03] [Rank 0] step:1621/10000 train_time:115156ms step_avg:71.04ms +[2025-09-02 14:52:04] [Rank 0] step:1641/10000 train_time:116596ms step_avg:71.05ms +[2025-09-02 14:52:04] [Rank 0] step:1641/10000 train_time:116596ms step_avg:71.05ms +[2025-09-02 14:52:06] [Rank 0] step:1661/10000 train_time:118035ms step_avg:71.06ms +[2025-09-02 14:52:06] [Rank 0] step:1661/10000 train_time:118035ms step_avg:71.06ms +[2025-09-02 14:52:07] [Rank 0] step:1681/10000 train_time:119474ms step_avg:71.07ms +[2025-09-02 14:52:07] [Rank 0] step:1681/10000 train_time:119474ms step_avg:71.07ms +[2025-09-02 14:52:09] [Rank 0] step:1701/10000 train_time:120914ms step_avg:71.08ms +[2025-09-02 14:52:09] [Rank 0] step:1701/10000 train_time:120914ms step_avg:71.08ms +[2025-09-02 14:52:10] [Rank 0] step:1721/10000 train_time:122352ms step_avg:71.09ms +[2025-09-02 14:52:10] [Rank 0] step:1721/10000 train_time:122352ms step_avg:71.09ms +[2025-09-02 14:52:11] 
[Rank 0] step:1741/10000 train_time:123791ms step_avg:71.10ms +[2025-09-02 14:52:11] [Rank 0] step:1741/10000 train_time:123791ms step_avg:71.10ms +[2025-09-02 14:52:13] [Rank 0] step:1761/10000 train_time:125230ms step_avg:71.11ms +[2025-09-02 14:52:13] [Rank 0] step:1761/10000 train_time:125230ms step_avg:71.11ms +[2025-09-02 14:52:14] [Rank 0] step:1781/10000 train_time:126673ms step_avg:71.12ms +[2025-09-02 14:52:14] [Rank 0] step:1781/10000 train_time:126673ms step_avg:71.12ms +[2025-09-02 14:52:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:52:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:52:27] [Rank 0] PRINT: step:1800/10000 val_loss:4.7818 svd_entropy: attn_qk:H=0.6406,top10E=0.43,eRank=82.7,q75/q25=37.93 attn_vo:H=0.7286,top10E=0.26,eRank=156.0,q75/q25=76.92 mlp_w1:H=0.6289,top10E=0.47,eRank=90.5,q75/q25=5.91 mlp_w2:H=0.7904,top10E=0.21,eRank=192.3,q75/q25=15.93 vo_prod:H=0.6250,top10E=0.40,eRank=66.8,q75/q25=5634.40 train_time:128257ms step_avg:71.25ms +[2025-09-02 14:52:27] [Rank 0] PRINT: step:1800/10000 val_loss:4.7818 svd_entropy: attn_qk:H=0.6406,top10E=0.43,eRank=82.7,q75/q25=37.93 attn_vo:H=0.7286,top10E=0.26,eRank=156.0,q75/q25=76.92 mlp_w1:H=0.6289,top10E=0.47,eRank=90.5,q75/q25=5.91 mlp_w2:H=0.7904,top10E=0.21,eRank=192.3,q75/q25=15.93 vo_prod:H=0.6250,top10E=0.40,eRank=66.8,q75/q25=5634.40 train_time:128257ms step_avg:71.25ms +[2025-09-02 14:52:28] [Rank 0] step:1801/10000 train_time:128269ms step_avg:71.22ms +[2025-09-02 14:52:28] [Rank 0] step:1801/10000 train_time:128269ms step_avg:71.22ms +[2025-09-02 14:52:29] [Rank 0] step:1821/10000 train_time:129588ms step_avg:71.16ms +[2025-09-02 14:52:29] [Rank 0] step:1821/10000 train_time:129588ms step_avg:71.16ms +[2025-09-02 14:52:31] [Rank 0] step:1841/10000 train_time:131123ms step_avg:71.22ms 
+[2025-09-02 14:52:31] [Rank 0] step:1841/10000 train_time:131123ms step_avg:71.22ms +[2025-09-02 14:52:32] [Rank 0] step:1861/10000 train_time:132559ms step_avg:71.23ms +[2025-09-02 14:52:32] [Rank 0] step:1861/10000 train_time:132559ms step_avg:71.23ms +[2025-09-02 14:52:33] [Rank 0] step:1881/10000 train_time:133997ms step_avg:71.24ms +[2025-09-02 14:52:33] [Rank 0] step:1881/10000 train_time:133997ms step_avg:71.24ms +[2025-09-02 14:52:35] [Rank 0] step:1901/10000 train_time:135436ms step_avg:71.24ms +[2025-09-02 14:52:35] [Rank 0] step:1901/10000 train_time:135436ms step_avg:71.24ms +[2025-09-02 14:52:36] [Rank 0] step:1921/10000 train_time:136873ms step_avg:71.25ms +[2025-09-02 14:52:36] [Rank 0] step:1921/10000 train_time:136873ms step_avg:71.25ms +[2025-09-02 14:52:38] [Rank 0] step:1941/10000 train_time:138311ms step_avg:71.26ms +[2025-09-02 14:52:38] [Rank 0] step:1941/10000 train_time:138311ms step_avg:71.26ms +[2025-09-02 14:52:39] [Rank 0] step:1961/10000 train_time:139749ms step_avg:71.26ms +[2025-09-02 14:52:39] [Rank 0] step:1961/10000 train_time:139749ms step_avg:71.26ms +[2025-09-02 14:52:41] [Rank 0] step:1981/10000 train_time:141189ms step_avg:71.27ms +[2025-09-02 14:52:41] [Rank 0] step:1981/10000 train_time:141189ms step_avg:71.27ms +[2025-09-02 14:52:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:52:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:52:54] [Rank 0] PRINT: step:2000/10000 val_loss:4.7123 svd_entropy: attn_qk:H=0.6499,top10E=0.41,eRank=86.8,q75/q25=43.91 attn_vo:H=0.7389,top10E=0.25,eRank=164.5,q75/q25=84.93 mlp_w1:H=0.6417,top10E=0.46,eRank=96.2,q75/q25=6.40 mlp_w2:H=0.7989,top10E=0.20,eRank=203.7,q75/q25=17.48 vo_prod:H=0.6365,top10E=0.38,eRank=72.2,q75/q25=7481.72 train_time:142773ms step_avg:71.39ms +[2025-09-02 14:52:54] [Rank 0] PRINT: step:2000/10000 val_loss:4.7123 svd_entropy: attn_qk:H=0.6499,top10E=0.41,eRank=86.8,q75/q25=43.91 attn_vo:H=0.7389,top10E=0.25,eRank=164.5,q75/q25=84.93 mlp_w1:H=0.6417,top10E=0.46,eRank=96.2,q75/q25=6.40 mlp_w2:H=0.7989,top10E=0.20,eRank=203.7,q75/q25=17.48 vo_prod:H=0.6365,top10E=0.38,eRank=72.2,q75/q25=7481.72 train_time:142773ms step_avg:71.39ms +[2025-09-02 14:52:54] [Rank 0] step:2001/10000 train_time:142785ms step_avg:71.36ms +[2025-09-02 14:52:54] [Rank 0] step:2001/10000 train_time:142785ms step_avg:71.36ms +[2025-09-02 14:52:55] [Rank 0] step:2021/10000 train_time:144098ms step_avg:71.30ms +[2025-09-02 14:52:55] [Rank 0] step:2021/10000 train_time:144098ms step_avg:71.30ms +[2025-09-02 14:52:57] [Rank 0] step:2041/10000 train_time:145663ms step_avg:71.37ms +[2025-09-02 14:52:57] [Rank 0] step:2041/10000 train_time:145663ms step_avg:71.37ms +[2025-09-02 14:52:58] [Rank 0] step:2061/10000 train_time:147104ms step_avg:71.38ms +[2025-09-02 14:52:58] [Rank 0] step:2061/10000 train_time:147104ms step_avg:71.38ms +[2025-09-02 14:53:00] [Rank 0] step:2081/10000 train_time:148541ms step_avg:71.38ms +[2025-09-02 14:53:00] [Rank 0] step:2081/10000 train_time:148541ms step_avg:71.38ms +[2025-09-02 14:53:01] [Rank 0] step:2101/10000 train_time:149977ms step_avg:71.38ms +[2025-09-02 14:53:01] [Rank 0] step:2101/10000 train_time:149977ms step_avg:71.38ms +[2025-09-02 14:53:03] [Rank 0] step:2121/10000 train_time:151414ms step_avg:71.39ms +[2025-09-02 14:53:03] [Rank 0] step:2121/10000 train_time:151414ms step_avg:71.39ms +[2025-09-02 14:53:04] 
[Rank 0] step:2141/10000 train_time:152851ms step_avg:71.39ms +[2025-09-02 14:53:04] [Rank 0] step:2141/10000 train_time:152851ms step_avg:71.39ms +[2025-09-02 14:53:06] [Rank 0] step:2161/10000 train_time:154291ms step_avg:71.40ms +[2025-09-02 14:53:06] [Rank 0] step:2161/10000 train_time:154291ms step_avg:71.40ms +[2025-09-02 14:53:07] [Rank 0] step:2181/10000 train_time:155728ms step_avg:71.40ms +[2025-09-02 14:53:07] [Rank 0] step:2181/10000 train_time:155728ms step_avg:71.40ms +[2025-09-02 14:53:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:53:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:53:20] [Rank 0] PRINT: step:2200/10000 val_loss:4.6375 svd_entropy: attn_qk:H=0.6580,top10E=0.40,eRank=90.6,q75/q25=49.82 attn_vo:H=0.7474,top10E=0.24,eRank=172.1,q75/q25=90.30 mlp_w1:H=0.6531,top10E=0.44,eRank=101.7,q75/q25=6.96 mlp_w2:H=0.8059,top10E=0.19,eRank=213.6,q75/q25=18.91 vo_prod:H=0.6463,top10E=0.36,eRank=77.0,q75/q25=9019.41 train_time:157308ms step_avg:71.50ms +[2025-09-02 14:53:20] [Rank 0] PRINT: step:2200/10000 val_loss:4.6375 svd_entropy: attn_qk:H=0.6580,top10E=0.40,eRank=90.6,q75/q25=49.82 attn_vo:H=0.7474,top10E=0.24,eRank=172.1,q75/q25=90.30 mlp_w1:H=0.6531,top10E=0.44,eRank=101.7,q75/q25=6.96 mlp_w2:H=0.8059,top10E=0.19,eRank=213.6,q75/q25=18.91 vo_prod:H=0.6463,top10E=0.36,eRank=77.0,q75/q25=9019.41 train_time:157308ms step_avg:71.50ms +[2025-09-02 14:53:20] [Rank 0] step:2201/10000 train_time:157319ms step_avg:71.48ms +[2025-09-02 14:53:20] [Rank 0] step:2201/10000 train_time:157319ms step_avg:71.48ms +[2025-09-02 14:53:22] [Rank 0] step:2221/10000 train_time:158645ms step_avg:71.43ms +[2025-09-02 14:53:22] [Rank 0] step:2221/10000 train_time:158645ms step_avg:71.43ms +[2025-09-02 14:53:23] [Rank 0] step:2241/10000 train_time:160112ms step_avg:71.45ms 
+[2025-09-02 14:53:23] [Rank 0] step:2241/10000 train_time:160112ms step_avg:71.45ms +[2025-09-02 14:53:25] [Rank 0] step:2261/10000 train_time:161592ms step_avg:71.47ms +[2025-09-02 14:53:25] [Rank 0] step:2261/10000 train_time:161592ms step_avg:71.47ms +[2025-09-02 14:53:26] [Rank 0] step:2281/10000 train_time:163073ms step_avg:71.49ms +[2025-09-02 14:53:26] [Rank 0] step:2281/10000 train_time:163073ms step_avg:71.49ms +[2025-09-02 14:53:28] [Rank 0] step:2301/10000 train_time:164553ms step_avg:71.51ms +[2025-09-02 14:53:28] [Rank 0] step:2301/10000 train_time:164553ms step_avg:71.51ms +[2025-09-02 14:53:29] [Rank 0] step:2321/10000 train_time:166035ms step_avg:71.54ms +[2025-09-02 14:53:29] [Rank 0] step:2321/10000 train_time:166035ms step_avg:71.54ms +[2025-09-02 14:53:31] [Rank 0] step:2341/10000 train_time:167517ms step_avg:71.56ms +[2025-09-02 14:53:31] [Rank 0] step:2341/10000 train_time:167517ms step_avg:71.56ms +[2025-09-02 14:53:32] [Rank 0] step:2361/10000 train_time:168998ms step_avg:71.58ms +[2025-09-02 14:53:32] [Rank 0] step:2361/10000 train_time:168998ms step_avg:71.58ms +[2025-09-02 14:53:33] [Rank 0] step:2381/10000 train_time:170480ms step_avg:71.60ms +[2025-09-02 14:53:33] [Rank 0] step:2381/10000 train_time:170480ms step_avg:71.60ms +[2025-09-02 14:53:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:53:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:53:47] [Rank 0] PRINT: step:2400/10000 val_loss:4.5601 svd_entropy: attn_qk:H=0.6646,top10E=0.39,eRank=93.8,q75/q25=55.44 attn_vo:H=0.7551,top10E=0.23,eRank=179.4,q75/q25=95.00 mlp_w1:H=0.6630,top10E=0.43,eRank=106.7,q75/q25=7.49 mlp_w2:H=0.8119,top10E=0.18,eRank=222.7,q75/q25=20.16 vo_prod:H=0.6546,top10E=0.35,eRank=81.5,q75/q25=10139.31 train_time:172112ms step_avg:71.71ms +[2025-09-02 14:53:47] [Rank 0] PRINT: step:2400/10000 val_loss:4.5601 svd_entropy: attn_qk:H=0.6646,top10E=0.39,eRank=93.8,q75/q25=55.44 attn_vo:H=0.7551,top10E=0.23,eRank=179.4,q75/q25=95.00 mlp_w1:H=0.6630,top10E=0.43,eRank=106.7,q75/q25=7.49 mlp_w2:H=0.8119,top10E=0.18,eRank=222.7,q75/q25=20.16 vo_prod:H=0.6546,top10E=0.35,eRank=81.5,q75/q25=10139.31 train_time:172112ms step_avg:71.71ms +[2025-09-02 14:53:47] [Rank 0] step:2401/10000 train_time:172124ms step_avg:71.69ms +[2025-09-02 14:53:47] [Rank 0] step:2401/10000 train_time:172124ms step_avg:71.69ms +[2025-09-02 14:53:48] [Rank 0] step:2421/10000 train_time:173484ms step_avg:71.66ms +[2025-09-02 14:53:48] [Rank 0] step:2421/10000 train_time:173484ms step_avg:71.66ms +[2025-09-02 14:53:50] [Rank 0] step:2441/10000 train_time:174965ms step_avg:71.68ms +[2025-09-02 14:53:50] [Rank 0] step:2441/10000 train_time:174965ms step_avg:71.68ms +[2025-09-02 14:53:51] [Rank 0] step:2461/10000 train_time:176447ms step_avg:71.70ms +[2025-09-02 14:53:51] [Rank 0] step:2461/10000 train_time:176447ms step_avg:71.70ms +[2025-09-02 14:53:53] [Rank 0] step:2481/10000 train_time:177929ms step_avg:71.72ms +[2025-09-02 14:53:53] [Rank 0] step:2481/10000 train_time:177929ms step_avg:71.72ms +[2025-09-02 14:53:54] [Rank 0] step:2501/10000 train_time:179410ms step_avg:71.74ms +[2025-09-02 14:53:54] [Rank 0] step:2501/10000 train_time:179410ms step_avg:71.74ms +[2025-09-02 14:53:56] [Rank 0] step:2521/10000 train_time:180935ms step_avg:71.77ms +[2025-09-02 14:53:56] [Rank 0] step:2521/10000 train_time:180935ms step_avg:71.77ms +[2025-09-02 
14:53:57] [Rank 0] step:2541/10000 train_time:182416ms step_avg:71.79ms +[2025-09-02 14:53:57] [Rank 0] step:2541/10000 train_time:182416ms step_avg:71.79ms +[2025-09-02 14:53:59] [Rank 0] step:2561/10000 train_time:183895ms step_avg:71.81ms +[2025-09-02 14:53:59] [Rank 0] step:2561/10000 train_time:183895ms step_avg:71.81ms +[2025-09-02 14:54:00] [Rank 0] step:2581/10000 train_time:185378ms step_avg:71.82ms +[2025-09-02 14:54:00] [Rank 0] step:2581/10000 train_time:185378ms step_avg:71.82ms +[2025-09-02 14:54:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:54:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:54:13] [Rank 0] PRINT: step:2600/10000 val_loss:4.5029 svd_entropy: attn_qk:H=0.6711,top10E=0.38,eRank=97.1,q75/q25=61.31 attn_vo:H=0.7620,top10E=0.22,eRank=186.2,q75/q25=98.32 mlp_w1:H=0.6719,top10E=0.42,eRank=111.5,q75/q25=8.10 mlp_w2:H=0.8171,top10E=0.17,eRank=230.7,q75/q25=21.44 vo_prod:H=0.6625,top10E=0.34,eRank=85.9,q75/q25=10982.33 train_time:187010ms step_avg:71.93ms +[2025-09-02 14:54:13] [Rank 0] PRINT: step:2600/10000 val_loss:4.5029 svd_entropy: attn_qk:H=0.6711,top10E=0.38,eRank=97.1,q75/q25=61.31 attn_vo:H=0.7620,top10E=0.22,eRank=186.2,q75/q25=98.32 mlp_w1:H=0.6719,top10E=0.42,eRank=111.5,q75/q25=8.10 mlp_w2:H=0.8171,top10E=0.17,eRank=230.7,q75/q25=21.44 vo_prod:H=0.6625,top10E=0.34,eRank=85.9,q75/q25=10982.33 train_time:187010ms step_avg:71.93ms +[2025-09-02 14:54:14] [Rank 0] step:2601/10000 train_time:187022ms step_avg:71.90ms +[2025-09-02 14:54:14] [Rank 0] step:2601/10000 train_time:187022ms step_avg:71.90ms +[2025-09-02 14:54:15] [Rank 0] step:2621/10000 train_time:188381ms step_avg:71.87ms +[2025-09-02 14:54:15] [Rank 0] step:2621/10000 train_time:188381ms step_avg:71.87ms +[2025-09-02 14:54:16] [Rank 0] step:2641/10000 train_time:189860ms 
step_avg:71.89ms +[2025-09-02 14:54:16] [Rank 0] step:2641/10000 train_time:189860ms step_avg:71.89ms +[2025-09-02 14:54:18] [Rank 0] step:2661/10000 train_time:191343ms step_avg:71.91ms +[2025-09-02 14:54:18] [Rank 0] step:2661/10000 train_time:191343ms step_avg:71.91ms +[2025-09-02 14:54:19] [Rank 0] step:2681/10000 train_time:192827ms step_avg:71.92ms +[2025-09-02 14:54:19] [Rank 0] step:2681/10000 train_time:192827ms step_avg:71.92ms +[2025-09-02 14:54:21] [Rank 0] step:2701/10000 train_time:194310ms step_avg:71.94ms +[2025-09-02 14:54:21] [Rank 0] step:2701/10000 train_time:194310ms step_avg:71.94ms +[2025-09-02 14:54:22] [Rank 0] step:2721/10000 train_time:195793ms step_avg:71.96ms +[2025-09-02 14:54:22] [Rank 0] step:2721/10000 train_time:195793ms step_avg:71.96ms +[2025-09-02 14:54:24] [Rank 0] step:2741/10000 train_time:197277ms step_avg:71.97ms +[2025-09-02 14:54:24] [Rank 0] step:2741/10000 train_time:197277ms step_avg:71.97ms +[2025-09-02 14:54:25] [Rank 0] step:2761/10000 train_time:198762ms step_avg:71.99ms +[2025-09-02 14:54:25] [Rank 0] step:2761/10000 train_time:198762ms step_avg:71.99ms +[2025-09-02 14:54:27] [Rank 0] step:2781/10000 train_time:200246ms step_avg:72.00ms +[2025-09-02 14:54:27] [Rank 0] step:2781/10000 train_time:200246ms step_avg:72.00ms +[2025-09-02 14:54:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:54:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:54:40] [Rank 0] PRINT: step:2800/10000 val_loss:4.4647 svd_entropy: attn_qk:H=0.6772,top10E=0.37,eRank=100.5,q75/q25=66.90 attn_vo:H=0.7684,top10E=0.21,eRank=192.9,q75/q25=100.27 mlp_w1:H=0.6800,top10E=0.41,eRank=116.2,q75/q25=8.64 mlp_w2:H=0.8215,top10E=0.17,eRank=237.9,q75/q25=22.59 vo_prod:H=0.6700,top10E=0.33,eRank=90.3,q75/q25=11829.04 train_time:201880ms step_avg:72.10ms +[2025-09-02 14:54:40] [Rank 0] PRINT: step:2800/10000 val_loss:4.4647 svd_entropy: attn_qk:H=0.6772,top10E=0.37,eRank=100.5,q75/q25=66.90 attn_vo:H=0.7684,top10E=0.21,eRank=192.9,q75/q25=100.27 mlp_w1:H=0.6800,top10E=0.41,eRank=116.2,q75/q25=8.64 mlp_w2:H=0.8215,top10E=0.17,eRank=237.9,q75/q25=22.59 vo_prod:H=0.6700,top10E=0.33,eRank=90.3,q75/q25=11829.04 train_time:201880ms step_avg:72.10ms +[2025-09-02 14:54:40] [Rank 0] step:2801/10000 train_time:201892ms step_avg:72.08ms +[2025-09-02 14:54:40] [Rank 0] step:2801/10000 train_time:201892ms step_avg:72.08ms +[2025-09-02 14:54:42] [Rank 0] step:2821/10000 train_time:203248ms step_avg:72.05ms +[2025-09-02 14:54:42] [Rank 0] step:2821/10000 train_time:203248ms step_avg:72.05ms +[2025-09-02 14:54:43] [Rank 0] step:2841/10000 train_time:204729ms step_avg:72.06ms +[2025-09-02 14:54:43] [Rank 0] step:2841/10000 train_time:204729ms step_avg:72.06ms +[2025-09-02 14:54:45] [Rank 0] step:2861/10000 train_time:206210ms step_avg:72.08ms +[2025-09-02 14:54:45] [Rank 0] step:2861/10000 train_time:206210ms step_avg:72.08ms +[2025-09-02 14:54:46] [Rank 0] step:2881/10000 train_time:207691ms step_avg:72.09ms +[2025-09-02 14:54:46] [Rank 0] step:2881/10000 train_time:207691ms step_avg:72.09ms +[2025-09-02 14:54:48] [Rank 0] step:2901/10000 train_time:209172ms step_avg:72.10ms +[2025-09-02 14:54:48] [Rank 0] step:2901/10000 train_time:209172ms step_avg:72.10ms +[2025-09-02 14:54:49] [Rank 0] step:2921/10000 train_time:210653ms step_avg:72.12ms +[2025-09-02 14:54:49] [Rank 0] step:2921/10000 train_time:210653ms step_avg:72.12ms +[2025-09-02 
14:54:51] [Rank 0] step:2941/10000 train_time:212136ms step_avg:72.13ms +[2025-09-02 14:54:51] [Rank 0] step:2941/10000 train_time:212136ms step_avg:72.13ms +[2025-09-02 14:54:52] [Rank 0] step:2961/10000 train_time:213620ms step_avg:72.14ms +[2025-09-02 14:54:52] [Rank 0] step:2961/10000 train_time:213620ms step_avg:72.14ms +[2025-09-02 14:54:54] [Rank 0] step:2981/10000 train_time:215109ms step_avg:72.16ms +[2025-09-02 14:54:54] [Rank 0] step:2981/10000 train_time:215109ms step_avg:72.16ms +[2025-09-02 14:54:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:54:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:55:07] [Rank 0] PRINT: step:3000/10000 val_loss:4.4201 svd_entropy: attn_qk:H=0.6826,top10E=0.36,eRank=103.5,q75/q25=71.37 attn_vo:H=0.7740,top10E=0.21,eRank=198.8,q75/q25=101.11 mlp_w1:H=0.6872,top10E=0.40,eRank=120.5,q75/q25=9.20 mlp_w2:H=0.8253,top10E=0.16,eRank=244.3,q75/q25=23.60 vo_prod:H=0.6764,top10E=0.32,eRank=94.2,q75/q25=12059.70 train_time:216751ms step_avg:72.25ms +[2025-09-02 14:55:07] [Rank 0] PRINT: step:3000/10000 val_loss:4.4201 svd_entropy: attn_qk:H=0.6826,top10E=0.36,eRank=103.5,q75/q25=71.37 attn_vo:H=0.7740,top10E=0.21,eRank=198.8,q75/q25=101.11 mlp_w1:H=0.6872,top10E=0.40,eRank=120.5,q75/q25=9.20 mlp_w2:H=0.8253,top10E=0.16,eRank=244.3,q75/q25=23.60 vo_prod:H=0.6764,top10E=0.32,eRank=94.2,q75/q25=12059.70 train_time:216751ms step_avg:72.25ms +[2025-09-02 14:55:07] [Rank 0] step:3001/10000 train_time:216762ms step_avg:72.23ms +[2025-09-02 14:55:07] [Rank 0] step:3001/10000 train_time:216762ms step_avg:72.23ms +[2025-09-02 14:55:09] [Rank 0] step:3021/10000 train_time:218112ms step_avg:72.20ms +[2025-09-02 14:55:09] [Rank 0] step:3021/10000 train_time:218112ms step_avg:72.20ms +[2025-09-02 14:55:10] [Rank 0] step:3041/10000 train_time:219600ms 
step_avg:72.21ms +[2025-09-02 14:55:10] [Rank 0] step:3041/10000 train_time:219600ms step_avg:72.21ms +[2025-09-02 14:55:12] [Rank 0] step:3061/10000 train_time:221088ms step_avg:72.23ms +[2025-09-02 14:55:12] [Rank 0] step:3061/10000 train_time:221088ms step_avg:72.23ms +[2025-09-02 14:55:13] [Rank 0] step:3081/10000 train_time:222577ms step_avg:72.24ms +[2025-09-02 14:55:13] [Rank 0] step:3081/10000 train_time:222577ms step_avg:72.24ms +[2025-09-02 14:55:15] [Rank 0] step:3101/10000 train_time:224067ms step_avg:72.26ms +[2025-09-02 14:55:15] [Rank 0] step:3101/10000 train_time:224067ms step_avg:72.26ms +[2025-09-02 14:55:16] [Rank 0] step:3121/10000 train_time:225556ms step_avg:72.27ms +[2025-09-02 14:55:16] [Rank 0] step:3121/10000 train_time:225556ms step_avg:72.27ms +[2025-09-02 14:55:18] [Rank 0] step:3141/10000 train_time:227046ms step_avg:72.28ms +[2025-09-02 14:55:18] [Rank 0] step:3141/10000 train_time:227046ms step_avg:72.28ms +[2025-09-02 14:55:19] [Rank 0] step:3161/10000 train_time:228538ms step_avg:72.30ms +[2025-09-02 14:55:19] [Rank 0] step:3161/10000 train_time:228538ms step_avg:72.30ms +[2025-09-02 14:55:21] [Rank 0] step:3181/10000 train_time:230028ms step_avg:72.31ms +[2025-09-02 14:55:21] [Rank 0] step:3181/10000 train_time:230028ms step_avg:72.31ms +[2025-09-02 14:55:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:55:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:55:34] [Rank 0] PRINT: step:3200/10000 val_loss:4.3850 svd_entropy: attn_qk:H=0.6876,top10E=0.35,eRank=106.4,q75/q25=76.58 attn_vo:H=0.7792,top10E=0.20,eRank=204.6,q75/q25=100.92 mlp_w1:H=0.6939,top10E=0.39,eRank=124.6,q75/q25=9.75 mlp_w2:H=0.8287,top10E=0.16,eRank=250.2,q75/q25=24.57 vo_prod:H=0.6825,top10E=0.31,eRank=98.1,q75/q25=12563.58 train_time:231667ms step_avg:72.40ms +[2025-09-02 14:55:34] [Rank 0] PRINT: step:3200/10000 val_loss:4.3850 svd_entropy: attn_qk:H=0.6876,top10E=0.35,eRank=106.4,q75/q25=76.58 attn_vo:H=0.7792,top10E=0.20,eRank=204.6,q75/q25=100.92 mlp_w1:H=0.6939,top10E=0.39,eRank=124.6,q75/q25=9.75 mlp_w2:H=0.8287,top10E=0.16,eRank=250.2,q75/q25=24.57 vo_prod:H=0.6825,top10E=0.31,eRank=98.1,q75/q25=12563.58 train_time:231667ms step_avg:72.40ms +[2025-09-02 14:55:34] [Rank 0] step:3201/10000 train_time:231679ms step_avg:72.38ms +[2025-09-02 14:55:34] [Rank 0] step:3201/10000 train_time:231679ms step_avg:72.38ms +[2025-09-02 14:55:36] [Rank 0] step:3221/10000 train_time:233036ms step_avg:72.35ms +[2025-09-02 14:55:36] [Rank 0] step:3221/10000 train_time:233036ms step_avg:72.35ms +[2025-09-02 14:55:37] [Rank 0] step:3241/10000 train_time:234525ms step_avg:72.36ms +[2025-09-02 14:55:37] [Rank 0] step:3241/10000 train_time:234525ms step_avg:72.36ms +[2025-09-02 14:55:39] [Rank 0] step:3261/10000 train_time:236012ms step_avg:72.37ms +[2025-09-02 14:55:39] [Rank 0] step:3261/10000 train_time:236012ms step_avg:72.37ms +[2025-09-02 14:55:40] [Rank 0] step:3281/10000 train_time:237503ms step_avg:72.39ms +[2025-09-02 14:55:40] [Rank 0] step:3281/10000 train_time:237503ms step_avg:72.39ms +[2025-09-02 14:55:42] [Rank 0] step:3301/10000 train_time:238994ms step_avg:72.40ms +[2025-09-02 14:55:42] [Rank 0] step:3301/10000 train_time:238994ms step_avg:72.40ms +[2025-09-02 14:55:43] [Rank 0] step:3321/10000 train_time:240484ms step_avg:72.41ms +[2025-09-02 14:55:43] [Rank 0] step:3321/10000 train_time:240484ms step_avg:72.41ms +[2025-09-02 
14:55:45] [Rank 0] step:3341/10000 train_time:242078ms step_avg:72.46ms +[2025-09-02 14:55:45] [Rank 0] step:3341/10000 train_time:242078ms step_avg:72.46ms +[2025-09-02 14:55:46] [Rank 0] step:3361/10000 train_time:243568ms step_avg:72.47ms +[2025-09-02 14:55:46] [Rank 0] step:3361/10000 train_time:243568ms step_avg:72.47ms +[2025-09-02 14:55:48] [Rank 0] step:3381/10000 train_time:245058ms step_avg:72.48ms +[2025-09-02 14:55:48] [Rank 0] step:3381/10000 train_time:245058ms step_avg:72.48ms +[2025-09-02 14:55:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:55:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:56:01] [Rank 0] PRINT: step:3400/10000 val_loss:4.3453 svd_entropy: attn_qk:H=0.6926,top10E=0.34,eRank=109.3,q75/q25=81.34 attn_vo:H=0.7841,top10E=0.20,eRank=210.2,q75/q25=100.20 mlp_w1:H=0.7001,top10E=0.38,eRank=128.7,q75/q25=10.31 mlp_w2:H=0.8320,top10E=0.15,eRank=256.0,q75/q25=25.27 vo_prod:H=0.6882,top10E=0.30,eRank=101.9,q75/q25=12680.23 train_time:246699ms step_avg:72.56ms +[2025-09-02 14:56:01] [Rank 0] PRINT: step:3400/10000 val_loss:4.3453 svd_entropy: attn_qk:H=0.6926,top10E=0.34,eRank=109.3,q75/q25=81.34 attn_vo:H=0.7841,top10E=0.20,eRank=210.2,q75/q25=100.20 mlp_w1:H=0.7001,top10E=0.38,eRank=128.7,q75/q25=10.31 mlp_w2:H=0.8320,top10E=0.15,eRank=256.0,q75/q25=25.27 vo_prod:H=0.6882,top10E=0.30,eRank=101.9,q75/q25=12680.23 train_time:246699ms step_avg:72.56ms +[2025-09-02 14:56:01] [Rank 0] step:3401/10000 train_time:246711ms step_avg:72.54ms +[2025-09-02 14:56:01] [Rank 0] step:3401/10000 train_time:246711ms step_avg:72.54ms +[2025-09-02 14:56:03] [Rank 0] step:3421/10000 train_time:248058ms step_avg:72.51ms +[2025-09-02 14:56:03] [Rank 0] step:3421/10000 train_time:248058ms step_avg:72.51ms +[2025-09-02 14:56:04] [Rank 0] step:3441/10000 
train_time:249648ms step_avg:72.55ms +[2025-09-02 14:56:04] [Rank 0] step:3441/10000 train_time:249648ms step_avg:72.55ms +[2025-09-02 14:56:06] [Rank 0] step:3461/10000 train_time:251240ms step_avg:72.59ms +[2025-09-02 14:56:06] [Rank 0] step:3461/10000 train_time:251240ms step_avg:72.59ms +[2025-09-02 14:56:07] [Rank 0] step:3481/10000 train_time:252731ms step_avg:72.60ms +[2025-09-02 14:56:07] [Rank 0] step:3481/10000 train_time:252731ms step_avg:72.60ms +[2025-09-02 14:56:09] [Rank 0] step:3501/10000 train_time:254224ms step_avg:72.61ms +[2025-09-02 14:56:09] [Rank 0] step:3501/10000 train_time:254224ms step_avg:72.61ms +[2025-09-02 14:56:10] [Rank 0] step:3521/10000 train_time:255717ms step_avg:72.63ms +[2025-09-02 14:56:10] [Rank 0] step:3521/10000 train_time:255717ms step_avg:72.63ms +[2025-09-02 14:56:12] [Rank 0] step:3541/10000 train_time:257207ms step_avg:72.64ms +[2025-09-02 14:56:12] [Rank 0] step:3541/10000 train_time:257207ms step_avg:72.64ms +[2025-09-02 14:56:13] [Rank 0] step:3561/10000 train_time:258699ms step_avg:72.65ms +[2025-09-02 14:56:13] [Rank 0] step:3561/10000 train_time:258699ms step_avg:72.65ms +[2025-09-02 14:56:15] [Rank 0] step:3581/10000 train_time:260192ms step_avg:72.66ms +[2025-09-02 14:56:15] [Rank 0] step:3581/10000 train_time:260192ms step_avg:72.66ms +[2025-09-02 14:56:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:56:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:56:28] [Rank 0] PRINT: step:3600/10000 val_loss:4.3341 svd_entropy: attn_qk:H=0.6968,top10E=0.34,eRank=112.0,q75/q25=84.67 attn_vo:H=0.7884,top10E=0.19,eRank=215.3,q75/q25=99.19 mlp_w1:H=0.7060,top10E=0.37,eRank=132.6,q75/q25=10.88 mlp_w2:H=0.8347,top10E=0.15,eRank=261.1,q75/q25=26.09 vo_prod:H=0.6933,top10E=0.30,eRank=105.4,q75/q25=12464.60 train_time:261836ms step_avg:72.73ms +[2025-09-02 14:56:28] [Rank 0] PRINT: step:3600/10000 val_loss:4.3341 svd_entropy: attn_qk:H=0.6968,top10E=0.34,eRank=112.0,q75/q25=84.67 attn_vo:H=0.7884,top10E=0.19,eRank=215.3,q75/q25=99.19 mlp_w1:H=0.7060,top10E=0.37,eRank=132.6,q75/q25=10.88 mlp_w2:H=0.8347,top10E=0.15,eRank=261.1,q75/q25=26.09 vo_prod:H=0.6933,top10E=0.30,eRank=105.4,q75/q25=12464.60 train_time:261836ms step_avg:72.73ms +[2025-09-02 14:56:28] [Rank 0] step:3601/10000 train_time:261847ms step_avg:72.72ms +[2025-09-02 14:56:28] [Rank 0] step:3601/10000 train_time:261847ms step_avg:72.72ms +[2025-09-02 14:56:30] [Rank 0] step:3621/10000 train_time:263296ms step_avg:72.71ms +[2025-09-02 14:56:30] [Rank 0] step:3621/10000 train_time:263296ms step_avg:72.71ms +[2025-09-02 14:56:31] [Rank 0] step:3641/10000 train_time:264783ms step_avg:72.72ms +[2025-09-02 14:56:31] [Rank 0] step:3641/10000 train_time:264783ms step_avg:72.72ms +[2025-09-02 14:56:33] [Rank 0] step:3661/10000 train_time:266273ms step_avg:72.73ms +[2025-09-02 14:56:33] [Rank 0] step:3661/10000 train_time:266273ms step_avg:72.73ms +[2025-09-02 14:56:34] [Rank 0] step:3681/10000 train_time:267763ms step_avg:72.74ms +[2025-09-02 14:56:34] [Rank 0] step:3681/10000 train_time:267763ms step_avg:72.74ms +[2025-09-02 14:56:36] [Rank 0] step:3701/10000 train_time:269359ms step_avg:72.78ms +[2025-09-02 14:56:36] [Rank 0] step:3701/10000 train_time:269359ms step_avg:72.78ms +[2025-09-02 14:56:37] [Rank 0] step:3721/10000 train_time:270875ms step_avg:72.80ms +[2025-09-02 14:56:37] [Rank 0] step:3721/10000 train_time:270875ms step_avg:72.80ms +[2025-09-02 
14:56:39] [Rank 0] step:3741/10000 train_time:272399ms step_avg:72.81ms +[2025-09-02 14:56:39] [Rank 0] step:3741/10000 train_time:272399ms step_avg:72.81ms +[2025-09-02 14:56:40] [Rank 0] step:3761/10000 train_time:273925ms step_avg:72.83ms +[2025-09-02 14:56:40] [Rank 0] step:3761/10000 train_time:273925ms step_avg:72.83ms +[2025-09-02 14:56:42] [Rank 0] step:3781/10000 train_time:275452ms step_avg:72.85ms +[2025-09-02 14:56:42] [Rank 0] step:3781/10000 train_time:275452ms step_avg:72.85ms +[2025-09-02 14:56:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:56:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:56:55] [Rank 0] PRINT: step:3800/10000 val_loss:4.2781 svd_entropy: attn_qk:H=0.7007,top10E=0.33,eRank=114.5,q75/q25=88.50 attn_vo:H=0.7925,top10E=0.19,eRank=220.2,q75/q25=98.08 mlp_w1:H=0.7115,top10E=0.36,eRank=136.5,q75/q25=11.41 mlp_w2:H=0.8372,top10E=0.15,eRank=265.7,q75/q25=26.98 vo_prod:H=0.6981,top10E=0.29,eRank=108.7,q75/q25=12284.81 train_time:277143ms step_avg:72.93ms +[2025-09-02 14:56:55] [Rank 0] PRINT: step:3800/10000 val_loss:4.2781 svd_entropy: attn_qk:H=0.7007,top10E=0.33,eRank=114.5,q75/q25=88.50 attn_vo:H=0.7925,top10E=0.19,eRank=220.2,q75/q25=98.08 mlp_w1:H=0.7115,top10E=0.36,eRank=136.5,q75/q25=11.41 mlp_w2:H=0.8372,top10E=0.15,eRank=265.7,q75/q25=26.98 vo_prod:H=0.6981,top10E=0.29,eRank=108.7,q75/q25=12284.81 train_time:277143ms step_avg:72.93ms +[2025-09-02 14:56:55] [Rank 0] step:3801/10000 train_time:277155ms step_avg:72.92ms +[2025-09-02 14:56:55] [Rank 0] step:3801/10000 train_time:277155ms step_avg:72.92ms +[2025-09-02 14:56:57] [Rank 0] step:3821/10000 train_time:278641ms step_avg:72.92ms +[2025-09-02 14:56:57] [Rank 0] step:3821/10000 train_time:278641ms step_avg:72.92ms +[2025-09-02 14:56:59] [Rank 0] step:3841/10000 train_time:280166ms 
step_avg:72.94ms +[2025-09-02 14:56:59] [Rank 0] step:3841/10000 train_time:280166ms step_avg:72.94ms +[2025-09-02 14:57:00] [Rank 0] step:3861/10000 train_time:281690ms step_avg:72.96ms +[2025-09-02 14:57:00] [Rank 0] step:3861/10000 train_time:281690ms step_avg:72.96ms +[2025-09-02 14:57:02] [Rank 0] step:3881/10000 train_time:283313ms step_avg:73.00ms +[2025-09-02 14:57:02] [Rank 0] step:3881/10000 train_time:283313ms step_avg:73.00ms +[2025-09-02 14:57:03] [Rank 0] step:3901/10000 train_time:284842ms step_avg:73.02ms +[2025-09-02 14:57:03] [Rank 0] step:3901/10000 train_time:284842ms step_avg:73.02ms +[2025-09-02 14:57:05] [Rank 0] step:3921/10000 train_time:286366ms step_avg:73.03ms +[2025-09-02 14:57:05] [Rank 0] step:3921/10000 train_time:286366ms step_avg:73.03ms +[2025-09-02 14:57:06] [Rank 0] step:3941/10000 train_time:287892ms step_avg:73.05ms +[2025-09-02 14:57:06] [Rank 0] step:3941/10000 train_time:287892ms step_avg:73.05ms +[2025-09-02 14:57:08] [Rank 0] step:3961/10000 train_time:289495ms step_avg:73.09ms +[2025-09-02 14:57:08] [Rank 0] step:3961/10000 train_time:289495ms step_avg:73.09ms +[2025-09-02 14:57:09] [Rank 0] step:3981/10000 train_time:291020ms step_avg:73.10ms +[2025-09-02 14:57:09] [Rank 0] step:3981/10000 train_time:291020ms step_avg:73.10ms +[2025-09-02 14:57:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:57:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:57:23] [Rank 0] PRINT: step:4000/10000 val_loss:4.2517 svd_entropy: attn_qk:H=0.7045,top10E=0.32,eRank=117.0,q75/q25=91.03 attn_vo:H=0.7962,top10E=0.18,eRank=224.9,q75/q25=95.57 mlp_w1:H=0.7164,top10E=0.35,eRank=140.1,q75/q25=11.92 mlp_w2:H=0.8395,top10E=0.14,eRank=270.2,q75/q25=27.68 vo_prod:H=0.7025,top10E=0.28,eRank=112.0,q75/q25=11481.76 train_time:292697ms step_avg:73.17ms +[2025-09-02 14:57:23] [Rank 0] PRINT: step:4000/10000 val_loss:4.2517 svd_entropy: attn_qk:H=0.7045,top10E=0.32,eRank=117.0,q75/q25=91.03 attn_vo:H=0.7962,top10E=0.18,eRank=224.9,q75/q25=95.57 mlp_w1:H=0.7164,top10E=0.35,eRank=140.1,q75/q25=11.92 mlp_w2:H=0.8395,top10E=0.14,eRank=270.2,q75/q25=27.68 vo_prod:H=0.7025,top10E=0.28,eRank=112.0,q75/q25=11481.76 train_time:292697ms step_avg:73.17ms +[2025-09-02 14:57:23] [Rank 0] step:4001/10000 train_time:292708ms step_avg:73.16ms +[2025-09-02 14:57:23] [Rank 0] step:4001/10000 train_time:292708ms step_avg:73.16ms +[2025-09-02 14:57:24] [Rank 0] step:4021/10000 train_time:294089ms step_avg:73.14ms +[2025-09-02 14:57:24] [Rank 0] step:4021/10000 train_time:294089ms step_avg:73.14ms +[2025-09-02 14:57:26] [Rank 0] step:4041/10000 train_time:295613ms step_avg:73.15ms +[2025-09-02 14:57:26] [Rank 0] step:4041/10000 train_time:295613ms step_avg:73.15ms +[2025-09-02 14:57:27] [Rank 0] step:4061/10000 train_time:297135ms step_avg:73.17ms +[2025-09-02 14:57:27] [Rank 0] step:4061/10000 train_time:297135ms step_avg:73.17ms +[2025-09-02 14:57:29] [Rank 0] step:4081/10000 train_time:298775ms step_avg:73.21ms +[2025-09-02 14:57:29] [Rank 0] step:4081/10000 train_time:298775ms step_avg:73.21ms +[2025-09-02 14:57:30] [Rank 0] step:4101/10000 train_time:300301ms step_avg:73.23ms +[2025-09-02 14:57:30] [Rank 0] step:4101/10000 train_time:300301ms step_avg:73.23ms +[2025-09-02 14:57:32] [Rank 0] step:4121/10000 train_time:301827ms step_avg:73.24ms +[2025-09-02 14:57:32] [Rank 0] step:4121/10000 train_time:301827ms step_avg:73.24ms +[2025-09-02 
14:57:34] [Rank 0] step:4141/10000 train_time:303451ms step_avg:73.28ms +[2025-09-02 14:57:34] [Rank 0] step:4141/10000 train_time:303451ms step_avg:73.28ms +[2025-09-02 14:57:35] [Rank 0] step:4161/10000 train_time:304975ms step_avg:73.29ms +[2025-09-02 14:57:35] [Rank 0] step:4161/10000 train_time:304975ms step_avg:73.29ms +[2025-09-02 14:57:37] [Rank 0] step:4181/10000 train_time:306500ms step_avg:73.31ms +[2025-09-02 14:57:37] [Rank 0] step:4181/10000 train_time:306500ms step_avg:73.31ms +[2025-09-02 14:57:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:57:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:57:50] [Rank 0] PRINT: step:4200/10000 val_loss:4.2344 svd_entropy: attn_qk:H=0.7081,top10E=0.32,eRank=119.4,q75/q25=94.23 attn_vo:H=0.7997,top10E=0.18,eRank=229.3,q75/q25=93.84 mlp_w1:H=0.7210,top10E=0.35,eRank=143.6,q75/q25=12.48 mlp_w2:H=0.8417,top10E=0.14,eRank=274.3,q75/q25=28.31 vo_prod:H=0.7066,top10E=0.28,eRank=115.1,q75/q25=11161.12 train_time:308178ms step_avg:73.38ms +[2025-09-02 14:57:50] [Rank 0] PRINT: step:4200/10000 val_loss:4.2344 svd_entropy: attn_qk:H=0.7081,top10E=0.32,eRank=119.4,q75/q25=94.23 attn_vo:H=0.7997,top10E=0.18,eRank=229.3,q75/q25=93.84 mlp_w1:H=0.7210,top10E=0.35,eRank=143.6,q75/q25=12.48 mlp_w2:H=0.8417,top10E=0.14,eRank=274.3,q75/q25=28.31 vo_prod:H=0.7066,top10E=0.28,eRank=115.1,q75/q25=11161.12 train_time:308178ms step_avg:73.38ms +[2025-09-02 14:57:50] [Rank 0] step:4201/10000 train_time:308189ms step_avg:73.36ms +[2025-09-02 14:57:50] [Rank 0] step:4201/10000 train_time:308189ms step_avg:73.36ms +[2025-09-02 14:57:52] [Rank 0] step:4221/10000 train_time:309578ms step_avg:73.34ms +[2025-09-02 14:57:52] [Rank 0] step:4221/10000 train_time:309578ms step_avg:73.34ms +[2025-09-02 14:57:53] [Rank 0] step:4241/10000 train_time:311104ms 
step_avg:73.36ms +[2025-09-02 14:57:53] [Rank 0] step:4241/10000 train_time:311104ms step_avg:73.36ms +[2025-09-02 14:57:55] [Rank 0] step:4261/10000 train_time:312627ms step_avg:73.37ms +[2025-09-02 14:57:55] [Rank 0] step:4261/10000 train_time:312627ms step_avg:73.37ms +[2025-09-02 14:57:56] [Rank 0] step:4281/10000 train_time:314151ms step_avg:73.38ms +[2025-09-02 14:57:56] [Rank 0] step:4281/10000 train_time:314151ms step_avg:73.38ms +[2025-09-02 14:57:58] [Rank 0] step:4301/10000 train_time:315677ms step_avg:73.40ms +[2025-09-02 14:57:58] [Rank 0] step:4301/10000 train_time:315677ms step_avg:73.40ms +[2025-09-02 14:57:59] [Rank 0] step:4321/10000 train_time:317207ms step_avg:73.41ms +[2025-09-02 14:57:59] [Rank 0] step:4321/10000 train_time:317207ms step_avg:73.41ms +[2025-09-02 14:58:01] [Rank 0] step:4341/10000 train_time:318732ms step_avg:73.42ms +[2025-09-02 14:58:01] [Rank 0] step:4341/10000 train_time:318732ms step_avg:73.42ms +[2025-09-02 14:58:02] [Rank 0] step:4361/10000 train_time:320259ms step_avg:73.44ms +[2025-09-02 14:58:02] [Rank 0] step:4361/10000 train_time:320259ms step_avg:73.44ms +[2025-09-02 14:58:04] [Rank 0] step:4381/10000 train_time:321783ms step_avg:73.45ms +[2025-09-02 14:58:04] [Rank 0] step:4381/10000 train_time:321783ms step_avg:73.45ms +[2025-09-02 14:58:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:58:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:58:17] [Rank 0] PRINT: step:4400/10000 val_loss:4.2106 svd_entropy: attn_qk:H=0.7115,top10E=0.31,eRank=121.8,q75/q25=96.79 attn_vo:H=0.8030,top10E=0.18,eRank=233.5,q75/q25=92.06 mlp_w1:H=0.7256,top10E=0.34,eRank=147.1,q75/q25=13.06 mlp_w2:H=0.8436,top10E=0.14,eRank=278.1,q75/q25=28.91 vo_prod:H=0.7106,top10E=0.27,eRank=118.1,q75/q25=10482.72 train_time:323462ms step_avg:73.51ms +[2025-09-02 14:58:17] [Rank 0] PRINT: step:4400/10000 val_loss:4.2106 svd_entropy: attn_qk:H=0.7115,top10E=0.31,eRank=121.8,q75/q25=96.79 attn_vo:H=0.8030,top10E=0.18,eRank=233.5,q75/q25=92.06 mlp_w1:H=0.7256,top10E=0.34,eRank=147.1,q75/q25=13.06 mlp_w2:H=0.8436,top10E=0.14,eRank=278.1,q75/q25=28.91 vo_prod:H=0.7106,top10E=0.27,eRank=118.1,q75/q25=10482.72 train_time:323462ms step_avg:73.51ms +[2025-09-02 14:58:17] [Rank 0] step:4401/10000 train_time:323473ms step_avg:73.50ms +[2025-09-02 14:58:17] [Rank 0] step:4401/10000 train_time:323473ms step_avg:73.50ms +[2025-09-02 14:58:19] [Rank 0] step:4421/10000 train_time:324961ms step_avg:73.50ms +[2025-09-02 14:58:19] [Rank 0] step:4421/10000 train_time:324961ms step_avg:73.50ms +[2025-09-02 14:58:20] [Rank 0] step:4441/10000 train_time:326483ms step_avg:73.52ms +[2025-09-02 14:58:20] [Rank 0] step:4441/10000 train_time:326483ms step_avg:73.52ms +[2025-09-02 14:58:22] [Rank 0] step:4461/10000 train_time:328112ms step_avg:73.55ms +[2025-09-02 14:58:22] [Rank 0] step:4461/10000 train_time:328112ms step_avg:73.55ms +[2025-09-02 14:58:23] [Rank 0] step:4481/10000 train_time:329639ms step_avg:73.56ms +[2025-09-02 14:58:23] [Rank 0] step:4481/10000 train_time:329639ms step_avg:73.56ms +[2025-09-02 14:58:25] [Rank 0] step:4501/10000 train_time:331170ms step_avg:73.58ms +[2025-09-02 14:58:25] [Rank 0] step:4501/10000 train_time:331170ms step_avg:73.58ms +[2025-09-02 14:58:26] [Rank 0] step:4521/10000 train_time:332699ms step_avg:73.59ms +[2025-09-02 14:58:26] [Rank 0] step:4521/10000 train_time:332699ms step_avg:73.59ms +[2025-09-02 
14:58:28] [Rank 0] step:4541/10000 train_time:334229ms step_avg:73.60ms +[2025-09-02 14:58:28] [Rank 0] step:4541/10000 train_time:334229ms step_avg:73.60ms +[2025-09-02 14:58:30] [Rank 0] step:4561/10000 train_time:335760ms step_avg:73.62ms +[2025-09-02 14:58:30] [Rank 0] step:4561/10000 train_time:335760ms step_avg:73.62ms +[2025-09-02 14:58:31] [Rank 0] step:4581/10000 train_time:337292ms step_avg:73.63ms +[2025-09-02 14:58:31] [Rank 0] step:4581/10000 train_time:337292ms step_avg:73.63ms +[2025-09-02 14:58:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:58:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:58:44] [Rank 0] PRINT: step:4600/10000 val_loss:4.1801 svd_entropy: attn_qk:H=0.7148,top10E=0.31,eRank=124.1,q75/q25=99.56 attn_vo:H=0.8062,top10E=0.17,eRank=237.7,q75/q25=89.62 mlp_w1:H=0.7296,top10E=0.34,eRank=150.4,q75/q25=13.64 mlp_w2:H=0.8454,top10E=0.14,eRank=281.8,q75/q25=29.63 vo_prod:H=0.7143,top10E=0.27,eRank=121.1,q75/q25=10077.11 train_time:338979ms step_avg:73.69ms +[2025-09-02 14:58:44] [Rank 0] PRINT: step:4600/10000 val_loss:4.1801 svd_entropy: attn_qk:H=0.7148,top10E=0.31,eRank=124.1,q75/q25=99.56 attn_vo:H=0.8062,top10E=0.17,eRank=237.7,q75/q25=89.62 mlp_w1:H=0.7296,top10E=0.34,eRank=150.4,q75/q25=13.64 mlp_w2:H=0.8454,top10E=0.14,eRank=281.8,q75/q25=29.63 vo_prod:H=0.7143,top10E=0.27,eRank=121.1,q75/q25=10077.11 train_time:338979ms step_avg:73.69ms +[2025-09-02 14:58:44] [Rank 0] step:4601/10000 train_time:338990ms step_avg:73.68ms +[2025-09-02 14:58:44] [Rank 0] step:4601/10000 train_time:338990ms step_avg:73.68ms +[2025-09-02 14:58:46] [Rank 0] step:4621/10000 train_time:340372ms step_avg:73.66ms +[2025-09-02 14:58:46] [Rank 0] step:4621/10000 train_time:340372ms step_avg:73.66ms +[2025-09-02 14:58:47] [Rank 0] step:4641/10000 train_time:341902ms 
step_avg:73.67ms +[2025-09-02 14:58:47] [Rank 0] step:4641/10000 train_time:341902ms step_avg:73.67ms +[2025-09-02 14:58:49] [Rank 0] step:4661/10000 train_time:343530ms step_avg:73.70ms +[2025-09-02 14:58:49] [Rank 0] step:4661/10000 train_time:343530ms step_avg:73.70ms +[2025-09-02 14:58:51] [Rank 0] step:4681/10000 train_time:345059ms step_avg:73.71ms +[2025-09-02 14:58:51] [Rank 0] step:4681/10000 train_time:345059ms step_avg:73.71ms +[2025-09-02 14:58:52] [Rank 0] step:4701/10000 train_time:346591ms step_avg:73.73ms +[2025-09-02 14:58:52] [Rank 0] step:4701/10000 train_time:346591ms step_avg:73.73ms +[2025-09-02 14:58:54] [Rank 0] step:4721/10000 train_time:348121ms step_avg:73.74ms +[2025-09-02 14:58:54] [Rank 0] step:4721/10000 train_time:348121ms step_avg:73.74ms +[2025-09-02 14:58:55] [Rank 0] step:4741/10000 train_time:349653ms step_avg:73.75ms +[2025-09-02 14:58:55] [Rank 0] step:4741/10000 train_time:349653ms step_avg:73.75ms +[2025-09-02 14:58:57] [Rank 0] step:4761/10000 train_time:351183ms step_avg:73.76ms +[2025-09-02 14:58:57] [Rank 0] step:4761/10000 train_time:351183ms step_avg:73.76ms +[2025-09-02 14:58:58] [Rank 0] step:4781/10000 train_time:352717ms step_avg:73.77ms +[2025-09-02 14:58:58] [Rank 0] step:4781/10000 train_time:352717ms step_avg:73.77ms +[2025-09-02 14:59:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:59:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 14:59:12] [Rank 0] PRINT: step:4800/10000 val_loss:4.1667 svd_entropy: attn_qk:H=0.7181,top10E=0.30,eRank=126.5,q75/q25=102.01 attn_vo:H=0.8091,top10E=0.17,eRank=241.7,q75/q25=87.71 mlp_w1:H=0.7332,top10E=0.33,eRank=153.4,q75/q25=14.24 mlp_w2:H=0.8471,top10E=0.13,eRank=285.3,q75/q25=30.43 vo_prod:H=0.7178,top10E=0.26,eRank=123.9,q75/q25=9359.71 train_time:354403ms step_avg:73.83ms +[2025-09-02 14:59:12] [Rank 0] PRINT: step:4800/10000 val_loss:4.1667 svd_entropy: attn_qk:H=0.7181,top10E=0.30,eRank=126.5,q75/q25=102.01 attn_vo:H=0.8091,top10E=0.17,eRank=241.7,q75/q25=87.71 mlp_w1:H=0.7332,top10E=0.33,eRank=153.4,q75/q25=14.24 mlp_w2:H=0.8471,top10E=0.13,eRank=285.3,q75/q25=30.43 vo_prod:H=0.7178,top10E=0.26,eRank=123.9,q75/q25=9359.71 train_time:354403ms step_avg:73.83ms +[2025-09-02 14:59:12] [Rank 0] step:4801/10000 train_time:354414ms step_avg:73.82ms +[2025-09-02 14:59:12] [Rank 0] step:4801/10000 train_time:354414ms step_avg:73.82ms +[2025-09-02 14:59:13] [Rank 0] step:4821/10000 train_time:355822ms step_avg:73.81ms +[2025-09-02 14:59:13] [Rank 0] step:4821/10000 train_time:355822ms step_avg:73.81ms +[2025-09-02 14:59:15] [Rank 0] step:4841/10000 train_time:357350ms step_avg:73.82ms +[2025-09-02 14:59:15] [Rank 0] step:4841/10000 train_time:357350ms step_avg:73.82ms +[2025-09-02 14:59:16] [Rank 0] step:4861/10000 train_time:358880ms step_avg:73.83ms +[2025-09-02 14:59:16] [Rank 0] step:4861/10000 train_time:358880ms step_avg:73.83ms +[2025-09-02 14:59:18] [Rank 0] step:4881/10000 train_time:360408ms step_avg:73.84ms +[2025-09-02 14:59:18] [Rank 0] step:4881/10000 train_time:360408ms step_avg:73.84ms +[2025-09-02 14:59:19] [Rank 0] step:4901/10000 train_time:361937ms step_avg:73.85ms +[2025-09-02 14:59:19] [Rank 0] step:4901/10000 train_time:361937ms step_avg:73.85ms +[2025-09-02 14:59:21] [Rank 0] step:4921/10000 train_time:363468ms step_avg:73.86ms +[2025-09-02 14:59:21] [Rank 0] step:4921/10000 train_time:363468ms step_avg:73.86ms +[2025-09-02 
14:59:22] [Rank 0] step:4941/10000 train_time:365002ms step_avg:73.87ms +[2025-09-02 14:59:22] [Rank 0] step:4941/10000 train_time:365002ms step_avg:73.87ms +[2025-09-02 14:59:24] [Rank 0] step:4961/10000 train_time:366530ms step_avg:73.88ms +[2025-09-02 14:59:24] [Rank 0] step:4961/10000 train_time:366530ms step_avg:73.88ms +[2025-09-02 14:59:26] [Rank 0] step:4981/10000 train_time:368062ms step_avg:73.89ms +[2025-09-02 14:59:26] [Rank 0] step:4981/10000 train_time:368062ms step_avg:73.89ms +[2025-09-02 14:59:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:59:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:59:39] [Rank 0] PRINT: step:5000/10000 val_loss:4.1454 svd_entropy: attn_qk:H=0.7210,top10E=0.30,eRank=128.7,q75/q25=104.06 attn_vo:H=0.8119,top10E=0.17,eRank=245.5,q75/q25=85.18 mlp_w1:H=0.7369,top10E=0.33,eRank=156.5,q75/q25=14.76 mlp_w2:H=0.8486,top10E=0.13,eRank=288.5,q75/q25=31.17 vo_prod:H=0.7212,top10E=0.26,eRank=126.7,q75/q25=8751.58 train_time:369747ms step_avg:73.95ms +[2025-09-02 14:59:39] [Rank 0] PRINT: step:5000/10000 val_loss:4.1454 svd_entropy: attn_qk:H=0.7210,top10E=0.30,eRank=128.7,q75/q25=104.06 attn_vo:H=0.8119,top10E=0.17,eRank=245.5,q75/q25=85.18 mlp_w1:H=0.7369,top10E=0.33,eRank=156.5,q75/q25=14.76 mlp_w2:H=0.8486,top10E=0.13,eRank=288.5,q75/q25=31.17 vo_prod:H=0.7212,top10E=0.26,eRank=126.7,q75/q25=8751.58 train_time:369747ms step_avg:73.95ms +[2025-09-02 14:59:39] [Rank 0] step:5001/10000 train_time:369758ms step_avg:73.94ms +[2025-09-02 14:59:39] [Rank 0] step:5001/10000 train_time:369758ms step_avg:73.94ms +[2025-09-02 14:59:41] [Rank 0] step:5021/10000 train_time:371155ms step_avg:73.92ms +[2025-09-02 14:59:41] [Rank 0] step:5021/10000 train_time:371155ms step_avg:73.92ms +[2025-09-02 14:59:42] [Rank 0] step:5041/10000 train_time:372686ms 
step_avg:73.93ms +[2025-09-02 14:59:42] [Rank 0] step:5041/10000 train_time:372686ms step_avg:73.93ms +[2025-09-02 14:59:44] [Rank 0] step:5061/10000 train_time:374213ms step_avg:73.94ms +[2025-09-02 14:59:44] [Rank 0] step:5061/10000 train_time:374213ms step_avg:73.94ms +[2025-09-02 14:59:45] [Rank 0] step:5081/10000 train_time:375743ms step_avg:73.95ms +[2025-09-02 14:59:45] [Rank 0] step:5081/10000 train_time:375743ms step_avg:73.95ms +[2025-09-02 14:59:47] [Rank 0] step:5101/10000 train_time:377272ms step_avg:73.96ms +[2025-09-02 14:59:47] [Rank 0] step:5101/10000 train_time:377272ms step_avg:73.96ms +[2025-09-02 14:59:48] [Rank 0] step:5121/10000 train_time:378809ms step_avg:73.97ms +[2025-09-02 14:59:48] [Rank 0] step:5121/10000 train_time:378809ms step_avg:73.97ms +[2025-09-02 14:59:50] [Rank 0] step:5141/10000 train_time:380342ms step_avg:73.98ms +[2025-09-02 14:59:50] [Rank 0] step:5141/10000 train_time:380342ms step_avg:73.98ms +[2025-09-02 14:59:51] [Rank 0] step:5161/10000 train_time:381872ms step_avg:73.99ms +[2025-09-02 14:59:51] [Rank 0] step:5161/10000 train_time:381872ms step_avg:73.99ms +[2025-09-02 14:59:53] [Rank 0] step:5181/10000 train_time:383406ms step_avg:74.00ms +[2025-09-02 14:59:53] [Rank 0] step:5181/10000 train_time:383406ms step_avg:74.00ms +[2025-09-02 14:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 14:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:00:06] [Rank 0] PRINT: step:5200/10000 val_loss:4.1241 svd_entropy: attn_qk:H=0.7237,top10E=0.30,eRank=130.8,q75/q25=105.14 attn_vo:H=0.8145,top10E=0.16,eRank=249.1,q75/q25=83.02 mlp_w1:H=0.7402,top10E=0.32,eRank=159.4,q75/q25=15.24 mlp_w2:H=0.8500,top10E=0.13,eRank=291.5,q75/q25=31.64 vo_prod:H=0.7243,top10E=0.26,eRank=129.3,q75/q25=8145.31 train_time:385118ms step_avg:74.06ms +[2025-09-02 15:00:06] [Rank 0] PRINT: step:5200/10000 val_loss:4.1241 svd_entropy: attn_qk:H=0.7237,top10E=0.30,eRank=130.8,q75/q25=105.14 attn_vo:H=0.8145,top10E=0.16,eRank=249.1,q75/q25=83.02 mlp_w1:H=0.7402,top10E=0.32,eRank=159.4,q75/q25=15.24 mlp_w2:H=0.8500,top10E=0.13,eRank=291.5,q75/q25=31.64 vo_prod:H=0.7243,top10E=0.26,eRank=129.3,q75/q25=8145.31 train_time:385118ms step_avg:74.06ms +[2025-09-02 15:00:06] [Rank 0] step:5201/10000 train_time:385129ms step_avg:74.05ms +[2025-09-02 15:00:06] [Rank 0] step:5201/10000 train_time:385129ms step_avg:74.05ms +[2025-09-02 15:00:08] [Rank 0] step:5221/10000 train_time:386555ms step_avg:74.04ms +[2025-09-02 15:00:08] [Rank 0] step:5221/10000 train_time:386555ms step_avg:74.04ms +[2025-09-02 15:00:09] [Rank 0] step:5241/10000 train_time:388115ms step_avg:74.05ms +[2025-09-02 15:00:09] [Rank 0] step:5241/10000 train_time:388115ms step_avg:74.05ms +[2025-09-02 15:00:11] [Rank 0] step:5261/10000 train_time:389676ms step_avg:74.07ms +[2025-09-02 15:00:11] [Rank 0] step:5261/10000 train_time:389676ms step_avg:74.07ms +[2025-09-02 15:00:12] [Rank 0] step:5281/10000 train_time:391240ms step_avg:74.08ms +[2025-09-02 15:00:12] [Rank 0] step:5281/10000 train_time:391240ms step_avg:74.08ms +[2025-09-02 15:00:14] [Rank 0] step:5301/10000 train_time:392947ms step_avg:74.13ms +[2025-09-02 15:00:14] [Rank 0] step:5301/10000 train_time:392947ms step_avg:74.13ms +[2025-09-02 15:00:16] [Rank 0] step:5321/10000 train_time:394374ms step_avg:74.12ms +[2025-09-02 15:00:16] [Rank 0] step:5321/10000 train_time:394374ms step_avg:74.12ms +[2025-09-02 
15:00:17] [Rank 0] step:5341/10000 train_time:395936ms step_avg:74.13ms +[2025-09-02 15:00:17] [Rank 0] step:5341/10000 train_time:395936ms step_avg:74.13ms +[2025-09-02 15:00:19] [Rank 0] step:5361/10000 train_time:397503ms step_avg:74.15ms +[2025-09-02 15:00:19] [Rank 0] step:5361/10000 train_time:397503ms step_avg:74.15ms +[2025-09-02 15:00:20] [Rank 0] step:5381/10000 train_time:399069ms step_avg:74.16ms +[2025-09-02 15:00:20] [Rank 0] step:5381/10000 train_time:399069ms step_avg:74.16ms +[2025-09-02 15:00:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:00:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:00:34] [Rank 0] PRINT: step:5400/10000 val_loss:4.1060 svd_entropy: attn_qk:H=0.7264,top10E=0.29,eRank=132.8,q75/q25=106.20 attn_vo:H=0.8169,top10E=0.16,eRank=252.5,q75/q25=80.41 mlp_w1:H=0.7435,top10E=0.32,eRank=162.3,q75/q25=15.76 mlp_w2:H=0.8513,top10E=0.13,eRank=294.3,q75/q25=32.06 vo_prod:H=0.7271,top10E=0.25,eRank=131.7,q75/q25=7635.20 train_time:400789ms step_avg:74.22ms +[2025-09-02 15:00:34] [Rank 0] PRINT: step:5400/10000 val_loss:4.1060 svd_entropy: attn_qk:H=0.7264,top10E=0.29,eRank=132.8,q75/q25=106.20 attn_vo:H=0.8169,top10E=0.16,eRank=252.5,q75/q25=80.41 mlp_w1:H=0.7435,top10E=0.32,eRank=162.3,q75/q25=15.76 mlp_w2:H=0.8513,top10E=0.13,eRank=294.3,q75/q25=32.06 vo_prod:H=0.7271,top10E=0.25,eRank=131.7,q75/q25=7635.20 train_time:400789ms step_avg:74.22ms +[2025-09-02 15:00:34] [Rank 0] step:5401/10000 train_time:400800ms step_avg:74.21ms +[2025-09-02 15:00:34] [Rank 0] step:5401/10000 train_time:400800ms step_avg:74.21ms +[2025-09-02 15:00:35] [Rank 0] step:5421/10000 train_time:402224ms step_avg:74.20ms +[2025-09-02 15:00:35] [Rank 0] step:5421/10000 train_time:402224ms step_avg:74.20ms +[2025-09-02 15:00:37] [Rank 0] step:5441/10000 train_time:403784ms 
step_avg:74.21ms +[2025-09-02 15:00:37] [Rank 0] step:5441/10000 train_time:403784ms step_avg:74.21ms +[2025-09-02 15:00:38] [Rank 0] step:5461/10000 train_time:405349ms step_avg:74.23ms +[2025-09-02 15:00:38] [Rank 0] step:5461/10000 train_time:405349ms step_avg:74.23ms +[2025-09-02 15:00:40] [Rank 0] step:5481/10000 train_time:406918ms step_avg:74.24ms +[2025-09-02 15:00:40] [Rank 0] step:5481/10000 train_time:406918ms step_avg:74.24ms +[2025-09-02 15:00:42] [Rank 0] step:5501/10000 train_time:408487ms step_avg:74.26ms +[2025-09-02 15:00:42] [Rank 0] step:5501/10000 train_time:408487ms step_avg:74.26ms +[2025-09-02 15:00:43] [Rank 0] step:5521/10000 train_time:410057ms step_avg:74.27ms +[2025-09-02 15:00:43] [Rank 0] step:5521/10000 train_time:410057ms step_avg:74.27ms +[2025-09-02 15:00:45] [Rank 0] step:5541/10000 train_time:411624ms step_avg:74.29ms +[2025-09-02 15:00:45] [Rank 0] step:5541/10000 train_time:411624ms step_avg:74.29ms +[2025-09-02 15:00:46] [Rank 0] step:5561/10000 train_time:413190ms step_avg:74.30ms +[2025-09-02 15:00:46] [Rank 0] step:5561/10000 train_time:413190ms step_avg:74.30ms +[2025-09-02 15:00:48] [Rank 0] step:5581/10000 train_time:414758ms step_avg:74.32ms +[2025-09-02 15:00:48] [Rank 0] step:5581/10000 train_time:414758ms step_avg:74.32ms +[2025-09-02 15:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:01:01] [Rank 0] PRINT: step:5600/10000 val_loss:4.0928 svd_entropy: attn_qk:H=0.7290,top10E=0.29,eRank=134.8,q75/q25=107.20 attn_vo:H=0.8192,top10E=0.16,eRank=255.8,q75/q25=77.84 mlp_w1:H=0.7464,top10E=0.31,eRank=165.0,q75/q25=16.29 mlp_w2:H=0.8526,top10E=0.13,eRank=297.0,q75/q25=32.61 vo_prod:H=0.7299,top10E=0.25,eRank=134.2,q75/q25=7113.96 train_time:416482ms step_avg:74.37ms +[2025-09-02 15:01:01] [Rank 0] PRINT: step:5600/10000 val_loss:4.0928 svd_entropy: attn_qk:H=0.7290,top10E=0.29,eRank=134.8,q75/q25=107.20 attn_vo:H=0.8192,top10E=0.16,eRank=255.8,q75/q25=77.84 mlp_w1:H=0.7464,top10E=0.31,eRank=165.0,q75/q25=16.29 mlp_w2:H=0.8526,top10E=0.13,eRank=297.0,q75/q25=32.61 vo_prod:H=0.7299,top10E=0.25,eRank=134.2,q75/q25=7113.96 train_time:416482ms step_avg:74.37ms +[2025-09-02 15:01:01] [Rank 0] step:5601/10000 train_time:416493ms step_avg:74.36ms +[2025-09-02 15:01:01] [Rank 0] step:5601/10000 train_time:416493ms step_avg:74.36ms +[2025-09-02 15:01:03] [Rank 0] step:5621/10000 train_time:417911ms step_avg:74.35ms +[2025-09-02 15:01:03] [Rank 0] step:5621/10000 train_time:417911ms step_avg:74.35ms +[2025-09-02 15:01:04] [Rank 0] step:5641/10000 train_time:419473ms step_avg:74.36ms +[2025-09-02 15:01:04] [Rank 0] step:5641/10000 train_time:419473ms step_avg:74.36ms +[2025-09-02 15:01:06] [Rank 0] step:5661/10000 train_time:421033ms step_avg:74.37ms +[2025-09-02 15:01:06] [Rank 0] step:5661/10000 train_time:421033ms step_avg:74.37ms +[2025-09-02 15:01:07] [Rank 0] step:5681/10000 train_time:422601ms step_avg:74.39ms +[2025-09-02 15:01:07] [Rank 0] step:5681/10000 train_time:422601ms step_avg:74.39ms +[2025-09-02 15:01:09] [Rank 0] step:5701/10000 train_time:424163ms step_avg:74.40ms +[2025-09-02 15:01:09] [Rank 0] step:5701/10000 train_time:424163ms step_avg:74.40ms +[2025-09-02 15:01:11] [Rank 0] step:5721/10000 train_time:425732ms step_avg:74.42ms +[2025-09-02 15:01:11] [Rank 0] step:5721/10000 train_time:425732ms step_avg:74.42ms +[2025-09-02 
15:01:12] [Rank 0] step:5741/10000 train_time:427297ms step_avg:74.43ms +[2025-09-02 15:01:12] [Rank 0] step:5741/10000 train_time:427297ms step_avg:74.43ms +[2025-09-02 15:01:14] [Rank 0] step:5761/10000 train_time:428862ms step_avg:74.44ms +[2025-09-02 15:01:14] [Rank 0] step:5761/10000 train_time:428862ms step_avg:74.44ms +[2025-09-02 15:01:15] [Rank 0] step:5781/10000 train_time:430431ms step_avg:74.46ms +[2025-09-02 15:01:15] [Rank 0] step:5781/10000 train_time:430431ms step_avg:74.46ms +[2025-09-02 15:01:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:01:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:01:28] [Rank 0] PRINT: step:5800/10000 val_loss:4.0822 svd_entropy: attn_qk:H=0.7313,top10E=0.29,eRank=136.7,q75/q25=107.90 attn_vo:H=0.8214,top10E=0.16,eRank=259.0,q75/q25=75.56 mlp_w1:H=0.7493,top10E=0.31,eRank=167.6,q75/q25=16.77 mlp_w2:H=0.8537,top10E=0.12,eRank=299.5,q75/q25=33.14 vo_prod:H=0.7325,top10E=0.25,eRank=136.6,q75/q25=6579.98 train_time:432158ms step_avg:74.51ms +[2025-09-02 15:01:28] [Rank 0] PRINT: step:5800/10000 val_loss:4.0822 svd_entropy: attn_qk:H=0.7313,top10E=0.29,eRank=136.7,q75/q25=107.90 attn_vo:H=0.8214,top10E=0.16,eRank=259.0,q75/q25=75.56 mlp_w1:H=0.7493,top10E=0.31,eRank=167.6,q75/q25=16.77 mlp_w2:H=0.8537,top10E=0.12,eRank=299.5,q75/q25=33.14 vo_prod:H=0.7325,top10E=0.25,eRank=136.6,q75/q25=6579.98 train_time:432158ms step_avg:74.51ms +[2025-09-02 15:01:28] [Rank 0] step:5801/10000 train_time:432169ms step_avg:74.50ms +[2025-09-02 15:01:28] [Rank 0] step:5801/10000 train_time:432169ms step_avg:74.50ms +[2025-09-02 15:01:30] [Rank 0] step:5821/10000 train_time:433599ms step_avg:74.49ms +[2025-09-02 15:01:30] [Rank 0] step:5821/10000 train_time:433599ms step_avg:74.49ms +[2025-09-02 15:01:32] [Rank 0] step:5841/10000 train_time:435158ms 
step_avg:74.50ms +[2025-09-02 15:01:32] [Rank 0] step:5841/10000 train_time:435158ms step_avg:74.50ms +[2025-09-02 15:01:33] [Rank 0] step:5861/10000 train_time:436725ms step_avg:74.51ms +[2025-09-02 15:01:33] [Rank 0] step:5861/10000 train_time:436725ms step_avg:74.51ms +[2025-09-02 15:01:35] [Rank 0] step:5881/10000 train_time:438290ms step_avg:74.53ms +[2025-09-02 15:01:35] [Rank 0] step:5881/10000 train_time:438290ms step_avg:74.53ms +[2025-09-02 15:01:36] [Rank 0] step:5901/10000 train_time:439853ms step_avg:74.54ms +[2025-09-02 15:01:36] [Rank 0] step:5901/10000 train_time:439853ms step_avg:74.54ms +[2025-09-02 15:01:38] [Rank 0] step:5921/10000 train_time:441417ms step_avg:74.55ms +[2025-09-02 15:01:38] [Rank 0] step:5921/10000 train_time:441417ms step_avg:74.55ms +[2025-09-02 15:01:39] [Rank 0] step:5941/10000 train_time:442985ms step_avg:74.56ms +[2025-09-02 15:01:39] [Rank 0] step:5941/10000 train_time:442985ms step_avg:74.56ms +[2025-09-02 15:01:41] [Rank 0] step:5961/10000 train_time:444554ms step_avg:74.58ms +[2025-09-02 15:01:41] [Rank 0] step:5961/10000 train_time:444554ms step_avg:74.58ms +[2025-09-02 15:01:43] [Rank 0] step:5981/10000 train_time:446121ms step_avg:74.59ms +[2025-09-02 15:01:43] [Rank 0] step:5981/10000 train_time:446121ms step_avg:74.59ms +[2025-09-02 15:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:01:56] [Rank 0] PRINT: step:6000/10000 val_loss:4.0563 svd_entropy: attn_qk:H=0.7336,top10E=0.28,eRank=138.6,q75/q25=109.15 attn_vo:H=0.8235,top10E=0.15,eRank=262.1,q75/q25=73.74 mlp_w1:H=0.7520,top10E=0.30,eRank=170.2,q75/q25=17.27 mlp_w2:H=0.8548,top10E=0.12,eRank=302.0,q75/q25=33.60 vo_prod:H=0.7350,top10E=0.24,eRank=138.9,q75/q25=6078.82 train_time:447843ms step_avg:74.64ms +[2025-09-02 15:01:56] [Rank 0] PRINT: step:6000/10000 val_loss:4.0563 svd_entropy: attn_qk:H=0.7336,top10E=0.28,eRank=138.6,q75/q25=109.15 attn_vo:H=0.8235,top10E=0.15,eRank=262.1,q75/q25=73.74 mlp_w1:H=0.7520,top10E=0.30,eRank=170.2,q75/q25=17.27 mlp_w2:H=0.8548,top10E=0.12,eRank=302.0,q75/q25=33.60 vo_prod:H=0.7350,top10E=0.24,eRank=138.9,q75/q25=6078.82 train_time:447843ms step_avg:74.64ms +[2025-09-02 15:01:56] [Rank 0] step:6001/10000 train_time:447854ms step_avg:74.63ms +[2025-09-02 15:01:56] [Rank 0] step:6001/10000 train_time:447854ms step_avg:74.63ms +[2025-09-02 15:01:58] [Rank 0] step:6021/10000 train_time:449290ms step_avg:74.62ms +[2025-09-02 15:01:58] [Rank 0] step:6021/10000 train_time:449290ms step_avg:74.62ms +[2025-09-02 15:01:59] [Rank 0] step:6041/10000 train_time:450859ms step_avg:74.63ms +[2025-09-02 15:01:59] [Rank 0] step:6041/10000 train_time:450859ms step_avg:74.63ms +[2025-09-02 15:02:01] [Rank 0] step:6061/10000 train_time:452433ms step_avg:74.65ms +[2025-09-02 15:02:01] [Rank 0] step:6061/10000 train_time:452433ms step_avg:74.65ms +[2025-09-02 15:02:02] [Rank 0] step:6081/10000 train_time:454001ms step_avg:74.66ms +[2025-09-02 15:02:02] [Rank 0] step:6081/10000 train_time:454001ms step_avg:74.66ms +[2025-09-02 15:02:04] [Rank 0] step:6101/10000 train_time:455572ms step_avg:74.67ms +[2025-09-02 15:02:04] [Rank 0] step:6101/10000 train_time:455572ms step_avg:74.67ms +[2025-09-02 15:02:06] [Rank 0] step:6121/10000 train_time:457417ms step_avg:74.73ms +[2025-09-02 15:02:06] [Rank 0] step:6121/10000 train_time:457417ms step_avg:74.73ms +[2025-09-02 
15:02:07] [Rank 0] step:6141/10000 train_time:459099ms step_avg:74.76ms +[2025-09-02 15:02:07] [Rank 0] step:6141/10000 train_time:459099ms step_avg:74.76ms +[2025-09-02 15:02:09] [Rank 0] step:6161/10000 train_time:460669ms step_avg:74.77ms +[2025-09-02 15:02:09] [Rank 0] step:6161/10000 train_time:460669ms step_avg:74.77ms +[2025-09-02 15:02:11] [Rank 0] step:6181/10000 train_time:462338ms step_avg:74.80ms +[2025-09-02 15:02:11] [Rank 0] step:6181/10000 train_time:462338ms step_avg:74.80ms +[2025-09-02 15:02:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:02:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:02:24] [Rank 0] PRINT: step:6200/10000 val_loss:4.0412 svd_entropy: attn_qk:H=0.7358,top10E=0.28,eRank=140.4,q75/q25=109.50 attn_vo:H=0.8255,top10E=0.15,eRank=265.1,q75/q25=71.10 mlp_w1:H=0.7544,top10E=0.30,eRank=172.6,q75/q25=17.68 mlp_w2:H=0.8560,top10E=0.12,eRank=304.5,q75/q25=33.84 vo_prod:H=0.7375,top10E=0.24,eRank=141.1,q75/q25=5657.43 train_time:464166ms step_avg:74.87ms +[2025-09-02 15:02:24] [Rank 0] PRINT: step:6200/10000 val_loss:4.0412 svd_entropy: attn_qk:H=0.7358,top10E=0.28,eRank=140.4,q75/q25=109.50 attn_vo:H=0.8255,top10E=0.15,eRank=265.1,q75/q25=71.10 mlp_w1:H=0.7544,top10E=0.30,eRank=172.6,q75/q25=17.68 mlp_w2:H=0.8560,top10E=0.12,eRank=304.5,q75/q25=33.84 vo_prod:H=0.7375,top10E=0.24,eRank=141.1,q75/q25=5657.43 train_time:464166ms step_avg:74.87ms +[2025-09-02 15:02:24] [Rank 0] step:6201/10000 train_time:464177ms step_avg:74.86ms +[2025-09-02 15:02:24] [Rank 0] step:6201/10000 train_time:464177ms step_avg:74.86ms +[2025-09-02 15:02:26] [Rank 0] step:6221/10000 train_time:465602ms step_avg:74.84ms +[2025-09-02 15:02:26] [Rank 0] step:6221/10000 train_time:465602ms step_avg:74.84ms +[2025-09-02 15:02:27] [Rank 0] step:6241/10000 train_time:467169ms 
step_avg:74.85ms +[2025-09-02 15:02:27] [Rank 0] step:6241/10000 train_time:467169ms step_avg:74.85ms +[2025-09-02 15:02:29] [Rank 0] step:6261/10000 train_time:468738ms step_avg:74.87ms +[2025-09-02 15:02:29] [Rank 0] step:6261/10000 train_time:468738ms step_avg:74.87ms +[2025-09-02 15:02:30] [Rank 0] step:6281/10000 train_time:470312ms step_avg:74.88ms +[2025-09-02 15:02:30] [Rank 0] step:6281/10000 train_time:470312ms step_avg:74.88ms +[2025-09-02 15:02:32] [Rank 0] step:6301/10000 train_time:471883ms step_avg:74.89ms +[2025-09-02 15:02:32] [Rank 0] step:6301/10000 train_time:471883ms step_avg:74.89ms +[2025-09-02 15:02:34] [Rank 0] step:6321/10000 train_time:473450ms step_avg:74.90ms +[2025-09-02 15:02:34] [Rank 0] step:6321/10000 train_time:473450ms step_avg:74.90ms +[2025-09-02 15:02:35] [Rank 0] step:6341/10000 train_time:475025ms step_avg:74.91ms +[2025-09-02 15:02:35] [Rank 0] step:6341/10000 train_time:475025ms step_avg:74.91ms +[2025-09-02 15:02:37] [Rank 0] step:6361/10000 train_time:476600ms step_avg:74.93ms +[2025-09-02 15:02:37] [Rank 0] step:6361/10000 train_time:476600ms step_avg:74.93ms +[2025-09-02 15:02:38] [Rank 0] step:6381/10000 train_time:478176ms step_avg:74.94ms +[2025-09-02 15:02:38] [Rank 0] step:6381/10000 train_time:478176ms step_avg:74.94ms +[2025-09-02 15:02:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:02:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:02:52] [Rank 0] PRINT: step:6400/10000 val_loss:4.0246 svd_entropy: attn_qk:H=0.7378,top10E=0.28,eRank=142.1,q75/q25=109.98 attn_vo:H=0.8273,top10E=0.15,eRank=267.7,q75/q25=69.34 mlp_w1:H=0.7567,top10E=0.30,eRank=174.8,q75/q25=18.13 mlp_w2:H=0.8570,top10E=0.12,eRank=306.7,q75/q25=34.18 vo_prod:H=0.7398,top10E=0.24,eRank=143.3,q75/q25=5257.54 train_time:480003ms step_avg:75.00ms +[2025-09-02 15:02:52] [Rank 0] PRINT: step:6400/10000 val_loss:4.0246 svd_entropy: attn_qk:H=0.7378,top10E=0.28,eRank=142.1,q75/q25=109.98 attn_vo:H=0.8273,top10E=0.15,eRank=267.7,q75/q25=69.34 mlp_w1:H=0.7567,top10E=0.30,eRank=174.8,q75/q25=18.13 mlp_w2:H=0.8570,top10E=0.12,eRank=306.7,q75/q25=34.18 vo_prod:H=0.7398,top10E=0.24,eRank=143.3,q75/q25=5257.54 train_time:480003ms step_avg:75.00ms +[2025-09-02 15:02:52] [Rank 0] step:6401/10000 train_time:480014ms step_avg:74.99ms +[2025-09-02 15:02:52] [Rank 0] step:6401/10000 train_time:480014ms step_avg:74.99ms +[2025-09-02 15:02:53] [Rank 0] step:6421/10000 train_time:481439ms step_avg:74.98ms +[2025-09-02 15:02:53] [Rank 0] step:6421/10000 train_time:481439ms step_avg:74.98ms +[2025-09-02 15:02:55] [Rank 0] step:6441/10000 train_time:483009ms step_avg:74.99ms +[2025-09-02 15:02:55] [Rank 0] step:6441/10000 train_time:483009ms step_avg:74.99ms +[2025-09-02 15:02:57] [Rank 0] step:6461/10000 train_time:484582ms step_avg:75.00ms +[2025-09-02 15:02:57] [Rank 0] step:6461/10000 train_time:484582ms step_avg:75.00ms +[2025-09-02 15:02:58] [Rank 0] step:6481/10000 train_time:486162ms step_avg:75.01ms +[2025-09-02 15:02:58] [Rank 0] step:6481/10000 train_time:486162ms step_avg:75.01ms +[2025-09-02 15:03:00] [Rank 0] step:6501/10000 train_time:487729ms step_avg:75.02ms +[2025-09-02 15:03:00] [Rank 0] step:6501/10000 train_time:487729ms step_avg:75.02ms +[2025-09-02 15:03:01] [Rank 0] step:6521/10000 train_time:489298ms step_avg:75.03ms +[2025-09-02 15:03:01] [Rank 0] step:6521/10000 train_time:489298ms step_avg:75.03ms +[2025-09-02 
15:03:03] [Rank 0] step:6541/10000 train_time:490869ms step_avg:75.04ms +[2025-09-02 15:03:03] [Rank 0] step:6541/10000 train_time:490869ms step_avg:75.04ms +[2025-09-02 15:03:04] [Rank 0] step:6561/10000 train_time:492445ms step_avg:75.06ms +[2025-09-02 15:03:04] [Rank 0] step:6561/10000 train_time:492445ms step_avg:75.06ms +[2025-09-02 15:03:06] [Rank 0] step:6581/10000 train_time:494117ms step_avg:75.08ms +[2025-09-02 15:03:06] [Rank 0] step:6581/10000 train_time:494117ms step_avg:75.08ms +[2025-09-02 15:03:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:03:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:03:20] [Rank 0] PRINT: step:6600/10000 val_loss:4.0116 svd_entropy: attn_qk:H=0.7396,top10E=0.27,eRank=143.7,q75/q25=110.75 attn_vo:H=0.8289,top10E=0.15,eRank=270.2,q75/q25=67.34 mlp_w1:H=0.7589,top10E=0.29,eRank=177.1,q75/q25=18.46 mlp_w2:H=0.8579,top10E=0.12,eRank=308.9,q75/q25=34.11 vo_prod:H=0.7419,top10E=0.24,eRank=145.3,q75/q25=4826.74 train_time:495948ms step_avg:75.14ms +[2025-09-02 15:03:20] [Rank 0] PRINT: step:6600/10000 val_loss:4.0116 svd_entropy: attn_qk:H=0.7396,top10E=0.27,eRank=143.7,q75/q25=110.75 attn_vo:H=0.8289,top10E=0.15,eRank=270.2,q75/q25=67.34 mlp_w1:H=0.7589,top10E=0.29,eRank=177.1,q75/q25=18.46 mlp_w2:H=0.8579,top10E=0.12,eRank=308.9,q75/q25=34.11 vo_prod:H=0.7419,top10E=0.24,eRank=145.3,q75/q25=4826.74 train_time:495948ms step_avg:75.14ms +[2025-09-02 15:03:20] [Rank 0] step:6601/10000 train_time:495959ms step_avg:75.13ms +[2025-09-02 15:03:20] [Rank 0] step:6601/10000 train_time:495959ms step_avg:75.13ms +[2025-09-02 15:03:21] [Rank 0] step:6621/10000 train_time:497398ms step_avg:75.12ms +[2025-09-02 15:03:21] [Rank 0] step:6621/10000 train_time:497398ms step_avg:75.12ms +[2025-09-02 15:03:23] [Rank 0] step:6641/10000 train_time:498971ms 
step_avg:75.13ms +[2025-09-02 15:03:23] [Rank 0] step:6641/10000 train_time:498971ms step_avg:75.13ms +[2025-09-02 15:03:24] [Rank 0] step:6661/10000 train_time:500540ms step_avg:75.14ms +[2025-09-02 15:03:24] [Rank 0] step:6661/10000 train_time:500540ms step_avg:75.14ms +[2025-09-02 15:03:26] [Rank 0] step:6681/10000 train_time:502231ms step_avg:75.17ms +[2025-09-02 15:03:26] [Rank 0] step:6681/10000 train_time:502231ms step_avg:75.17ms +[2025-09-02 15:03:28] [Rank 0] step:6701/10000 train_time:503837ms step_avg:75.19ms +[2025-09-02 15:03:28] [Rank 0] step:6701/10000 train_time:503837ms step_avg:75.19ms +[2025-09-02 15:03:29] [Rank 0] step:6721/10000 train_time:505437ms step_avg:75.20ms +[2025-09-02 15:03:29] [Rank 0] step:6721/10000 train_time:505437ms step_avg:75.20ms +[2025-09-02 15:03:31] [Rank 0] step:6741/10000 train_time:507032ms step_avg:75.22ms +[2025-09-02 15:03:31] [Rank 0] step:6741/10000 train_time:507032ms step_avg:75.22ms +[2025-09-02 15:03:33] [Rank 0] step:6761/10000 train_time:508629ms step_avg:75.23ms +[2025-09-02 15:03:33] [Rank 0] step:6761/10000 train_time:508629ms step_avg:75.23ms +[2025-09-02 15:03:34] [Rank 0] step:6781/10000 train_time:510233ms step_avg:75.24ms +[2025-09-02 15:03:34] [Rank 0] step:6781/10000 train_time:510233ms step_avg:75.24ms +[2025-09-02 15:03:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:03:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:03:48] [Rank 0] PRINT: step:6800/10000 val_loss:3.9958 svd_entropy: attn_qk:H=0.7411,top10E=0.27,eRank=145.0,q75/q25=111.02 attn_vo:H=0.8304,top10E=0.15,eRank=272.4,q75/q25=65.91 mlp_w1:H=0.7609,top10E=0.29,eRank=179.1,q75/q25=18.80 mlp_w2:H=0.8588,top10E=0.12,eRank=310.8,q75/q25=34.36 vo_prod:H=0.7438,top10E=0.23,eRank=147.1,q75/q25=4520.51 train_time:511995ms step_avg:75.29ms +[2025-09-02 15:03:48] [Rank 0] PRINT: step:6800/10000 val_loss:3.9958 svd_entropy: attn_qk:H=0.7411,top10E=0.27,eRank=145.0,q75/q25=111.02 attn_vo:H=0.8304,top10E=0.15,eRank=272.4,q75/q25=65.91 mlp_w1:H=0.7609,top10E=0.29,eRank=179.1,q75/q25=18.80 mlp_w2:H=0.8588,top10E=0.12,eRank=310.8,q75/q25=34.36 vo_prod:H=0.7438,top10E=0.23,eRank=147.1,q75/q25=4520.51 train_time:511995ms step_avg:75.29ms +[2025-09-02 15:03:48] [Rank 0] step:6801/10000 train_time:512006ms step_avg:75.28ms +[2025-09-02 15:03:48] [Rank 0] step:6801/10000 train_time:512006ms step_avg:75.28ms +[2025-09-02 15:03:49] [Rank 0] step:6821/10000 train_time:513455ms step_avg:75.28ms +[2025-09-02 15:03:49] [Rank 0] step:6821/10000 train_time:513455ms step_avg:75.28ms +[2025-09-02 15:03:51] [Rank 0] step:6841/10000 train_time:515045ms step_avg:75.29ms +[2025-09-02 15:03:51] [Rank 0] step:6841/10000 train_time:515045ms step_avg:75.29ms +[2025-09-02 15:03:53] [Rank 0] step:6861/10000 train_time:516642ms step_avg:75.30ms +[2025-09-02 15:03:53] [Rank 0] step:6861/10000 train_time:516642ms step_avg:75.30ms +[2025-09-02 15:03:54] [Rank 0] step:6881/10000 train_time:518239ms step_avg:75.31ms +[2025-09-02 15:03:54] [Rank 0] step:6881/10000 train_time:518239ms step_avg:75.31ms +[2025-09-02 15:03:56] [Rank 0] step:6901/10000 train_time:519832ms step_avg:75.33ms +[2025-09-02 15:03:56] [Rank 0] step:6901/10000 train_time:519832ms step_avg:75.33ms +[2025-09-02 15:03:57] [Rank 0] step:6921/10000 train_time:521424ms step_avg:75.34ms +[2025-09-02 15:03:57] [Rank 0] step:6921/10000 train_time:521424ms step_avg:75.34ms +[2025-09-02 
15:03:59] [Rank 0] step:6941/10000 train_time:523129ms step_avg:75.37ms +[2025-09-02 15:03:59] [Rank 0] step:6941/10000 train_time:523129ms step_avg:75.37ms +[2025-09-02 15:04:01] [Rank 0] step:6961/10000 train_time:524743ms step_avg:75.38ms +[2025-09-02 15:04:01] [Rank 0] step:6961/10000 train_time:524743ms step_avg:75.38ms +[2025-09-02 15:04:02] [Rank 0] step:6981/10000 train_time:526346ms step_avg:75.40ms +[2025-09-02 15:04:02] [Rank 0] step:6981/10000 train_time:526346ms step_avg:75.40ms +[2025-09-02 15:04:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:04:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:04:16] [Rank 0] PRINT: step:7000/10000 val_loss:3.9786 svd_entropy: attn_qk:H=0.7427,top10E=0.27,eRank=146.4,q75/q25=110.53 attn_vo:H=0.8317,top10E=0.15,eRank=274.5,q75/q25=64.14 mlp_w1:H=0.7627,top10E=0.29,eRank=181.0,q75/q25=19.04 mlp_w2:H=0.8596,top10E=0.12,eRank=312.7,q75/q25=34.29 vo_prod:H=0.7456,top10E=0.23,eRank=148.8,q75/q25=4243.02 train_time:528112ms step_avg:75.44ms +[2025-09-02 15:04:16] [Rank 0] PRINT: step:7000/10000 val_loss:3.9786 svd_entropy: attn_qk:H=0.7427,top10E=0.27,eRank=146.4,q75/q25=110.53 attn_vo:H=0.8317,top10E=0.15,eRank=274.5,q75/q25=64.14 mlp_w1:H=0.7627,top10E=0.29,eRank=181.0,q75/q25=19.04 mlp_w2:H=0.8596,top10E=0.12,eRank=312.7,q75/q25=34.29 vo_prod:H=0.7456,top10E=0.23,eRank=148.8,q75/q25=4243.02 train_time:528112ms step_avg:75.44ms +[2025-09-02 15:04:16] [Rank 0] step:7001/10000 train_time:528123ms step_avg:75.44ms +[2025-09-02 15:04:16] [Rank 0] step:7001/10000 train_time:528123ms step_avg:75.44ms +[2025-09-02 15:04:17] [Rank 0] step:7021/10000 train_time:529570ms step_avg:75.43ms +[2025-09-02 15:04:17] [Rank 0] step:7021/10000 train_time:529570ms step_avg:75.43ms +[2025-09-02 15:04:19] [Rank 0] step:7041/10000 train_time:531168ms 
step_avg:75.44ms +[2025-09-02 15:04:19] [Rank 0] step:7041/10000 train_time:531168ms step_avg:75.44ms +[2025-09-02 15:04:21] [Rank 0] step:7061/10000 train_time:532766ms step_avg:75.45ms +[2025-09-02 15:04:21] [Rank 0] step:7061/10000 train_time:532766ms step_avg:75.45ms +[2025-09-02 15:04:22] [Rank 0] step:7081/10000 train_time:534365ms step_avg:75.46ms +[2025-09-02 15:04:22] [Rank 0] step:7081/10000 train_time:534365ms step_avg:75.46ms +[2025-09-02 15:04:24] [Rank 0] step:7101/10000 train_time:535965ms step_avg:75.48ms +[2025-09-02 15:04:24] [Rank 0] step:7101/10000 train_time:535965ms step_avg:75.48ms +[2025-09-02 15:04:25] [Rank 0] step:7121/10000 train_time:537564ms step_avg:75.49ms +[2025-09-02 15:04:25] [Rank 0] step:7121/10000 train_time:537564ms step_avg:75.49ms +[2025-09-02 15:04:27] [Rank 0] step:7141/10000 train_time:539162ms step_avg:75.50ms +[2025-09-02 15:04:27] [Rank 0] step:7141/10000 train_time:539162ms step_avg:75.50ms +[2025-09-02 15:04:29] [Rank 0] step:7161/10000 train_time:540797ms step_avg:75.52ms +[2025-09-02 15:04:29] [Rank 0] step:7161/10000 train_time:540797ms step_avg:75.52ms +[2025-09-02 15:04:30] [Rank 0] step:7181/10000 train_time:542398ms step_avg:75.53ms +[2025-09-02 15:04:30] [Rank 0] step:7181/10000 train_time:542398ms step_avg:75.53ms +[2025-09-02 15:04:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:04:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:04:44] [Rank 0] PRINT: step:7200/10000 val_loss:3.9677 svd_entropy: attn_qk:H=0.7441,top10E=0.27,eRank=147.6,q75/q25=111.00 attn_vo:H=0.8330,top10E=0.14,eRank=276.4,q75/q25=62.74 mlp_w1:H=0.7644,top10E=0.29,eRank=182.8,q75/q25=19.29 mlp_w2:H=0.8604,top10E=0.12,eRank=314.4,q75/q25=34.40 vo_prod:H=0.7473,top10E=0.23,eRank=150.4,q75/q25=4017.04 train_time:544165ms step_avg:75.58ms +[2025-09-02 15:04:44] [Rank 0] PRINT: step:7200/10000 val_loss:3.9677 svd_entropy: attn_qk:H=0.7441,top10E=0.27,eRank=147.6,q75/q25=111.00 attn_vo:H=0.8330,top10E=0.14,eRank=276.4,q75/q25=62.74 mlp_w1:H=0.7644,top10E=0.29,eRank=182.8,q75/q25=19.29 mlp_w2:H=0.8604,top10E=0.12,eRank=314.4,q75/q25=34.40 vo_prod:H=0.7473,top10E=0.23,eRank=150.4,q75/q25=4017.04 train_time:544165ms step_avg:75.58ms +[2025-09-02 15:04:44] [Rank 0] step:7201/10000 train_time:544176ms step_avg:75.57ms +[2025-09-02 15:04:44] [Rank 0] step:7201/10000 train_time:544176ms step_avg:75.57ms +[2025-09-02 15:04:46] [Rank 0] step:7221/10000 train_time:545625ms step_avg:75.56ms +[2025-09-02 15:04:46] [Rank 0] step:7221/10000 train_time:545625ms step_avg:75.56ms +[2025-09-02 15:04:47] [Rank 0] step:7241/10000 train_time:547220ms step_avg:75.57ms +[2025-09-02 15:04:47] [Rank 0] step:7241/10000 train_time:547220ms step_avg:75.57ms +[2025-09-02 15:04:49] [Rank 0] step:7261/10000 train_time:548817ms step_avg:75.58ms +[2025-09-02 15:04:49] [Rank 0] step:7261/10000 train_time:548817ms step_avg:75.58ms +[2025-09-02 15:04:50] [Rank 0] step:7281/10000 train_time:550429ms step_avg:75.60ms +[2025-09-02 15:04:50] [Rank 0] step:7281/10000 train_time:550429ms step_avg:75.60ms +[2025-09-02 15:04:52] [Rank 0] step:7301/10000 train_time:552031ms step_avg:75.61ms +[2025-09-02 15:04:52] [Rank 0] step:7301/10000 train_time:552031ms step_avg:75.61ms +[2025-09-02 15:04:54] [Rank 0] step:7321/10000 train_time:553640ms step_avg:75.62ms +[2025-09-02 15:04:54] [Rank 0] step:7321/10000 train_time:553640ms step_avg:75.62ms +[2025-09-02 
15:04:55] [Rank 0] step:7341/10000 train_time:555243ms step_avg:75.64ms +[2025-09-02 15:04:55] [Rank 0] step:7341/10000 train_time:555243ms step_avg:75.64ms +[2025-09-02 15:04:57] [Rank 0] step:7361/10000 train_time:556846ms step_avg:75.65ms +[2025-09-02 15:04:57] [Rank 0] step:7361/10000 train_time:556846ms step_avg:75.65ms +[2025-09-02 15:04:58] [Rank 0] step:7381/10000 train_time:558457ms step_avg:75.66ms +[2025-09-02 15:04:58] [Rank 0] step:7381/10000 train_time:558457ms step_avg:75.66ms +[2025-09-02 15:05:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:05:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:05:12] [Rank 0] PRINT: step:7400/10000 val_loss:3.9476 svd_entropy: attn_qk:H=0.7454,top10E=0.27,eRank=148.8,q75/q25=111.57 attn_vo:H=0.8341,top10E=0.14,eRank=278.1,q75/q25=61.36 mlp_w1:H=0.7658,top10E=0.29,eRank=184.3,q75/q25=19.49 mlp_w2:H=0.8611,top10E=0.12,eRank=316.0,q75/q25=34.52 vo_prod:H=0.7488,top10E=0.23,eRank=151.9,q75/q25=3781.99 train_time:560204ms step_avg:75.70ms +[2025-09-02 15:05:12] [Rank 0] PRINT: step:7400/10000 val_loss:3.9476 svd_entropy: attn_qk:H=0.7454,top10E=0.27,eRank=148.8,q75/q25=111.57 attn_vo:H=0.8341,top10E=0.14,eRank=278.1,q75/q25=61.36 mlp_w1:H=0.7658,top10E=0.29,eRank=184.3,q75/q25=19.49 mlp_w2:H=0.8611,top10E=0.12,eRank=316.0,q75/q25=34.52 vo_prod:H=0.7488,top10E=0.23,eRank=151.9,q75/q25=3781.99 train_time:560204ms step_avg:75.70ms +[2025-09-02 15:05:12] [Rank 0] step:7401/10000 train_time:560215ms step_avg:75.69ms +[2025-09-02 15:05:12] [Rank 0] step:7401/10000 train_time:560215ms step_avg:75.69ms +[2025-09-02 15:05:13] [Rank 0] step:7421/10000 train_time:561684ms step_avg:75.69ms +[2025-09-02 15:05:13] [Rank 0] step:7421/10000 train_time:561684ms step_avg:75.69ms +[2025-09-02 15:05:15] [Rank 0] step:7441/10000 train_time:563284ms 
step_avg:75.70ms +[2025-09-02 15:05:15] [Rank 0] step:7441/10000 train_time:563284ms step_avg:75.70ms +[2025-09-02 15:05:17] [Rank 0] step:7461/10000 train_time:564885ms step_avg:75.71ms +[2025-09-02 15:05:17] [Rank 0] step:7461/10000 train_time:564885ms step_avg:75.71ms +[2025-09-02 15:05:18] [Rank 0] step:7481/10000 train_time:566494ms step_avg:75.72ms +[2025-09-02 15:05:18] [Rank 0] step:7481/10000 train_time:566494ms step_avg:75.72ms +[2025-09-02 15:05:20] [Rank 0] step:7501/10000 train_time:568100ms step_avg:75.74ms +[2025-09-02 15:05:20] [Rank 0] step:7501/10000 train_time:568100ms step_avg:75.74ms +[2025-09-02 15:05:22] [Rank 0] step:7521/10000 train_time:569704ms step_avg:75.75ms +[2025-09-02 15:05:22] [Rank 0] step:7521/10000 train_time:569704ms step_avg:75.75ms +[2025-09-02 15:05:23] [Rank 0] step:7541/10000 train_time:571322ms step_avg:75.76ms +[2025-09-02 15:05:23] [Rank 0] step:7541/10000 train_time:571322ms step_avg:75.76ms +[2025-09-02 15:05:25] [Rank 0] step:7561/10000 train_time:572914ms step_avg:75.77ms +[2025-09-02 15:05:25] [Rank 0] step:7561/10000 train_time:572914ms step_avg:75.77ms +[2025-09-02 15:05:26] [Rank 0] step:7581/10000 train_time:574527ms step_avg:75.79ms +[2025-09-02 15:05:26] [Rank 0] step:7581/10000 train_time:574527ms step_avg:75.79ms +[2025-09-02 15:05:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:05:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:05:40] [Rank 0] PRINT: step:7600/10000 val_loss:3.9449 svd_entropy: attn_qk:H=0.7466,top10E=0.27,eRank=149.8,q75/q25=111.23 attn_vo:H=0.8351,top10E=0.14,eRank=279.6,q75/q25=60.24 mlp_w1:H=0.7673,top10E=0.28,eRank=185.9,q75/q25=19.66 mlp_w2:H=0.8618,top10E=0.12,eRank=317.4,q75/q25=34.48 vo_prod:H=0.7501,top10E=0.23,eRank=153.2,q75/q25=3556.25 train_time:576304ms step_avg:75.83ms +[2025-09-02 15:05:40] [Rank 0] PRINT: step:7600/10000 val_loss:3.9449 svd_entropy: attn_qk:H=0.7466,top10E=0.27,eRank=149.8,q75/q25=111.23 attn_vo:H=0.8351,top10E=0.14,eRank=279.6,q75/q25=60.24 mlp_w1:H=0.7673,top10E=0.28,eRank=185.9,q75/q25=19.66 mlp_w2:H=0.8618,top10E=0.12,eRank=317.4,q75/q25=34.48 vo_prod:H=0.7501,top10E=0.23,eRank=153.2,q75/q25=3556.25 train_time:576304ms step_avg:75.83ms +[2025-09-02 15:05:40] [Rank 0] step:7601/10000 train_time:576315ms step_avg:75.82ms +[2025-09-02 15:05:40] [Rank 0] step:7601/10000 train_time:576315ms step_avg:75.82ms +[2025-09-02 15:05:41] [Rank 0] step:7621/10000 train_time:577758ms step_avg:75.81ms +[2025-09-02 15:05:41] [Rank 0] step:7621/10000 train_time:577758ms step_avg:75.81ms +[2025-09-02 15:05:43] [Rank 0] step:7641/10000 train_time:579360ms step_avg:75.82ms +[2025-09-02 15:05:43] [Rank 0] step:7641/10000 train_time:579360ms step_avg:75.82ms +[2025-09-02 15:05:45] [Rank 0] step:7661/10000 train_time:580968ms step_avg:75.83ms +[2025-09-02 15:05:45] [Rank 0] step:7661/10000 train_time:580968ms step_avg:75.83ms +[2025-09-02 15:05:46] [Rank 0] step:7681/10000 train_time:582570ms step_avg:75.85ms +[2025-09-02 15:05:46] [Rank 0] step:7681/10000 train_time:582570ms step_avg:75.85ms +[2025-09-02 15:05:48] [Rank 0] step:7701/10000 train_time:584171ms step_avg:75.86ms +[2025-09-02 15:05:48] [Rank 0] step:7701/10000 train_time:584171ms step_avg:75.86ms +[2025-09-02 15:05:49] [Rank 0] step:7721/10000 train_time:585785ms step_avg:75.87ms +[2025-09-02 15:05:49] [Rank 0] step:7721/10000 train_time:585785ms step_avg:75.87ms +[2025-09-02 
15:05:51] [Rank 0] step:7741/10000 train_time:587392ms step_avg:75.88ms +[2025-09-02 15:05:51] [Rank 0] step:7741/10000 train_time:587392ms step_avg:75.88ms +[2025-09-02 15:05:53] [Rank 0] step:7761/10000 train_time:589001ms step_avg:75.89ms +[2025-09-02 15:05:53] [Rank 0] step:7761/10000 train_time:589001ms step_avg:75.89ms +[2025-09-02 15:05:54] [Rank 0] step:7781/10000 train_time:590613ms step_avg:75.90ms +[2025-09-02 15:05:54] [Rank 0] step:7781/10000 train_time:590613ms step_avg:75.90ms +[2025-09-02 15:05:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:05:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:06:08] [Rank 0] PRINT: step:7800/10000 val_loss:3.9287 svd_entropy: attn_qk:H=0.7477,top10E=0.26,eRank=150.8,q75/q25=111.58 attn_vo:H=0.8360,top10E=0.14,eRank=281.1,q75/q25=59.29 mlp_w1:H=0.7687,top10E=0.28,eRank=187.4,q75/q25=19.84 mlp_w2:H=0.8624,top10E=0.12,eRank=318.9,q75/q25=34.43 vo_prod:H=0.7514,top10E=0.23,eRank=154.5,q75/q25=3372.38 train_time:592395ms step_avg:75.95ms +[2025-09-02 15:06:08] [Rank 0] PRINT: step:7800/10000 val_loss:3.9287 svd_entropy: attn_qk:H=0.7477,top10E=0.26,eRank=150.8,q75/q25=111.58 attn_vo:H=0.8360,top10E=0.14,eRank=281.1,q75/q25=59.29 mlp_w1:H=0.7687,top10E=0.28,eRank=187.4,q75/q25=19.84 mlp_w2:H=0.8624,top10E=0.12,eRank=318.9,q75/q25=34.43 vo_prod:H=0.7514,top10E=0.23,eRank=154.5,q75/q25=3372.38 train_time:592395ms step_avg:75.95ms +[2025-09-02 15:06:08] [Rank 0] step:7801/10000 train_time:592407ms step_avg:75.94ms +[2025-09-02 15:06:08] [Rank 0] step:7801/10000 train_time:592407ms step_avg:75.94ms +[2025-09-02 15:06:09] [Rank 0] step:7821/10000 train_time:593845ms step_avg:75.93ms +[2025-09-02 15:06:09] [Rank 0] step:7821/10000 train_time:593845ms step_avg:75.93ms +[2025-09-02 15:06:11] [Rank 0] step:7841/10000 train_time:595441ms 
step_avg:75.94ms +[2025-09-02 15:06:11] [Rank 0] step:7841/10000 train_time:595441ms step_avg:75.94ms +[2025-09-02 15:06:13] [Rank 0] step:7861/10000 train_time:597045ms step_avg:75.95ms +[2025-09-02 15:06:13] [Rank 0] step:7861/10000 train_time:597045ms step_avg:75.95ms +[2025-09-02 15:06:14] [Rank 0] step:7881/10000 train_time:598661ms step_avg:75.96ms +[2025-09-02 15:06:14] [Rank 0] step:7881/10000 train_time:598661ms step_avg:75.96ms +[2025-09-02 15:06:16] [Rank 0] step:7901/10000 train_time:600262ms step_avg:75.97ms +[2025-09-02 15:06:16] [Rank 0] step:7901/10000 train_time:600262ms step_avg:75.97ms +[2025-09-02 15:06:17] [Rank 0] step:7921/10000 train_time:601865ms step_avg:75.98ms +[2025-09-02 15:06:17] [Rank 0] step:7921/10000 train_time:601865ms step_avg:75.98ms +[2025-09-02 15:06:19] [Rank 0] step:7941/10000 train_time:603479ms step_avg:76.00ms +[2025-09-02 15:06:19] [Rank 0] step:7941/10000 train_time:603479ms step_avg:76.00ms +[2025-09-02 15:06:21] [Rank 0] step:7961/10000 train_time:605087ms step_avg:76.01ms +[2025-09-02 15:06:21] [Rank 0] step:7961/10000 train_time:605087ms step_avg:76.01ms +[2025-09-02 15:06:22] [Rank 0] step:7981/10000 train_time:606689ms step_avg:76.02ms +[2025-09-02 15:06:22] [Rank 0] step:7981/10000 train_time:606689ms step_avg:76.02ms +[2025-09-02 15:06:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:06:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:06:36] [Rank 0] PRINT: step:8000/10000 val_loss:3.9138 svd_entropy: attn_qk:H=0.7487,top10E=0.26,eRank=151.7,q75/q25=111.26 attn_vo:H=0.8368,top10E=0.14,eRank=282.4,q75/q25=58.25 mlp_w1:H=0.7698,top10E=0.28,eRank=188.6,q75/q25=19.97 mlp_w2:H=0.8630,top10E=0.11,eRank=320.2,q75/q25=34.42 vo_prod:H=0.7526,top10E=0.22,eRank=155.8,q75/q25=3264.99 train_time:608454ms step_avg:76.06ms +[2025-09-02 15:06:36] [Rank 0] PRINT: step:8000/10000 val_loss:3.9138 svd_entropy: attn_qk:H=0.7487,top10E=0.26,eRank=151.7,q75/q25=111.26 attn_vo:H=0.8368,top10E=0.14,eRank=282.4,q75/q25=58.25 mlp_w1:H=0.7698,top10E=0.28,eRank=188.6,q75/q25=19.97 mlp_w2:H=0.8630,top10E=0.11,eRank=320.2,q75/q25=34.42 vo_prod:H=0.7526,top10E=0.22,eRank=155.8,q75/q25=3264.99 train_time:608454ms step_avg:76.06ms +[2025-09-02 15:06:36] [Rank 0] step:8001/10000 train_time:608465ms step_avg:76.05ms +[2025-09-02 15:06:36] [Rank 0] step:8001/10000 train_time:608465ms step_avg:76.05ms +[2025-09-02 15:06:37] [Rank 0] step:8021/10000 train_time:609925ms step_avg:76.04ms +[2025-09-02 15:06:37] [Rank 0] step:8021/10000 train_time:609925ms step_avg:76.04ms +[2025-09-02 15:06:39] [Rank 0] step:8041/10000 train_time:611639ms step_avg:76.07ms +[2025-09-02 15:06:39] [Rank 0] step:8041/10000 train_time:611639ms step_avg:76.07ms +[2025-09-02 15:06:41] [Rank 0] step:8061/10000 train_time:613239ms step_avg:76.07ms +[2025-09-02 15:06:41] [Rank 0] step:8061/10000 train_time:613239ms step_avg:76.07ms +[2025-09-02 15:06:42] [Rank 0] step:8081/10000 train_time:614834ms step_avg:76.08ms +[2025-09-02 15:06:42] [Rank 0] step:8081/10000 train_time:614834ms step_avg:76.08ms +[2025-09-02 15:06:44] [Rank 0] step:8101/10000 train_time:616444ms step_avg:76.09ms +[2025-09-02 15:06:44] [Rank 0] step:8101/10000 train_time:616444ms step_avg:76.09ms +[2025-09-02 15:06:45] [Rank 0] step:8121/10000 train_time:618047ms step_avg:76.10ms +[2025-09-02 15:06:45] [Rank 0] step:8121/10000 train_time:618047ms step_avg:76.10ms +[2025-09-02 
15:06:47] [Rank 0] step:8141/10000 train_time:619759ms step_avg:76.13ms +[2025-09-02 15:06:47] [Rank 0] step:8141/10000 train_time:619759ms step_avg:76.13ms +[2025-09-02 15:06:49] [Rank 0] step:8161/10000 train_time:621375ms step_avg:76.14ms +[2025-09-02 15:06:49] [Rank 0] step:8161/10000 train_time:621375ms step_avg:76.14ms +[2025-09-02 15:06:50] [Rank 0] step:8181/10000 train_time:623011ms step_avg:76.15ms +[2025-09-02 15:06:50] [Rank 0] step:8181/10000 train_time:623011ms step_avg:76.15ms +[2025-09-02 15:06:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:06:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:07:04] [Rank 0] PRINT: step:8200/10000 val_loss:3.9042 svd_entropy: attn_qk:H=0.7495,top10E=0.26,eRank=152.5,q75/q25=110.87 attn_vo:H=0.8376,top10E=0.14,eRank=283.6,q75/q25=57.11 mlp_w1:H=0.7709,top10E=0.28,eRank=189.8,q75/q25=20.12 mlp_w2:H=0.8636,top10E=0.11,eRank=321.4,q75/q25=34.35 vo_prod:H=0.7537,top10E=0.22,eRank=156.8,q75/q25=3128.26 train_time:624832ms step_avg:76.20ms +[2025-09-02 15:07:04] [Rank 0] PRINT: step:8200/10000 val_loss:3.9042 svd_entropy: attn_qk:H=0.7495,top10E=0.26,eRank=152.5,q75/q25=110.87 attn_vo:H=0.8376,top10E=0.14,eRank=283.6,q75/q25=57.11 mlp_w1:H=0.7709,top10E=0.28,eRank=189.8,q75/q25=20.12 mlp_w2:H=0.8636,top10E=0.11,eRank=321.4,q75/q25=34.35 vo_prod:H=0.7537,top10E=0.22,eRank=156.8,q75/q25=3128.26 train_time:624832ms step_avg:76.20ms +[2025-09-02 15:07:04] [Rank 0] step:8201/10000 train_time:624843ms step_avg:76.19ms +[2025-09-02 15:07:04] [Rank 0] step:8201/10000 train_time:624843ms step_avg:76.19ms +[2025-09-02 15:07:05] [Rank 0] step:8221/10000 train_time:626344ms step_avg:76.19ms +[2025-09-02 15:07:05] [Rank 0] step:8221/10000 train_time:626344ms step_avg:76.19ms +[2025-09-02 15:07:07] [Rank 0] step:8241/10000 train_time:627985ms 
step_avg:76.20ms +[2025-09-02 15:07:07] [Rank 0] step:8241/10000 train_time:627985ms step_avg:76.20ms +[2025-09-02 15:07:09] [Rank 0] step:8261/10000 train_time:629616ms step_avg:76.22ms +[2025-09-02 15:07:09] [Rank 0] step:8261/10000 train_time:629616ms step_avg:76.22ms +[2025-09-02 15:07:10] [Rank 0] step:8281/10000 train_time:631250ms step_avg:76.23ms +[2025-09-02 15:07:10] [Rank 0] step:8281/10000 train_time:631250ms step_avg:76.23ms +[2025-09-02 15:07:12] [Rank 0] step:8301/10000 train_time:632888ms step_avg:76.24ms +[2025-09-02 15:07:12] [Rank 0] step:8301/10000 train_time:632888ms step_avg:76.24ms +[2025-09-02 15:07:14] [Rank 0] step:8321/10000 train_time:634506ms step_avg:76.25ms +[2025-09-02 15:07:14] [Rank 0] step:8321/10000 train_time:634506ms step_avg:76.25ms +[2025-09-02 15:07:15] [Rank 0] step:8341/10000 train_time:636140ms step_avg:76.27ms +[2025-09-02 15:07:15] [Rank 0] step:8341/10000 train_time:636140ms step_avg:76.27ms +[2025-09-02 15:07:17] [Rank 0] step:8361/10000 train_time:637777ms step_avg:76.28ms +[2025-09-02 15:07:17] [Rank 0] step:8361/10000 train_time:637777ms step_avg:76.28ms +[2025-09-02 15:07:19] [Rank 0] step:8381/10000 train_time:639419ms step_avg:76.29ms +[2025-09-02 15:07:19] [Rank 0] step:8381/10000 train_time:639419ms step_avg:76.29ms +[2025-09-02 15:07:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:07:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:07:32] [Rank 0] PRINT: step:8400/10000 val_loss:3.8934 svd_entropy: attn_qk:H=0.7503,top10E=0.26,eRank=153.2,q75/q25=111.00 attn_vo:H=0.8383,top10E=0.14,eRank=284.6,q75/q25=56.09 mlp_w1:H=0.7718,top10E=0.28,eRank=190.9,q75/q25=20.25 mlp_w2:H=0.8640,top10E=0.11,eRank=322.5,q75/q25=34.38 vo_prod:H=0.7547,top10E=0.22,eRank=157.8,q75/q25=3013.77 train_time:641216ms step_avg:76.34ms +[2025-09-02 15:07:32] [Rank 0] PRINT: step:8400/10000 val_loss:3.8934 svd_entropy: attn_qk:H=0.7503,top10E=0.26,eRank=153.2,q75/q25=111.00 attn_vo:H=0.8383,top10E=0.14,eRank=284.6,q75/q25=56.09 mlp_w1:H=0.7718,top10E=0.28,eRank=190.9,q75/q25=20.25 mlp_w2:H=0.8640,top10E=0.11,eRank=322.5,q75/q25=34.38 vo_prod:H=0.7547,top10E=0.22,eRank=157.8,q75/q25=3013.77 train_time:641216ms step_avg:76.34ms +[2025-09-02 15:07:32] [Rank 0] step:8401/10000 train_time:641227ms step_avg:76.33ms +[2025-09-02 15:07:32] [Rank 0] step:8401/10000 train_time:641227ms step_avg:76.33ms +[2025-09-02 15:07:34] [Rank 0] step:8421/10000 train_time:642703ms step_avg:76.32ms +[2025-09-02 15:07:34] [Rank 0] step:8421/10000 train_time:642703ms step_avg:76.32ms +[2025-09-02 15:07:35] [Rank 0] step:8441/10000 train_time:644336ms step_avg:76.33ms +[2025-09-02 15:07:35] [Rank 0] step:8441/10000 train_time:644336ms step_avg:76.33ms +[2025-09-02 15:07:37] [Rank 0] step:8461/10000 train_time:645960ms step_avg:76.35ms +[2025-09-02 15:07:37] [Rank 0] step:8461/10000 train_time:645960ms step_avg:76.35ms +[2025-09-02 15:07:39] [Rank 0] step:8481/10000 train_time:647598ms step_avg:76.36ms +[2025-09-02 15:07:39] [Rank 0] step:8481/10000 train_time:647598ms step_avg:76.36ms +[2025-09-02 15:07:40] [Rank 0] step:8501/10000 train_time:649253ms step_avg:76.37ms +[2025-09-02 15:07:40] [Rank 0] step:8501/10000 train_time:649253ms step_avg:76.37ms +[2025-09-02 15:07:42] [Rank 0] step:8521/10000 train_time:650894ms step_avg:76.39ms +[2025-09-02 15:07:42] [Rank 0] step:8521/10000 train_time:650894ms step_avg:76.39ms +[2025-09-02 
15:07:44] [Rank 0] step:8541/10000 train_time:652541ms step_avg:76.40ms +[2025-09-02 15:07:44] [Rank 0] step:8541/10000 train_time:652541ms step_avg:76.40ms +[2025-09-02 15:07:45] [Rank 0] step:8561/10000 train_time:654176ms step_avg:76.41ms +[2025-09-02 15:07:45] [Rank 0] step:8561/10000 train_time:654176ms step_avg:76.41ms +[2025-09-02 15:07:47] [Rank 0] step:8581/10000 train_time:655814ms step_avg:76.43ms +[2025-09-02 15:07:47] [Rank 0] step:8581/10000 train_time:655814ms step_avg:76.43ms +[2025-09-02 15:07:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:07:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:08:00] [Rank 0] PRINT: step:8600/10000 val_loss:3.8843 svd_entropy: attn_qk:H=0.7510,top10E=0.26,eRank=153.8,q75/q25=110.73 attn_vo:H=0.8388,top10E=0.14,eRank=285.5,q75/q25=55.37 mlp_w1:H=0.7727,top10E=0.28,eRank=191.9,q75/q25=20.35 mlp_w2:H=0.8645,top10E=0.11,eRank=323.4,q75/q25=34.28 vo_prod:H=0.7554,top10E=0.22,eRank=158.6,q75/q25=2890.53 train_time:657607ms step_avg:76.47ms +[2025-09-02 15:08:00] [Rank 0] PRINT: step:8600/10000 val_loss:3.8843 svd_entropy: attn_qk:H=0.7510,top10E=0.26,eRank=153.8,q75/q25=110.73 attn_vo:H=0.8388,top10E=0.14,eRank=285.5,q75/q25=55.37 mlp_w1:H=0.7727,top10E=0.28,eRank=191.9,q75/q25=20.35 mlp_w2:H=0.8645,top10E=0.11,eRank=323.4,q75/q25=34.28 vo_prod:H=0.7554,top10E=0.22,eRank=158.6,q75/q25=2890.53 train_time:657607ms step_avg:76.47ms +[2025-09-02 15:08:00] [Rank 0] step:8601/10000 train_time:657618ms step_avg:76.46ms +[2025-09-02 15:08:00] [Rank 0] step:8601/10000 train_time:657618ms step_avg:76.46ms +[2025-09-02 15:08:02] [Rank 0] step:8621/10000 train_time:659101ms step_avg:76.45ms +[2025-09-02 15:08:02] [Rank 0] step:8621/10000 train_time:659101ms step_avg:76.45ms +[2025-09-02 15:08:04] [Rank 0] step:8641/10000 train_time:660737ms 
step_avg:76.47ms +[2025-09-02 15:08:04] [Rank 0] step:8641/10000 train_time:660737ms step_avg:76.47ms +[2025-09-02 15:08:05] [Rank 0] step:8661/10000 train_time:662370ms step_avg:76.48ms +[2025-09-02 15:08:05] [Rank 0] step:8661/10000 train_time:662370ms step_avg:76.48ms +[2025-09-02 15:08:07] [Rank 0] step:8681/10000 train_time:664002ms step_avg:76.49ms +[2025-09-02 15:08:07] [Rank 0] step:8681/10000 train_time:664002ms step_avg:76.49ms +[2025-09-02 15:08:09] [Rank 0] step:8701/10000 train_time:665631ms step_avg:76.50ms +[2025-09-02 15:08:09] [Rank 0] step:8701/10000 train_time:665631ms step_avg:76.50ms +[2025-09-02 15:08:10] [Rank 0] step:8721/10000 train_time:667270ms step_avg:76.51ms +[2025-09-02 15:08:10] [Rank 0] step:8721/10000 train_time:667270ms step_avg:76.51ms +[2025-09-02 15:08:12] [Rank 0] step:8741/10000 train_time:668899ms step_avg:76.52ms +[2025-09-02 15:08:12] [Rank 0] step:8741/10000 train_time:668899ms step_avg:76.52ms +[2025-09-02 15:08:13] [Rank 0] step:8761/10000 train_time:670528ms step_avg:76.54ms +[2025-09-02 15:08:13] [Rank 0] step:8761/10000 train_time:670528ms step_avg:76.54ms +[2025-09-02 15:08:15] [Rank 0] step:8781/10000 train_time:672171ms step_avg:76.55ms +[2025-09-02 15:08:15] [Rank 0] step:8781/10000 train_time:672171ms step_avg:76.55ms +[2025-09-02 15:08:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:08:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:08:29] [Rank 0] PRINT: step:8800/10000 val_loss:3.8753 svd_entropy: attn_qk:H=0.7516,top10E=0.26,eRank=154.4,q75/q25=111.20 attn_vo:H=0.8394,top10E=0.14,eRank=286.4,q75/q25=54.82 mlp_w1:H=0.7735,top10E=0.28,eRank=192.8,q75/q25=20.39 mlp_w2:H=0.8648,top10E=0.11,eRank=324.3,q75/q25=34.35 vo_prod:H=0.7562,top10E=0.22,eRank=159.5,q75/q25=2796.86 train_time:673975ms step_avg:76.59ms +[2025-09-02 15:08:29] [Rank 0] PRINT: step:8800/10000 val_loss:3.8753 svd_entropy: attn_qk:H=0.7516,top10E=0.26,eRank=154.4,q75/q25=111.20 attn_vo:H=0.8394,top10E=0.14,eRank=286.4,q75/q25=54.82 mlp_w1:H=0.7735,top10E=0.28,eRank=192.8,q75/q25=20.39 mlp_w2:H=0.8648,top10E=0.11,eRank=324.3,q75/q25=34.35 vo_prod:H=0.7562,top10E=0.22,eRank=159.5,q75/q25=2796.86 train_time:673975ms step_avg:76.59ms +[2025-09-02 15:08:29] [Rank 0] step:8801/10000 train_time:673986ms step_avg:76.58ms +[2025-09-02 15:08:29] [Rank 0] step:8801/10000 train_time:673986ms step_avg:76.58ms +[2025-09-02 15:08:30] [Rank 0] step:8821/10000 train_time:675461ms step_avg:76.57ms +[2025-09-02 15:08:30] [Rank 0] step:8821/10000 train_time:675461ms step_avg:76.57ms +[2025-09-02 15:08:32] [Rank 0] step:8841/10000 train_time:677113ms step_avg:76.59ms +[2025-09-02 15:08:32] [Rank 0] step:8841/10000 train_time:677113ms step_avg:76.59ms +[2025-09-02 15:08:34] [Rank 0] step:8861/10000 train_time:678742ms step_avg:76.60ms +[2025-09-02 15:08:34] [Rank 0] step:8861/10000 train_time:678742ms step_avg:76.60ms +[2025-09-02 15:08:35] [Rank 0] step:8881/10000 train_time:680373ms step_avg:76.61ms +[2025-09-02 15:08:35] [Rank 0] step:8881/10000 train_time:680373ms step_avg:76.61ms +[2025-09-02 15:08:37] [Rank 0] step:8901/10000 train_time:682012ms step_avg:76.62ms +[2025-09-02 15:08:37] [Rank 0] step:8901/10000 train_time:682012ms step_avg:76.62ms +[2025-09-02 15:08:38] [Rank 0] step:8921/10000 train_time:683653ms step_avg:76.63ms +[2025-09-02 15:08:38] [Rank 0] step:8921/10000 train_time:683653ms step_avg:76.63ms +[2025-09-02 
15:08:40] [Rank 0] step:8941/10000 train_time:685299ms step_avg:76.65ms +[2025-09-02 15:08:40] [Rank 0] step:8941/10000 train_time:685299ms step_avg:76.65ms +[2025-09-02 15:08:42] [Rank 0] step:8961/10000 train_time:686950ms step_avg:76.66ms +[2025-09-02 15:08:42] [Rank 0] step:8961/10000 train_time:686950ms step_avg:76.66ms +[2025-09-02 15:08:43] [Rank 0] step:8981/10000 train_time:688579ms step_avg:76.67ms +[2025-09-02 15:08:43] [Rank 0] step:8981/10000 train_time:688579ms step_avg:76.67ms +[2025-09-02 15:08:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:08:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:08:57] [Rank 0] PRINT: step:9000/10000 val_loss:3.8656 svd_entropy: attn_qk:H=0.7521,top10E=0.26,eRank=154.9,q75/q25=110.90 attn_vo:H=0.8398,top10E=0.14,eRank=287.1,q75/q25=54.26 mlp_w1:H=0.7742,top10E=0.27,eRank=193.6,q75/q25=20.47 mlp_w2:H=0.8652,top10E=0.11,eRank=325.0,q75/q25=34.37 vo_prod:H=0.7569,top10E=0.22,eRank=160.2,q75/q25=2711.91 train_time:690370ms step_avg:76.71ms +[2025-09-02 15:08:57] [Rank 0] PRINT: step:9000/10000 val_loss:3.8656 svd_entropy: attn_qk:H=0.7521,top10E=0.26,eRank=154.9,q75/q25=110.90 attn_vo:H=0.8398,top10E=0.14,eRank=287.1,q75/q25=54.26 mlp_w1:H=0.7742,top10E=0.27,eRank=193.6,q75/q25=20.47 mlp_w2:H=0.8652,top10E=0.11,eRank=325.0,q75/q25=34.37 vo_prod:H=0.7569,top10E=0.22,eRank=160.2,q75/q25=2711.91 train_time:690370ms step_avg:76.71ms +[2025-09-02 15:08:57] [Rank 0] step:9001/10000 train_time:690381ms step_avg:76.70ms +[2025-09-02 15:08:57] [Rank 0] step:9001/10000 train_time:690381ms step_avg:76.70ms +[2025-09-02 15:08:58] [Rank 0] step:9021/10000 train_time:691865ms step_avg:76.69ms +[2025-09-02 15:08:58] [Rank 0] step:9021/10000 train_time:691865ms step_avg:76.69ms +[2025-09-02 15:09:00] [Rank 0] step:9041/10000 train_time:693491ms 
step_avg:76.71ms +[2025-09-02 15:09:00] [Rank 0] step:9041/10000 train_time:693491ms step_avg:76.71ms +[2025-09-02 15:09:02] [Rank 0] step:9061/10000 train_time:695134ms step_avg:76.72ms +[2025-09-02 15:09:02] [Rank 0] step:9061/10000 train_time:695134ms step_avg:76.72ms +[2025-09-02 15:09:03] [Rank 0] step:9081/10000 train_time:696774ms step_avg:76.73ms +[2025-09-02 15:09:03] [Rank 0] step:9081/10000 train_time:696774ms step_avg:76.73ms +[2025-09-02 15:09:05] [Rank 0] step:9101/10000 train_time:698532ms step_avg:76.75ms +[2025-09-02 15:09:05] [Rank 0] step:9101/10000 train_time:698532ms step_avg:76.75ms +[2025-09-02 15:09:07] [Rank 0] step:9121/10000 train_time:700268ms step_avg:76.78ms +[2025-09-02 15:09:07] [Rank 0] step:9121/10000 train_time:700268ms step_avg:76.78ms +[2025-09-02 15:09:08] [Rank 0] step:9141/10000 train_time:701893ms step_avg:76.79ms +[2025-09-02 15:09:08] [Rank 0] step:9141/10000 train_time:701893ms step_avg:76.79ms +[2025-09-02 15:09:10] [Rank 0] step:9161/10000 train_time:703517ms step_avg:76.79ms +[2025-09-02 15:09:10] [Rank 0] step:9161/10000 train_time:703517ms step_avg:76.79ms +[2025-09-02 15:09:12] [Rank 0] step:9181/10000 train_time:705184ms step_avg:76.81ms +[2025-09-02 15:09:12] [Rank 0] step:9181/10000 train_time:705184ms step_avg:76.81ms +[2025-09-02 15:09:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:09:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:09:25] [Rank 0] PRINT: step:9200/10000 val_loss:3.8583 svd_entropy: attn_qk:H=0.7526,top10E=0.26,eRank=155.4,q75/q25=110.89 attn_vo:H=0.8403,top10E=0.14,eRank=287.8,q75/q25=53.78 mlp_w1:H=0.7748,top10E=0.27,eRank=194.3,q75/q25=20.53 mlp_w2:H=0.8655,top10E=0.11,eRank=325.7,q75/q25=34.30 vo_prod:H=0.7575,top10E=0.22,eRank=160.8,q75/q25=2672.67 train_time:706979ms step_avg:76.85ms +[2025-09-02 15:09:25] [Rank 0] PRINT: step:9200/10000 val_loss:3.8583 svd_entropy: attn_qk:H=0.7526,top10E=0.26,eRank=155.4,q75/q25=110.89 attn_vo:H=0.8403,top10E=0.14,eRank=287.8,q75/q25=53.78 mlp_w1:H=0.7748,top10E=0.27,eRank=194.3,q75/q25=20.53 mlp_w2:H=0.8655,top10E=0.11,eRank=325.7,q75/q25=34.30 vo_prod:H=0.7575,top10E=0.22,eRank=160.8,q75/q25=2672.67 train_time:706979ms step_avg:76.85ms +[2025-09-02 15:09:25] [Rank 0] step:9201/10000 train_time:706990ms step_avg:76.84ms +[2025-09-02 15:09:25] [Rank 0] step:9201/10000 train_time:706990ms step_avg:76.84ms +[2025-09-02 15:09:27] [Rank 0] step:9221/10000 train_time:708481ms step_avg:76.83ms +[2025-09-02 15:09:27] [Rank 0] step:9221/10000 train_time:708481ms step_avg:76.83ms +[2025-09-02 15:09:28] [Rank 0] step:9241/10000 train_time:710124ms step_avg:76.84ms +[2025-09-02 15:09:28] [Rank 0] step:9241/10000 train_time:710124ms step_avg:76.84ms +[2025-09-02 15:09:30] [Rank 0] step:9261/10000 train_time:711770ms step_avg:76.86ms +[2025-09-02 15:09:30] [Rank 0] step:9261/10000 train_time:711770ms step_avg:76.86ms +[2025-09-02 15:09:32] [Rank 0] step:9281/10000 train_time:713399ms step_avg:76.87ms +[2025-09-02 15:09:32] [Rank 0] step:9281/10000 train_time:713399ms step_avg:76.87ms +[2025-09-02 15:09:33] [Rank 0] step:9301/10000 train_time:715034ms step_avg:76.88ms +[2025-09-02 15:09:33] [Rank 0] step:9301/10000 train_time:715034ms step_avg:76.88ms +[2025-09-02 15:09:35] [Rank 0] step:9321/10000 train_time:716673ms step_avg:76.89ms +[2025-09-02 15:09:35] [Rank 0] step:9321/10000 train_time:716673ms step_avg:76.89ms +[2025-09-02 
15:09:37] [Rank 0] step:9341/10000 train_time:718311ms step_avg:76.90ms +[2025-09-02 15:09:37] [Rank 0] step:9341/10000 train_time:718311ms step_avg:76.90ms +[2025-09-02 15:09:38] [Rank 0] step:9361/10000 train_time:719950ms step_avg:76.91ms +[2025-09-02 15:09:38] [Rank 0] step:9361/10000 train_time:719950ms step_avg:76.91ms +[2025-09-02 15:09:40] [Rank 0] step:9381/10000 train_time:721601ms step_avg:76.92ms +[2025-09-02 15:09:40] [Rank 0] step:9381/10000 train_time:721601ms step_avg:76.92ms +[2025-09-02 15:09:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:09:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:09:53] [Rank 0] PRINT: step:9400/10000 val_loss:3.8507 svd_entropy: attn_qk:H=0.7530,top10E=0.26,eRank=155.7,q75/q25=111.02 attn_vo:H=0.8406,top10E=0.14,eRank=288.3,q75/q25=53.41 mlp_w1:H=0.7753,top10E=0.27,eRank=194.9,q75/q25=20.52 mlp_w2:H=0.8657,top10E=0.11,eRank=326.2,q75/q25=34.28 vo_prod:H=0.7581,top10E=0.22,eRank=161.3,q75/q25=2601.86 train_time:723407ms step_avg:76.96ms +[2025-09-02 15:09:53] [Rank 0] PRINT: step:9400/10000 val_loss:3.8507 svd_entropy: attn_qk:H=0.7530,top10E=0.26,eRank=155.7,q75/q25=111.02 attn_vo:H=0.8406,top10E=0.14,eRank=288.3,q75/q25=53.41 mlp_w1:H=0.7753,top10E=0.27,eRank=194.9,q75/q25=20.52 mlp_w2:H=0.8657,top10E=0.11,eRank=326.2,q75/q25=34.28 vo_prod:H=0.7581,top10E=0.22,eRank=161.3,q75/q25=2601.86 train_time:723407ms step_avg:76.96ms +[2025-09-02 15:09:53] [Rank 0] step:9401/10000 train_time:723418ms step_avg:76.95ms +[2025-09-02 15:09:53] [Rank 0] step:9401/10000 train_time:723418ms step_avg:76.95ms +[2025-09-02 15:09:55] [Rank 0] step:9421/10000 train_time:724910ms step_avg:76.95ms +[2025-09-02 15:09:55] [Rank 0] step:9421/10000 train_time:724910ms step_avg:76.95ms +[2025-09-02 15:09:57] [Rank 0] step:9441/10000 train_time:726544ms 
step_avg:76.96ms +[2025-09-02 15:09:57] [Rank 0] step:9441/10000 train_time:726544ms step_avg:76.96ms +[2025-09-02 15:09:58] [Rank 0] step:9461/10000 train_time:728185ms step_avg:76.97ms +[2025-09-02 15:09:58] [Rank 0] step:9461/10000 train_time:728185ms step_avg:76.97ms +[2025-09-02 15:10:00] [Rank 0] step:9481/10000 train_time:729821ms step_avg:76.98ms +[2025-09-02 15:10:00] [Rank 0] step:9481/10000 train_time:729821ms step_avg:76.98ms +[2025-09-02 15:10:02] [Rank 0] step:9501/10000 train_time:731472ms step_avg:76.99ms +[2025-09-02 15:10:02] [Rank 0] step:9501/10000 train_time:731472ms step_avg:76.99ms +[2025-09-02 15:10:03] [Rank 0] step:9521/10000 train_time:733104ms step_avg:77.00ms +[2025-09-02 15:10:03] [Rank 0] step:9521/10000 train_time:733104ms step_avg:77.00ms +[2025-09-02 15:10:05] [Rank 0] step:9541/10000 train_time:734741ms step_avg:77.01ms +[2025-09-02 15:10:05] [Rank 0] step:9541/10000 train_time:734741ms step_avg:77.01ms +[2025-09-02 15:10:06] [Rank 0] step:9561/10000 train_time:736371ms step_avg:77.02ms +[2025-09-02 15:10:06] [Rank 0] step:9561/10000 train_time:736371ms step_avg:77.02ms +[2025-09-02 15:10:08] [Rank 0] step:9581/10000 train_time:738109ms step_avg:77.04ms +[2025-09-02 15:10:08] [Rank 0] step:9581/10000 train_time:738109ms step_avg:77.04ms +[2025-09-02 15:10:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:10:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:10:22] [Rank 0] PRINT: step:9600/10000 val_loss:3.8441 svd_entropy: attn_qk:H=0.7533,top10E=0.26,eRank=156.1,q75/q25=111.11 attn_vo:H=0.8409,top10E=0.14,eRank=288.8,q75/q25=53.11 mlp_w1:H=0.7758,top10E=0.27,eRank=195.4,q75/q25=20.55 mlp_w2:H=0.8659,top10E=0.11,eRank=326.7,q75/q25=34.28 vo_prod:H=0.7585,top10E=0.22,eRank=161.8,q75/q25=2558.75 train_time:739921ms step_avg:77.08ms +[2025-09-02 15:10:22] [Rank 0] PRINT: step:9600/10000 val_loss:3.8441 svd_entropy: attn_qk:H=0.7533,top10E=0.26,eRank=156.1,q75/q25=111.11 attn_vo:H=0.8409,top10E=0.14,eRank=288.8,q75/q25=53.11 mlp_w1:H=0.7758,top10E=0.27,eRank=195.4,q75/q25=20.55 mlp_w2:H=0.8659,top10E=0.11,eRank=326.7,q75/q25=34.28 vo_prod:H=0.7585,top10E=0.22,eRank=161.8,q75/q25=2558.75 train_time:739921ms step_avg:77.08ms +[2025-09-02 15:10:22] [Rank 0] step:9601/10000 train_time:739932ms step_avg:77.07ms +[2025-09-02 15:10:22] [Rank 0] step:9601/10000 train_time:739932ms step_avg:77.07ms +[2025-09-02 15:10:23] [Rank 0] step:9621/10000 train_time:741416ms step_avg:77.06ms +[2025-09-02 15:10:23] [Rank 0] step:9621/10000 train_time:741416ms step_avg:77.06ms +[2025-09-02 15:10:25] [Rank 0] step:9641/10000 train_time:743057ms step_avg:77.07ms +[2025-09-02 15:10:25] [Rank 0] step:9641/10000 train_time:743057ms step_avg:77.07ms +[2025-09-02 15:10:27] [Rank 0] step:9661/10000 train_time:744720ms step_avg:77.09ms +[2025-09-02 15:10:27] [Rank 0] step:9661/10000 train_time:744720ms step_avg:77.09ms +[2025-09-02 15:10:28] [Rank 0] step:9681/10000 train_time:746376ms step_avg:77.10ms +[2025-09-02 15:10:28] [Rank 0] step:9681/10000 train_time:746376ms step_avg:77.10ms +[2025-09-02 15:10:30] [Rank 0] step:9701/10000 train_time:748050ms step_avg:77.11ms +[2025-09-02 15:10:30] [Rank 0] step:9701/10000 train_time:748050ms step_avg:77.11ms +[2025-09-02 15:10:32] [Rank 0] step:9721/10000 train_time:749704ms step_avg:77.12ms +[2025-09-02 15:10:32] [Rank 0] step:9721/10000 train_time:749704ms step_avg:77.12ms +[2025-09-02 
15:10:33] [Rank 0] step:9741/10000 train_time:751386ms step_avg:77.14ms +[2025-09-02 15:10:33] [Rank 0] step:9741/10000 train_time:751386ms step_avg:77.14ms +[2025-09-02 15:10:35] [Rank 0] step:9761/10000 train_time:753048ms step_avg:77.15ms +[2025-09-02 15:10:35] [Rank 0] step:9761/10000 train_time:753048ms step_avg:77.15ms +[2025-09-02 15:10:37] [Rank 0] step:9781/10000 train_time:754722ms step_avg:77.16ms +[2025-09-02 15:10:37] [Rank 0] step:9781/10000 train_time:754722ms step_avg:77.16ms +[2025-09-02 15:10:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:10:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:10:50] [Rank 0] PRINT: step:9800/10000 val_loss:3.8383 svd_entropy: attn_qk:H=0.7535,top10E=0.26,eRank=156.3,q75/q25=111.06 attn_vo:H=0.8411,top10E=0.14,eRank=289.1,q75/q25=52.95 mlp_w1:H=0.7761,top10E=0.27,eRank=195.8,q75/q25=20.55 mlp_w2:H=0.8661,top10E=0.11,eRank=327.1,q75/q25=34.27 vo_prod:H=0.7589,top10E=0.22,eRank=162.2,q75/q25=2526.94 train_time:756570ms step_avg:77.20ms +[2025-09-02 15:10:50] [Rank 0] PRINT: step:9800/10000 val_loss:3.8383 svd_entropy: attn_qk:H=0.7535,top10E=0.26,eRank=156.3,q75/q25=111.06 attn_vo:H=0.8411,top10E=0.14,eRank=289.1,q75/q25=52.95 mlp_w1:H=0.7761,top10E=0.27,eRank=195.8,q75/q25=20.55 mlp_w2:H=0.8661,top10E=0.11,eRank=327.1,q75/q25=34.27 vo_prod:H=0.7589,top10E=0.22,eRank=162.2,q75/q25=2526.94 train_time:756570ms step_avg:77.20ms +[2025-09-02 15:10:50] [Rank 0] step:9801/10000 train_time:756581ms step_avg:77.19ms +[2025-09-02 15:10:50] [Rank 0] step:9801/10000 train_time:756581ms step_avg:77.19ms +[2025-09-02 15:10:52] [Rank 0] step:9821/10000 train_time:758084ms step_avg:77.19ms +[2025-09-02 15:10:52] [Rank 0] step:9821/10000 train_time:758084ms step_avg:77.19ms +[2025-09-02 15:10:53] [Rank 0] step:9841/10000 train_time:759762ms 
step_avg:77.20ms +[2025-09-02 15:10:53] [Rank 0] step:9841/10000 train_time:759762ms step_avg:77.20ms +[2025-09-02 15:10:55] [Rank 0] step:9861/10000 train_time:761413ms step_avg:77.21ms +[2025-09-02 15:10:55] [Rank 0] step:9861/10000 train_time:761413ms step_avg:77.21ms +[2025-09-02 15:10:57] [Rank 0] step:9881/10000 train_time:763065ms step_avg:77.23ms +[2025-09-02 15:10:57] [Rank 0] step:9881/10000 train_time:763065ms step_avg:77.23ms +[2025-09-02 15:10:58] [Rank 0] step:9901/10000 train_time:764735ms step_avg:77.24ms +[2025-09-02 15:10:58] [Rank 0] step:9901/10000 train_time:764735ms step_avg:77.24ms +[2025-09-02 15:11:00] [Rank 0] step:9921/10000 train_time:766394ms step_avg:77.25ms +[2025-09-02 15:11:00] [Rank 0] step:9921/10000 train_time:766394ms step_avg:77.25ms +[2025-09-02 15:11:02] [Rank 0] step:9941/10000 train_time:768063ms step_avg:77.26ms +[2025-09-02 15:11:02] [Rank 0] step:9941/10000 train_time:768063ms step_avg:77.26ms +[2025-09-02 15:11:03] [Rank 0] step:9961/10000 train_time:769725ms step_avg:77.27ms +[2025-09-02 15:11:03] [Rank 0] step:9961/10000 train_time:769725ms step_avg:77.27ms +[2025-09-02 15:11:05] [Rank 0] step:9981/10000 train_time:771386ms step_avg:77.29ms +[2025-09-02 15:11:05] [Rank 0] step:9981/10000 train_time:771386ms step_avg:77.29ms +[2025-09-02 15:11:07] [Rank 0] step:10000/10000 train_time:772971ms step_avg:77.30ms +[2025-09-02 15:11:07] [Rank 0] step:10000/10000 train_time:772971ms step_avg:77.30ms +[2025-09-02 15:11:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 15:11:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 15:11:19] [Rank 0] PRINT: step:10000/10000 val_loss:3.8326 svd_entropy: attn_qk:H=0.7537,top10E=0.26,eRank=156.5,q75/q25=111.21 attn_vo:H=0.8413,top10E=0.14,eRank=289.4,q75/q25=52.74 mlp_w1:H=0.7763,top10E=0.27,eRank=196.1,q75/q25=20.56 mlp_w2:H=0.8662,top10E=0.11,eRank=327.4,q75/q25=34.23 vo_prod:H=0.7591,top10E=0.22,eRank=162.5,q75/q25=2504.31 train_time:773229ms step_avg:77.32ms +[2025-09-02 15:11:19] [Rank 0] PRINT: step:10000/10000 val_loss:3.8326 svd_entropy: attn_qk:H=0.7537,top10E=0.26,eRank=156.5,q75/q25=111.21 attn_vo:H=0.8413,top10E=0.14,eRank=289.4,q75/q25=52.74 mlp_w1:H=0.7763,top10E=0.27,eRank=196.1,q75/q25=20.56 mlp_w2:H=0.8662,top10E=0.11,eRank=327.4,q75/q25=34.23 vo_prod:H=0.7591,top10E=0.22,eRank=162.5,q75/q25=2504.31 train_time:773229ms step_avg:77.32ms +[2025-09-02 15:11:19] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 15:11:19 2025 --- +[2025-09-02 15:11:19] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 15:11:19 2025 --- +[2025-09-02 15:11:19] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB +[2025-09-02 15:11:19] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_47/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..09d0923c87e91027df39ef44e8994c52ab46e5a2 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_47/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "ae12d283-7f6e-4ff0-bb1f-a564a8f95b6c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_47/training_log_ae12d283-7f6e-4ff0-bb1f-a564a8f95b6c.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_47/training_log_ae12d283-7f6e-4ff0-bb1f-a564a8f95b6c.txt new file mode 100644 index 0000000000000000000000000000000000000000..4080e3db0f942a3d93698ad5c195383502c3680a --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_47/training_log_ae12d283-7f6e-4ff0-bb1f-a564a8f95b6c.txt @@ -0,0 +1,2984 @@ +[2025-09-02 16:00:13] [Rank 0] PRINT: --- Script Start: Tue Sep 2 16:00:13 2025 --- +[2025-09-02 16:00:13] [Rank 0] PRINT: --- Script Start: Tue Sep 2 16:00:13 2025 --- +[2025-09-02 16:00:13] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 16:00:13] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 16:00:13] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 16:00:13] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 16:00:13] [Rank 0] PRINT: Using fixed seed: 47 +[2025-09-02 16:00:13] [Rank 0] PRINT: Using fixed seed: 47 +[2025-09-02 16:00:13] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_47 +[2025-09-02 16:00:13] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_47 +[2025-09-02 16:00:13] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 16:00:13] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 16:00:13] [Rank 0] PRINT: Constructing model... +[2025-09-02 16:00:13] [Rank 0] PRINT: Constructing model... +[2025-09-02 16:00:15] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 16:00:15] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 16:00:15] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 16:00:15] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 16:00:15] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 16:00:15] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 16:00:15] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 16:00:15] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 16:00:15] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 16:00:15] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 16:00:15] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 16:00:15] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 16:00:15] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 16:00:15] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 16:00:15] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 16:00:15] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 16:00:15] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 16:00:15] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 16:00:15] [Rank 0] PRINT: Starting warmup... +[2025-09-02 16:00:15] [Rank 0] PRINT: Starting warmup... +[2025-09-02 16:00:59] [Rank 0] PRINT: Warmup complete. +[2025-09-02 16:00:59] [Rank 0] PRINT: Warmup complete. +[2025-09-02 16:00:59] [Rank 0] PRINT: Starting training... +[2025-09-02 16:00:59] [Rank 0] PRINT: Starting training... 
+[2025-09-02 16:00:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:00:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:02:22] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 16:02:22] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.28 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 16:02:23] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.45ms +[2025-09-02 16:02:23] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.45ms +[2025-09-02 16:02:25] [Rank 0] step:41/10000 train_time:2808ms step_avg:68.49ms +[2025-09-02 16:02:25] [Rank 0] step:41/10000 train_time:2808ms step_avg:68.49ms +[2025-09-02 16:02:26] [Rank 0] step:61/10000 train_time:4156ms step_avg:68.13ms +[2025-09-02 16:02:26] [Rank 0] step:61/10000 train_time:4156ms step_avg:68.13ms +[2025-09-02 16:02:28] [Rank 0] step:81/10000 train_time:5555ms step_avg:68.57ms +[2025-09-02 16:02:28] [Rank 0] step:81/10000 train_time:5555ms step_avg:68.57ms +[2025-09-02 16:02:29] [Rank 0] step:101/10000 train_time:6954ms step_avg:68.85ms +[2025-09-02 16:02:29] [Rank 0] step:101/10000 train_time:6954ms step_avg:68.85ms +[2025-09-02 16:02:31] [Rank 0] step:121/10000 train_time:8354ms step_avg:69.04ms +[2025-09-02 16:02:31] [Rank 0] step:121/10000 
train_time:8354ms step_avg:69.04ms +[2025-09-02 16:02:32] [Rank 0] step:141/10000 train_time:9755ms step_avg:69.19ms +[2025-09-02 16:02:32] [Rank 0] step:141/10000 train_time:9755ms step_avg:69.19ms +[2025-09-02 16:02:33] [Rank 0] step:161/10000 train_time:11158ms step_avg:69.31ms +[2025-09-02 16:02:33] [Rank 0] step:161/10000 train_time:11158ms step_avg:69.31ms +[2025-09-02 16:02:35] [Rank 0] step:181/10000 train_time:12560ms step_avg:69.39ms +[2025-09-02 16:02:35] [Rank 0] step:181/10000 train_time:12560ms step_avg:69.39ms +[2025-09-02 16:02:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:02:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:02:48] [Rank 0] PRINT: step:200/10000 val_loss:6.4038 svd_entropy: attn_qk:H=0.4461,top10E=0.80,eRank=37.4,q75/q25=12.23 attn_vo:H=0.5830,top10E=0.60,eRank=117.8,q75/q25=69.08 mlp_w1:H=0.4135,top10E=0.74,eRank=25.2,q75/q25=2.75 mlp_w2:H=0.1678,top10E=0.94,eRank=4.7,q75/q25=593.64 vo_prod:H=0.3133,top10E=0.94,eRank=9.2,q75/q25=448.83 train_time:14102ms step_avg:70.51ms +[2025-09-02 16:02:48] [Rank 0] PRINT: step:200/10000 val_loss:6.4038 svd_entropy: attn_qk:H=0.4461,top10E=0.80,eRank=37.4,q75/q25=12.23 attn_vo:H=0.5830,top10E=0.60,eRank=117.8,q75/q25=69.08 mlp_w1:H=0.4135,top10E=0.74,eRank=25.2,q75/q25=2.75 mlp_w2:H=0.1678,top10E=0.94,eRank=4.7,q75/q25=593.64 vo_prod:H=0.3133,top10E=0.94,eRank=9.2,q75/q25=448.83 train_time:14102ms step_avg:70.51ms +[2025-09-02 16:02:48] [Rank 0] step:201/10000 train_time:14113ms step_avg:70.21ms +[2025-09-02 16:02:48] [Rank 0] step:201/10000 train_time:14113ms step_avg:70.21ms +[2025-09-02 16:02:49] [Rank 0] step:221/10000 train_time:15386ms step_avg:69.62ms +[2025-09-02 16:02:49] [Rank 0] step:221/10000 train_time:15386ms step_avg:69.62ms +[2025-09-02 16:02:51] [Rank 0] step:241/10000 
train_time:16787ms step_avg:69.65ms +[2025-09-02 16:02:51] [Rank 0] step:241/10000 train_time:16787ms step_avg:69.65ms +[2025-09-02 16:02:52] [Rank 0] step:261/10000 train_time:18189ms step_avg:69.69ms +[2025-09-02 16:02:52] [Rank 0] step:261/10000 train_time:18189ms step_avg:69.69ms +[2025-09-02 16:02:54] [Rank 0] step:281/10000 train_time:19592ms step_avg:69.72ms +[2025-09-02 16:02:54] [Rank 0] step:281/10000 train_time:19592ms step_avg:69.72ms +[2025-09-02 16:02:55] [Rank 0] step:301/10000 train_time:20996ms step_avg:69.75ms +[2025-09-02 16:02:55] [Rank 0] step:301/10000 train_time:20996ms step_avg:69.75ms +[2025-09-02 16:02:56] [Rank 0] step:321/10000 train_time:22401ms step_avg:69.79ms +[2025-09-02 16:02:56] [Rank 0] step:321/10000 train_time:22401ms step_avg:69.79ms +[2025-09-02 16:02:58] [Rank 0] step:341/10000 train_time:23808ms step_avg:69.82ms +[2025-09-02 16:02:58] [Rank 0] step:341/10000 train_time:23808ms step_avg:69.82ms +[2025-09-02 16:02:59] [Rank 0] step:361/10000 train_time:25214ms step_avg:69.85ms +[2025-09-02 16:02:59] [Rank 0] step:361/10000 train_time:25214ms step_avg:69.85ms +[2025-09-02 16:03:01] [Rank 0] step:381/10000 train_time:26621ms step_avg:69.87ms +[2025-09-02 16:03:01] [Rank 0] step:381/10000 train_time:26621ms step_avg:69.87ms +[2025-09-02 16:03:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:03:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:03:14] [Rank 0] PRINT: step:400/10000 val_loss:5.9085 svd_entropy: attn_qk:H=0.5052,top10E=0.69,eRank=46.2,q75/q25=13.52 attn_vo:H=0.6148,top10E=0.47,eRank=99.5,q75/q25=29.51 mlp_w1:H=0.4484,top10E=0.69,eRank=40.1,q75/q25=3.12 mlp_w2:H=0.5177,top10E=0.62,eRank=35.1,q75/q25=15.32 vo_prod:H=0.4570,top10E=0.73,eRank=22.0,q75/q25=219.87 train_time:28169ms step_avg:70.42ms +[2025-09-02 16:03:14] [Rank 0] PRINT: step:400/10000 val_loss:5.9085 svd_entropy: attn_qk:H=0.5052,top10E=0.69,eRank=46.2,q75/q25=13.52 attn_vo:H=0.6148,top10E=0.47,eRank=99.5,q75/q25=29.51 mlp_w1:H=0.4484,top10E=0.69,eRank=40.1,q75/q25=3.12 mlp_w2:H=0.5177,top10E=0.62,eRank=35.1,q75/q25=15.32 vo_prod:H=0.4570,top10E=0.73,eRank=22.0,q75/q25=219.87 train_time:28169ms step_avg:70.42ms +[2025-09-02 16:03:14] [Rank 0] step:401/10000 train_time:28179ms step_avg:70.27ms +[2025-09-02 16:03:14] [Rank 0] step:401/10000 train_time:28179ms step_avg:70.27ms +[2025-09-02 16:03:15] [Rank 0] step:421/10000 train_time:29464ms step_avg:69.99ms +[2025-09-02 16:03:15] [Rank 0] step:421/10000 train_time:29464ms step_avg:69.99ms +[2025-09-02 16:03:16] [Rank 0] step:441/10000 train_time:30869ms step_avg:70.00ms +[2025-09-02 16:03:16] [Rank 0] step:441/10000 train_time:30869ms step_avg:70.00ms +[2025-09-02 16:03:18] [Rank 0] step:461/10000 train_time:32275ms step_avg:70.01ms +[2025-09-02 16:03:18] [Rank 0] step:461/10000 train_time:32275ms step_avg:70.01ms +[2025-09-02 16:03:19] [Rank 0] step:481/10000 train_time:33681ms step_avg:70.02ms +[2025-09-02 16:03:19] [Rank 0] step:481/10000 train_time:33681ms step_avg:70.02ms +[2025-09-02 16:03:21] [Rank 0] step:501/10000 train_time:35087ms step_avg:70.03ms +[2025-09-02 16:03:21] [Rank 0] step:501/10000 train_time:35087ms step_avg:70.03ms +[2025-09-02 16:03:22] [Rank 0] step:521/10000 train_time:36493ms step_avg:70.04ms +[2025-09-02 16:03:22] [Rank 0] step:521/10000 train_time:36493ms step_avg:70.04ms +[2025-09-02 16:03:24] [Rank 0] step:541/10000 
train_time:37902ms step_avg:70.06ms +[2025-09-02 16:03:24] [Rank 0] step:541/10000 train_time:37902ms step_avg:70.06ms +[2025-09-02 16:03:25] [Rank 0] step:561/10000 train_time:39309ms step_avg:70.07ms +[2025-09-02 16:03:25] [Rank 0] step:561/10000 train_time:39309ms step_avg:70.07ms +[2025-09-02 16:03:26] [Rank 0] step:581/10000 train_time:40716ms step_avg:70.08ms +[2025-09-02 16:03:26] [Rank 0] step:581/10000 train_time:40716ms step_avg:70.08ms +[2025-09-02 16:03:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:03:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:03:40] [Rank 0] PRINT: step:600/10000 val_loss:5.6207 svd_entropy: attn_qk:H=0.5441,top10E=0.61,eRank=53.6,q75/q25=15.19 attn_vo:H=0.6474,top10E=0.39,eRank=109.1,q75/q25=26.84 mlp_w1:H=0.4826,top10E=0.65,eRank=50.4,q75/q25=3.40 mlp_w2:H=0.6149,top10E=0.48,eRank=64.5,q75/q25=11.17 vo_prod:H=0.5199,top10E=0.60,eRank=33.1,q75/q25=275.62 train_time:42265ms step_avg:70.44ms +[2025-09-02 16:03:40] [Rank 0] PRINT: step:600/10000 val_loss:5.6207 svd_entropy: attn_qk:H=0.5441,top10E=0.61,eRank=53.6,q75/q25=15.19 attn_vo:H=0.6474,top10E=0.39,eRank=109.1,q75/q25=26.84 mlp_w1:H=0.4826,top10E=0.65,eRank=50.4,q75/q25=3.40 mlp_w2:H=0.6149,top10E=0.48,eRank=64.5,q75/q25=11.17 vo_prod:H=0.5199,top10E=0.60,eRank=33.1,q75/q25=275.62 train_time:42265ms step_avg:70.44ms +[2025-09-02 16:03:40] [Rank 0] step:601/10000 train_time:42276ms step_avg:70.34ms +[2025-09-02 16:03:40] [Rank 0] step:601/10000 train_time:42276ms step_avg:70.34ms +[2025-09-02 16:03:41] [Rank 0] step:621/10000 train_time:43570ms step_avg:70.16ms +[2025-09-02 16:03:41] [Rank 0] step:621/10000 train_time:43570ms step_avg:70.16ms +[2025-09-02 16:03:42] [Rank 0] step:641/10000 train_time:44973ms step_avg:70.16ms +[2025-09-02 16:03:42] [Rank 0] step:641/10000 
train_time:44973ms step_avg:70.16ms +[2025-09-02 16:03:44] [Rank 0] step:661/10000 train_time:46377ms step_avg:70.16ms +[2025-09-02 16:03:44] [Rank 0] step:661/10000 train_time:46377ms step_avg:70.16ms +[2025-09-02 16:03:45] [Rank 0] step:681/10000 train_time:47781ms step_avg:70.16ms +[2025-09-02 16:03:45] [Rank 0] step:681/10000 train_time:47781ms step_avg:70.16ms +[2025-09-02 16:03:47] [Rank 0] step:701/10000 train_time:49186ms step_avg:70.17ms +[2025-09-02 16:03:47] [Rank 0] step:701/10000 train_time:49186ms step_avg:70.17ms +[2025-09-02 16:03:48] [Rank 0] step:721/10000 train_time:50591ms step_avg:70.17ms +[2025-09-02 16:03:48] [Rank 0] step:721/10000 train_time:50591ms step_avg:70.17ms +[2025-09-02 16:03:49] [Rank 0] step:741/10000 train_time:51997ms step_avg:70.17ms +[2025-09-02 16:03:49] [Rank 0] step:741/10000 train_time:51997ms step_avg:70.17ms +[2025-09-02 16:03:51] [Rank 0] step:761/10000 train_time:53413ms step_avg:70.19ms +[2025-09-02 16:03:51] [Rank 0] step:761/10000 train_time:53413ms step_avg:70.19ms +[2025-09-02 16:03:52] [Rank 0] step:781/10000 train_time:54833ms step_avg:70.21ms +[2025-09-02 16:03:52] [Rank 0] step:781/10000 train_time:54833ms step_avg:70.21ms +[2025-09-02 16:03:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:03:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:04:06] [Rank 0] PRINT: step:800/10000 val_loss:5.3988 svd_entropy: attn_qk:H=0.5725,top10E=0.55,eRank=60.2,q75/q25=17.43 attn_vo:H=0.6720,top10E=0.35,eRank=119.6,q75/q25=31.91 mlp_w1:H=0.5131,top10E=0.62,eRank=58.1,q75/q25=3.67 mlp_w2:H=0.6702,top10E=0.40,eRank=90.2,q75/q25=10.07 vo_prod:H=0.5553,top10E=0.53,eRank=41.7,q75/q25=555.74 train_time:56394ms step_avg:70.49ms +[2025-09-02 16:04:06] [Rank 0] PRINT: step:800/10000 val_loss:5.3988 svd_entropy: attn_qk:H=0.5725,top10E=0.55,eRank=60.2,q75/q25=17.43 attn_vo:H=0.6720,top10E=0.35,eRank=119.6,q75/q25=31.91 mlp_w1:H=0.5131,top10E=0.62,eRank=58.1,q75/q25=3.67 mlp_w2:H=0.6702,top10E=0.40,eRank=90.2,q75/q25=10.07 vo_prod:H=0.5553,top10E=0.53,eRank=41.7,q75/q25=555.74 train_time:56394ms step_avg:70.49ms +[2025-09-02 16:04:06] [Rank 0] step:801/10000 train_time:56405ms step_avg:70.42ms +[2025-09-02 16:04:06] [Rank 0] step:801/10000 train_time:56405ms step_avg:70.42ms +[2025-09-02 16:04:07] [Rank 0] step:821/10000 train_time:57703ms step_avg:70.28ms +[2025-09-02 16:04:07] [Rank 0] step:821/10000 train_time:57703ms step_avg:70.28ms +[2025-09-02 16:04:08] [Rank 0] step:841/10000 train_time:59119ms step_avg:70.30ms +[2025-09-02 16:04:08] [Rank 0] step:841/10000 train_time:59119ms step_avg:70.30ms +[2025-09-02 16:04:10] [Rank 0] step:861/10000 train_time:60538ms step_avg:70.31ms +[2025-09-02 16:04:10] [Rank 0] step:861/10000 train_time:60538ms step_avg:70.31ms +[2025-09-02 16:04:11] [Rank 0] step:881/10000 train_time:61956ms step_avg:70.32ms +[2025-09-02 16:04:11] [Rank 0] step:881/10000 train_time:61956ms step_avg:70.32ms +[2025-09-02 16:04:13] [Rank 0] step:901/10000 train_time:63374ms step_avg:70.34ms +[2025-09-02 16:04:13] [Rank 0] step:901/10000 train_time:63374ms step_avg:70.34ms +[2025-09-02 16:04:14] [Rank 0] step:921/10000 train_time:64792ms step_avg:70.35ms +[2025-09-02 16:04:14] [Rank 0] step:921/10000 train_time:64792ms step_avg:70.35ms +[2025-09-02 16:04:16] [Rank 0] step:941/10000 
train_time:66211ms step_avg:70.36ms +[2025-09-02 16:04:16] [Rank 0] step:941/10000 train_time:66211ms step_avg:70.36ms +[2025-09-02 16:04:17] [Rank 0] step:961/10000 train_time:67630ms step_avg:70.37ms +[2025-09-02 16:04:17] [Rank 0] step:961/10000 train_time:67630ms step_avg:70.37ms +[2025-09-02 16:04:18] [Rank 0] step:981/10000 train_time:69050ms step_avg:70.39ms +[2025-09-02 16:04:18] [Rank 0] step:981/10000 train_time:69050ms step_avg:70.39ms +[2025-09-02 16:04:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:04:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:04:32] [Rank 0] PRINT: step:1000/10000 val_loss:5.2361 svd_entropy: attn_qk:H=0.5941,top10E=0.51,eRank=66.3,q75/q25=20.20 attn_vo:H=0.6930,top10E=0.31,eRank=131.1,q75/q25=42.35 mlp_w1:H=0.5391,top10E=0.59,eRank=64.9,q75/q25=3.95 mlp_w2:H=0.7134,top10E=0.34,eRank=117.7,q75/q25=10.03 vo_prod:H=0.5823,top10E=0.47,eRank=49.8,q75/q25=1219.71 train_time:70610ms step_avg:70.61ms +[2025-09-02 16:04:32] [Rank 0] PRINT: step:1000/10000 val_loss:5.2361 svd_entropy: attn_qk:H=0.5941,top10E=0.51,eRank=66.3,q75/q25=20.20 attn_vo:H=0.6930,top10E=0.31,eRank=131.1,q75/q25=42.35 mlp_w1:H=0.5391,top10E=0.59,eRank=64.9,q75/q25=3.95 mlp_w2:H=0.7134,top10E=0.34,eRank=117.7,q75/q25=10.03 vo_prod:H=0.5823,top10E=0.47,eRank=49.8,q75/q25=1219.71 train_time:70610ms step_avg:70.61ms +[2025-09-02 16:04:32] [Rank 0] step:1001/10000 train_time:70621ms step_avg:70.55ms +[2025-09-02 16:04:32] [Rank 0] step:1001/10000 train_time:70621ms step_avg:70.55ms +[2025-09-02 16:04:33] [Rank 0] step:1021/10000 train_time:71913ms step_avg:70.43ms +[2025-09-02 16:04:33] [Rank 0] step:1021/10000 train_time:71913ms step_avg:70.43ms +[2025-09-02 16:04:35] [Rank 0] step:1041/10000 train_time:73330ms step_avg:70.44ms +[2025-09-02 16:04:35] [Rank 0] 
step:1041/10000 train_time:73330ms step_avg:70.44ms +[2025-09-02 16:04:36] [Rank 0] step:1061/10000 train_time:74750ms step_avg:70.45ms +[2025-09-02 16:04:36] [Rank 0] step:1061/10000 train_time:74750ms step_avg:70.45ms +[2025-09-02 16:04:37] [Rank 0] step:1081/10000 train_time:76168ms step_avg:70.46ms +[2025-09-02 16:04:37] [Rank 0] step:1081/10000 train_time:76168ms step_avg:70.46ms +[2025-09-02 16:04:39] [Rank 0] step:1101/10000 train_time:77587ms step_avg:70.47ms +[2025-09-02 16:04:39] [Rank 0] step:1101/10000 train_time:77587ms step_avg:70.47ms +[2025-09-02 16:04:40] [Rank 0] step:1121/10000 train_time:79006ms step_avg:70.48ms +[2025-09-02 16:04:40] [Rank 0] step:1121/10000 train_time:79006ms step_avg:70.48ms +[2025-09-02 16:04:42] [Rank 0] step:1141/10000 train_time:80425ms step_avg:70.49ms +[2025-09-02 16:04:42] [Rank 0] step:1141/10000 train_time:80425ms step_avg:70.49ms +[2025-09-02 16:04:43] [Rank 0] step:1161/10000 train_time:81844ms step_avg:70.49ms +[2025-09-02 16:04:43] [Rank 0] step:1161/10000 train_time:81844ms step_avg:70.49ms +[2025-09-02 16:04:45] [Rank 0] step:1181/10000 train_time:83265ms step_avg:70.50ms +[2025-09-02 16:04:45] [Rank 0] step:1181/10000 train_time:83265ms step_avg:70.50ms +[2025-09-02 16:04:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:04:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:04:58] [Rank 0] PRINT: step:1200/10000 val_loss:5.0890 svd_entropy: attn_qk:H=0.6117,top10E=0.47,eRank=72.2,q75/q25=23.98 attn_vo:H=0.7107,top10E=0.29,eRank=142.4,q75/q25=55.35 mlp_w1:H=0.5584,top10E=0.56,eRank=70.8,q75/q25=4.21 mlp_w2:H=0.7362,top10E=0.31,eRank=137.0,q75/q25=10.95 vo_prod:H=0.6027,top10E=0.44,eRank=56.9,q75/q25=2435.64 train_time:84827ms step_avg:70.69ms +[2025-09-02 16:04:58] [Rank 0] PRINT: step:1200/10000 val_loss:5.0890 svd_entropy: attn_qk:H=0.6117,top10E=0.47,eRank=72.2,q75/q25=23.98 attn_vo:H=0.7107,top10E=0.29,eRank=142.4,q75/q25=55.35 mlp_w1:H=0.5584,top10E=0.56,eRank=70.8,q75/q25=4.21 mlp_w2:H=0.7362,top10E=0.31,eRank=137.0,q75/q25=10.95 vo_prod:H=0.6027,top10E=0.44,eRank=56.9,q75/q25=2435.64 train_time:84827ms step_avg:70.69ms +[2025-09-02 16:04:58] [Rank 0] step:1201/10000 train_time:84838ms step_avg:70.64ms +[2025-09-02 16:04:58] [Rank 0] step:1201/10000 train_time:84838ms step_avg:70.64ms +[2025-09-02 16:04:59] [Rank 0] step:1221/10000 train_time:86125ms step_avg:70.54ms +[2025-09-02 16:04:59] [Rank 0] step:1221/10000 train_time:86125ms step_avg:70.54ms +[2025-09-02 16:05:01] [Rank 0] step:1241/10000 train_time:87543ms step_avg:70.54ms +[2025-09-02 16:05:01] [Rank 0] step:1241/10000 train_time:87543ms step_avg:70.54ms +[2025-09-02 16:05:02] [Rank 0] step:1261/10000 train_time:88962ms step_avg:70.55ms +[2025-09-02 16:05:02] [Rank 0] step:1261/10000 train_time:88962ms step_avg:70.55ms +[2025-09-02 16:05:04] [Rank 0] step:1281/10000 train_time:90380ms step_avg:70.55ms +[2025-09-02 16:05:04] [Rank 0] step:1281/10000 train_time:90380ms step_avg:70.55ms +[2025-09-02 16:05:05] [Rank 0] step:1301/10000 train_time:91800ms step_avg:70.56ms +[2025-09-02 16:05:05] [Rank 0] step:1301/10000 train_time:91800ms step_avg:70.56ms +[2025-09-02 16:05:06] [Rank 0] step:1321/10000 train_time:93221ms step_avg:70.57ms +[2025-09-02 16:05:06] [Rank 0] step:1321/10000 train_time:93221ms step_avg:70.57ms +[2025-09-02 16:05:08] [Rank 0] 
step:1341/10000 train_time:94641ms step_avg:70.58ms +[2025-09-02 16:05:08] [Rank 0] step:1341/10000 train_time:94641ms step_avg:70.58ms +[2025-09-02 16:05:09] [Rank 0] step:1361/10000 train_time:96062ms step_avg:70.58ms +[2025-09-02 16:05:09] [Rank 0] step:1361/10000 train_time:96062ms step_avg:70.58ms +[2025-09-02 16:05:11] [Rank 0] step:1381/10000 train_time:97482ms step_avg:70.59ms +[2025-09-02 16:05:11] [Rank 0] step:1381/10000 train_time:97482ms step_avg:70.59ms +[2025-09-02 16:05:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:05:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:05:24] [Rank 0] PRINT: step:1400/10000 val_loss:4.9693 svd_entropy: attn_qk:H=0.6262,top10E=0.45,eRank=77.7,q75/q25=28.77 attn_vo:H=0.7257,top10E=0.27,eRank=153.3,q75/q25=67.57 mlp_w1:H=0.5752,top10E=0.54,eRank=76.4,q75/q25=4.51 mlp_w2:H=0.7539,top10E=0.28,eRank=153.9,q75/q25=11.70 vo_prod:H=0.6196,top10E=0.41,eRank=63.8,q75/q25=3942.72 train_time:99045ms step_avg:70.75ms +[2025-09-02 16:05:24] [Rank 0] PRINT: step:1400/10000 val_loss:4.9693 svd_entropy: attn_qk:H=0.6262,top10E=0.45,eRank=77.7,q75/q25=28.77 attn_vo:H=0.7257,top10E=0.27,eRank=153.3,q75/q25=67.57 mlp_w1:H=0.5752,top10E=0.54,eRank=76.4,q75/q25=4.51 mlp_w2:H=0.7539,top10E=0.28,eRank=153.9,q75/q25=11.70 vo_prod:H=0.6196,top10E=0.41,eRank=63.8,q75/q25=3942.72 train_time:99045ms step_avg:70.75ms +[2025-09-02 16:05:24] [Rank 0] step:1401/10000 train_time:99056ms step_avg:70.70ms +[2025-09-02 16:05:24] [Rank 0] step:1401/10000 train_time:99056ms step_avg:70.70ms +[2025-09-02 16:05:25] [Rank 0] step:1421/10000 train_time:100344ms step_avg:70.61ms +[2025-09-02 16:05:25] [Rank 0] step:1421/10000 train_time:100344ms step_avg:70.61ms +[2025-09-02 16:05:27] [Rank 0] step:1441/10000 train_time:101762ms step_avg:70.62ms +[2025-09-02 16:05:27] 
[Rank 0] step:1441/10000 train_time:101762ms step_avg:70.62ms +[2025-09-02 16:05:28] [Rank 0] step:1461/10000 train_time:103180ms step_avg:70.62ms +[2025-09-02 16:05:28] [Rank 0] step:1461/10000 train_time:103180ms step_avg:70.62ms +[2025-09-02 16:05:29] [Rank 0] step:1481/10000 train_time:104599ms step_avg:70.63ms +[2025-09-02 16:05:29] [Rank 0] step:1481/10000 train_time:104599ms step_avg:70.63ms +[2025-09-02 16:05:31] [Rank 0] step:1501/10000 train_time:106027ms step_avg:70.64ms +[2025-09-02 16:05:31] [Rank 0] step:1501/10000 train_time:106027ms step_avg:70.64ms +[2025-09-02 16:05:32] [Rank 0] step:1521/10000 train_time:107460ms step_avg:70.65ms +[2025-09-02 16:05:32] [Rank 0] step:1521/10000 train_time:107460ms step_avg:70.65ms +[2025-09-02 16:05:34] [Rank 0] step:1541/10000 train_time:108891ms step_avg:70.66ms +[2025-09-02 16:05:34] [Rank 0] step:1541/10000 train_time:108891ms step_avg:70.66ms +[2025-09-02 16:05:35] [Rank 0] step:1561/10000 train_time:110323ms step_avg:70.67ms +[2025-09-02 16:05:35] [Rank 0] step:1561/10000 train_time:110323ms step_avg:70.67ms +[2025-09-02 16:05:37] [Rank 0] step:1581/10000 train_time:111754ms step_avg:70.69ms +[2025-09-02 16:05:37] [Rank 0] step:1581/10000 train_time:111754ms step_avg:70.69ms +[2025-09-02 16:05:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:05:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:05:50] [Rank 0] PRINT: step:1600/10000 val_loss:4.8428 svd_entropy: attn_qk:H=0.6382,top10E=0.43,eRank=82.4,q75/q25=34.42 attn_vo:H=0.7385,top10E=0.25,eRank=163.7,q75/q25=77.28 mlp_w1:H=0.5899,top10E=0.53,eRank=81.8,q75/q25=4.82 mlp_w2:H=0.7659,top10E=0.26,eRank=166.9,q75/q25=12.76 vo_prod:H=0.6340,top10E=0.38,eRank=70.1,q75/q25=5692.51 train_time:113352ms step_avg:70.85ms +[2025-09-02 16:05:50] [Rank 0] PRINT: step:1600/10000 val_loss:4.8428 svd_entropy: attn_qk:H=0.6382,top10E=0.43,eRank=82.4,q75/q25=34.42 attn_vo:H=0.7385,top10E=0.25,eRank=163.7,q75/q25=77.28 mlp_w1:H=0.5899,top10E=0.53,eRank=81.8,q75/q25=4.82 mlp_w2:H=0.7659,top10E=0.26,eRank=166.9,q75/q25=12.76 vo_prod:H=0.6340,top10E=0.38,eRank=70.1,q75/q25=5692.51 train_time:113352ms step_avg:70.85ms +[2025-09-02 16:05:50] [Rank 0] step:1601/10000 train_time:113362ms step_avg:70.81ms +[2025-09-02 16:05:50] [Rank 0] step:1601/10000 train_time:113362ms step_avg:70.81ms +[2025-09-02 16:05:51] [Rank 0] step:1621/10000 train_time:114660ms step_avg:70.73ms +[2025-09-02 16:05:51] [Rank 0] step:1621/10000 train_time:114660ms step_avg:70.73ms +[2025-09-02 16:05:53] [Rank 0] step:1641/10000 train_time:116092ms step_avg:70.74ms +[2025-09-02 16:05:53] [Rank 0] step:1641/10000 train_time:116092ms step_avg:70.74ms +[2025-09-02 16:05:54] [Rank 0] step:1661/10000 train_time:117519ms step_avg:70.75ms +[2025-09-02 16:05:54] [Rank 0] step:1661/10000 train_time:117519ms step_avg:70.75ms +[2025-09-02 16:05:55] [Rank 0] step:1681/10000 train_time:118949ms step_avg:70.76ms +[2025-09-02 16:05:55] [Rank 0] step:1681/10000 train_time:118949ms step_avg:70.76ms +[2025-09-02 16:05:57] [Rank 0] step:1701/10000 train_time:120379ms step_avg:70.77ms +[2025-09-02 16:05:57] [Rank 0] step:1701/10000 train_time:120379ms step_avg:70.77ms +[2025-09-02 16:05:58] [Rank 0] step:1721/10000 train_time:121808ms step_avg:70.78ms +[2025-09-02 16:05:58] [Rank 0] step:1721/10000 train_time:121808ms step_avg:70.78ms +[2025-09-02 16:06:00] 
[Rank 0] step:1741/10000 train_time:123238ms step_avg:70.79ms +[2025-09-02 16:06:00] [Rank 0] step:1741/10000 train_time:123238ms step_avg:70.79ms +[2025-09-02 16:06:01] [Rank 0] step:1761/10000 train_time:124668ms step_avg:70.79ms +[2025-09-02 16:06:01] [Rank 0] step:1761/10000 train_time:124668ms step_avg:70.79ms +[2025-09-02 16:06:03] [Rank 0] step:1781/10000 train_time:126099ms step_avg:70.80ms +[2025-09-02 16:06:03] [Rank 0] step:1781/10000 train_time:126099ms step_avg:70.80ms +[2025-09-02 16:06:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:06:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:06:15] [Rank 0] PRINT: step:1800/10000 val_loss:4.7475 svd_entropy: attn_qk:H=0.6487,top10E=0.41,eRank=86.9,q75/q25=40.44 attn_vo:H=0.7492,top10E=0.24,eRank=173.1,q75/q25=84.48 mlp_w1:H=0.6032,top10E=0.51,eRank=86.8,q75/q25=5.16 mlp_w2:H=0.7756,top10E=0.25,eRank=178.3,q75/q25=13.69 vo_prod:H=0.6462,top10E=0.36,eRank=76.1,q75/q25=7393.73 train_time:127673ms step_avg:70.93ms +[2025-09-02 16:06:15] [Rank 0] PRINT: step:1800/10000 val_loss:4.7475 svd_entropy: attn_qk:H=0.6487,top10E=0.41,eRank=86.9,q75/q25=40.44 attn_vo:H=0.7492,top10E=0.24,eRank=173.1,q75/q25=84.48 mlp_w1:H=0.6032,top10E=0.51,eRank=86.8,q75/q25=5.16 mlp_w2:H=0.7756,top10E=0.25,eRank=178.3,q75/q25=13.69 vo_prod:H=0.6462,top10E=0.36,eRank=76.1,q75/q25=7393.73 train_time:127673ms step_avg:70.93ms +[2025-09-02 16:06:15] [Rank 0] step:1801/10000 train_time:127683ms step_avg:70.90ms +[2025-09-02 16:06:15] [Rank 0] step:1801/10000 train_time:127683ms step_avg:70.90ms +[2025-09-02 16:06:17] [Rank 0] step:1821/10000 train_time:128990ms step_avg:70.83ms +[2025-09-02 16:06:17] [Rank 0] step:1821/10000 train_time:128990ms step_avg:70.83ms +[2025-09-02 16:06:18] [Rank 0] step:1841/10000 train_time:130417ms step_avg:70.84ms 
+[2025-09-02 16:06:18] [Rank 0] step:1841/10000 train_time:130417ms step_avg:70.84ms +[2025-09-02 16:06:20] [Rank 0] step:1861/10000 train_time:131848ms step_avg:70.85ms +[2025-09-02 16:06:20] [Rank 0] step:1861/10000 train_time:131848ms step_avg:70.85ms +[2025-09-02 16:06:21] [Rank 0] step:1881/10000 train_time:133278ms step_avg:70.85ms +[2025-09-02 16:06:21] [Rank 0] step:1881/10000 train_time:133278ms step_avg:70.85ms +[2025-09-02 16:06:23] [Rank 0] step:1901/10000 train_time:134708ms step_avg:70.86ms +[2025-09-02 16:06:23] [Rank 0] step:1901/10000 train_time:134708ms step_avg:70.86ms +[2025-09-02 16:06:24] [Rank 0] step:1921/10000 train_time:136137ms step_avg:70.87ms +[2025-09-02 16:06:24] [Rank 0] step:1921/10000 train_time:136137ms step_avg:70.87ms +[2025-09-02 16:06:25] [Rank 0] step:1941/10000 train_time:137566ms step_avg:70.87ms +[2025-09-02 16:06:25] [Rank 0] step:1941/10000 train_time:137566ms step_avg:70.87ms +[2025-09-02 16:06:27] [Rank 0] step:1961/10000 train_time:138996ms step_avg:70.88ms +[2025-09-02 16:06:27] [Rank 0] step:1961/10000 train_time:138996ms step_avg:70.88ms +[2025-09-02 16:06:28] [Rank 0] step:1981/10000 train_time:140426ms step_avg:70.89ms +[2025-09-02 16:06:28] [Rank 0] step:1981/10000 train_time:140426ms step_avg:70.89ms +[2025-09-02 16:06:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:06:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:06:41] [Rank 0] PRINT: step:2000/10000 val_loss:4.6864 svd_entropy: attn_qk:H=0.6578,top10E=0.39,eRank=91.2,q75/q25=46.88 attn_vo:H=0.7585,top10E=0.23,eRank=181.9,q75/q25=90.03 mlp_w1:H=0.6152,top10E=0.49,eRank=91.6,q75/q25=5.49 mlp_w2:H=0.7841,top10E=0.23,eRank=188.8,q75/q25=14.55 vo_prod:H=0.6569,top10E=0.35,eRank=81.7,q75/q25=8796.50 train_time:141999ms step_avg:71.00ms +[2025-09-02 16:06:41] [Rank 0] PRINT: step:2000/10000 val_loss:4.6864 svd_entropy: attn_qk:H=0.6578,top10E=0.39,eRank=91.2,q75/q25=46.88 attn_vo:H=0.7585,top10E=0.23,eRank=181.9,q75/q25=90.03 mlp_w1:H=0.6152,top10E=0.49,eRank=91.6,q75/q25=5.49 mlp_w2:H=0.7841,top10E=0.23,eRank=188.8,q75/q25=14.55 vo_prod:H=0.6569,top10E=0.35,eRank=81.7,q75/q25=8796.50 train_time:141999ms step_avg:71.00ms +[2025-09-02 16:06:41] [Rank 0] step:2001/10000 train_time:142009ms step_avg:70.97ms +[2025-09-02 16:06:41] [Rank 0] step:2001/10000 train_time:142009ms step_avg:70.97ms +[2025-09-02 16:06:43] [Rank 0] step:2021/10000 train_time:143308ms step_avg:70.91ms +[2025-09-02 16:06:43] [Rank 0] step:2021/10000 train_time:143308ms step_avg:70.91ms +[2025-09-02 16:06:44] [Rank 0] step:2041/10000 train_time:144855ms step_avg:70.97ms +[2025-09-02 16:06:44] [Rank 0] step:2041/10000 train_time:144855ms step_avg:70.97ms +[2025-09-02 16:06:46] [Rank 0] step:2061/10000 train_time:146285ms step_avg:70.98ms +[2025-09-02 16:06:46] [Rank 0] step:2061/10000 train_time:146285ms step_avg:70.98ms +[2025-09-02 16:06:47] [Rank 0] step:2081/10000 train_time:147715ms step_avg:70.98ms +[2025-09-02 16:06:47] [Rank 0] step:2081/10000 train_time:147715ms step_avg:70.98ms +[2025-09-02 16:06:49] [Rank 0] step:2101/10000 train_time:149150ms step_avg:70.99ms +[2025-09-02 16:06:49] [Rank 0] step:2101/10000 train_time:149150ms step_avg:70.99ms +[2025-09-02 16:06:50] [Rank 0] step:2121/10000 train_time:150580ms step_avg:71.00ms +[2025-09-02 16:06:50] [Rank 0] step:2121/10000 train_time:150580ms step_avg:71.00ms +[2025-09-02 16:06:52] 
[Rank 0] step:2141/10000 train_time:152011ms step_avg:71.00ms +[2025-09-02 16:06:52] [Rank 0] step:2141/10000 train_time:152011ms step_avg:71.00ms +[2025-09-02 16:06:53] [Rank 0] step:2161/10000 train_time:153442ms step_avg:71.00ms +[2025-09-02 16:06:53] [Rank 0] step:2161/10000 train_time:153442ms step_avg:71.00ms +[2025-09-02 16:06:54] [Rank 0] step:2181/10000 train_time:154873ms step_avg:71.01ms +[2025-09-02 16:06:54] [Rank 0] step:2181/10000 train_time:154873ms step_avg:71.01ms +[2025-09-02 16:06:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:06:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:07:07] [Rank 0] PRINT: step:2200/10000 val_loss:4.6143 svd_entropy: attn_qk:H=0.6655,top10E=0.38,eRank=95.0,q75/q25=53.05 attn_vo:H=0.7664,top10E=0.22,eRank=189.7,q75/q25=93.01 mlp_w1:H=0.6258,top10E=0.48,eRank=96.1,q75/q25=5.81 mlp_w2:H=0.7904,top10E=0.22,eRank=197.2,q75/q25=15.22 vo_prod:H=0.6659,top10E=0.33,eRank=86.8,q75/q25=10161.62 train_time:156448ms step_avg:71.11ms +[2025-09-02 16:07:07] [Rank 0] PRINT: step:2200/10000 val_loss:4.6143 svd_entropy: attn_qk:H=0.6655,top10E=0.38,eRank=95.0,q75/q25=53.05 attn_vo:H=0.7664,top10E=0.22,eRank=189.7,q75/q25=93.01 mlp_w1:H=0.6258,top10E=0.48,eRank=96.1,q75/q25=5.81 mlp_w2:H=0.7904,top10E=0.22,eRank=197.2,q75/q25=15.22 vo_prod:H=0.6659,top10E=0.33,eRank=86.8,q75/q25=10161.62 train_time:156448ms step_avg:71.11ms +[2025-09-02 16:07:08] [Rank 0] step:2201/10000 train_time:156458ms step_avg:71.09ms +[2025-09-02 16:07:08] [Rank 0] step:2201/10000 train_time:156458ms step_avg:71.09ms +[2025-09-02 16:07:09] [Rank 0] step:2221/10000 train_time:157772ms step_avg:71.04ms +[2025-09-02 16:07:09] [Rank 0] step:2221/10000 train_time:157772ms step_avg:71.04ms +[2025-09-02 16:07:10] [Rank 0] step:2241/10000 train_time:159234ms step_avg:71.05ms 
+[2025-09-02 16:07:10] [Rank 0] step:2241/10000 train_time:159234ms step_avg:71.05ms +[2025-09-02 16:07:12] [Rank 0] step:2261/10000 train_time:160707ms step_avg:71.08ms +[2025-09-02 16:07:12] [Rank 0] step:2261/10000 train_time:160707ms step_avg:71.08ms +[2025-09-02 16:07:13] [Rank 0] step:2281/10000 train_time:162181ms step_avg:71.10ms +[2025-09-02 16:07:13] [Rank 0] step:2281/10000 train_time:162181ms step_avg:71.10ms +[2025-09-02 16:07:15] [Rank 0] step:2301/10000 train_time:163656ms step_avg:71.12ms +[2025-09-02 16:07:15] [Rank 0] step:2301/10000 train_time:163656ms step_avg:71.12ms +[2025-09-02 16:07:16] [Rank 0] step:2321/10000 train_time:165130ms step_avg:71.15ms +[2025-09-02 16:07:16] [Rank 0] step:2321/10000 train_time:165130ms step_avg:71.15ms +[2025-09-02 16:07:18] [Rank 0] step:2341/10000 train_time:166605ms step_avg:71.17ms +[2025-09-02 16:07:18] [Rank 0] step:2341/10000 train_time:166605ms step_avg:71.17ms +[2025-09-02 16:07:19] [Rank 0] step:2361/10000 train_time:168080ms step_avg:71.19ms +[2025-09-02 16:07:19] [Rank 0] step:2361/10000 train_time:168080ms step_avg:71.19ms +[2025-09-02 16:07:21] [Rank 0] step:2381/10000 train_time:169556ms step_avg:71.21ms +[2025-09-02 16:07:21] [Rank 0] step:2381/10000 train_time:169556ms step_avg:71.21ms +[2025-09-02 16:07:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:07:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:07:34] [Rank 0] PRINT: step:2400/10000 val_loss:4.5409 svd_entropy: attn_qk:H=0.6720,top10E=0.37,eRank=98.3,q75/q25=58.80 attn_vo:H=0.7734,top10E=0.21,eRank=197.3,q75/q25=94.48 mlp_w1:H=0.6352,top10E=0.47,eRank=100.4,q75/q25=6.13 mlp_w2:H=0.7964,top10E=0.21,eRank=205.5,q75/q25=15.92 vo_prod:H=0.6747,top10E=0.32,eRank=92.1,q75/q25=10797.48 train_time:171180ms step_avg:71.33ms +[2025-09-02 16:07:34] [Rank 0] PRINT: step:2400/10000 val_loss:4.5409 svd_entropy: attn_qk:H=0.6720,top10E=0.37,eRank=98.3,q75/q25=58.80 attn_vo:H=0.7734,top10E=0.21,eRank=197.3,q75/q25=94.48 mlp_w1:H=0.6352,top10E=0.47,eRank=100.4,q75/q25=6.13 mlp_w2:H=0.7964,top10E=0.21,eRank=205.5,q75/q25=15.92 vo_prod:H=0.6747,top10E=0.32,eRank=92.1,q75/q25=10797.48 train_time:171180ms step_avg:71.33ms +[2025-09-02 16:07:34] [Rank 0] step:2401/10000 train_time:171191ms step_avg:71.30ms +[2025-09-02 16:07:34] [Rank 0] step:2401/10000 train_time:171191ms step_avg:71.30ms +[2025-09-02 16:07:35] [Rank 0] step:2421/10000 train_time:172544ms step_avg:71.27ms +[2025-09-02 16:07:35] [Rank 0] step:2421/10000 train_time:172544ms step_avg:71.27ms +[2025-09-02 16:07:37] [Rank 0] step:2441/10000 train_time:174016ms step_avg:71.29ms +[2025-09-02 16:07:37] [Rank 0] step:2441/10000 train_time:174016ms step_avg:71.29ms +[2025-09-02 16:07:38] [Rank 0] step:2461/10000 train_time:175489ms step_avg:71.31ms +[2025-09-02 16:07:38] [Rank 0] step:2461/10000 train_time:175489ms step_avg:71.31ms +[2025-09-02 16:07:40] [Rank 0] step:2481/10000 train_time:176962ms step_avg:71.33ms +[2025-09-02 16:07:40] [Rank 0] step:2481/10000 train_time:176962ms step_avg:71.33ms +[2025-09-02 16:07:41] [Rank 0] step:2501/10000 train_time:178436ms step_avg:71.35ms +[2025-09-02 16:07:41] [Rank 0] step:2501/10000 train_time:178436ms step_avg:71.35ms +[2025-09-02 16:07:43] [Rank 0] step:2521/10000 train_time:179911ms step_avg:71.36ms +[2025-09-02 16:07:43] [Rank 0] step:2521/10000 train_time:179911ms step_avg:71.36ms +[2025-09-02 
16:07:44] [Rank 0] step:2541/10000 train_time:181386ms step_avg:71.38ms +[2025-09-02 16:07:44] [Rank 0] step:2541/10000 train_time:181386ms step_avg:71.38ms +[2025-09-02 16:07:46] [Rank 0] step:2561/10000 train_time:182861ms step_avg:71.40ms +[2025-09-02 16:07:46] [Rank 0] step:2561/10000 train_time:182861ms step_avg:71.40ms +[2025-09-02 16:07:47] [Rank 0] step:2581/10000 train_time:184335ms step_avg:71.42ms +[2025-09-02 16:07:47] [Rank 0] step:2581/10000 train_time:184335ms step_avg:71.42ms +[2025-09-02 16:07:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:07:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:08:00] [Rank 0] PRINT: step:2600/10000 val_loss:4.4861 svd_entropy: attn_qk:H=0.6783,top10E=0.36,eRank=101.7,q75/q25=64.85 attn_vo:H=0.7797,top10E=0.20,eRank=204.2,q75/q25=95.54 mlp_w1:H=0.6436,top10E=0.46,eRank=104.4,q75/q25=6.48 mlp_w2:H=0.8010,top10E=0.20,eRank=212.3,q75/q25=16.51 vo_prod:H=0.6822,top10E=0.31,eRank=96.8,q75/q25=11224.37 train_time:185958ms step_avg:71.52ms +[2025-09-02 16:08:00] [Rank 0] PRINT: step:2600/10000 val_loss:4.4861 svd_entropy: attn_qk:H=0.6783,top10E=0.36,eRank=101.7,q75/q25=64.85 attn_vo:H=0.7797,top10E=0.20,eRank=204.2,q75/q25=95.54 mlp_w1:H=0.6436,top10E=0.46,eRank=104.4,q75/q25=6.48 mlp_w2:H=0.8010,top10E=0.20,eRank=212.3,q75/q25=16.51 vo_prod:H=0.6822,top10E=0.31,eRank=96.8,q75/q25=11224.37 train_time:185958ms step_avg:71.52ms +[2025-09-02 16:08:00] [Rank 0] step:2601/10000 train_time:185969ms step_avg:71.50ms +[2025-09-02 16:08:00] [Rank 0] step:2601/10000 train_time:185969ms step_avg:71.50ms +[2025-09-02 16:08:02] [Rank 0] step:2621/10000 train_time:187314ms step_avg:71.47ms +[2025-09-02 16:08:02] [Rank 0] step:2621/10000 train_time:187314ms step_avg:71.47ms +[2025-09-02 16:08:03] [Rank 0] step:2641/10000 train_time:188787ms 
step_avg:71.48ms +[2025-09-02 16:08:03] [Rank 0] step:2641/10000 train_time:188787ms step_avg:71.48ms +[2025-09-02 16:08:05] [Rank 0] step:2661/10000 train_time:190260ms step_avg:71.50ms +[2025-09-02 16:08:05] [Rank 0] step:2661/10000 train_time:190260ms step_avg:71.50ms +[2025-09-02 16:08:06] [Rank 0] step:2681/10000 train_time:191733ms step_avg:71.52ms +[2025-09-02 16:08:06] [Rank 0] step:2681/10000 train_time:191733ms step_avg:71.52ms +[2025-09-02 16:08:08] [Rank 0] step:2701/10000 train_time:193206ms step_avg:71.53ms +[2025-09-02 16:08:08] [Rank 0] step:2701/10000 train_time:193206ms step_avg:71.53ms +[2025-09-02 16:08:09] [Rank 0] step:2721/10000 train_time:194678ms step_avg:71.55ms +[2025-09-02 16:08:09] [Rank 0] step:2721/10000 train_time:194678ms step_avg:71.55ms +[2025-09-02 16:08:11] [Rank 0] step:2741/10000 train_time:196153ms step_avg:71.56ms +[2025-09-02 16:08:11] [Rank 0] step:2741/10000 train_time:196153ms step_avg:71.56ms +[2025-09-02 16:08:12] [Rank 0] step:2761/10000 train_time:197628ms step_avg:71.58ms +[2025-09-02 16:08:12] [Rank 0] step:2761/10000 train_time:197628ms step_avg:71.58ms +[2025-09-02 16:08:14] [Rank 0] step:2781/10000 train_time:199102ms step_avg:71.59ms +[2025-09-02 16:08:14] [Rank 0] step:2781/10000 train_time:199102ms step_avg:71.59ms +[2025-09-02 16:08:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:08:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:08:27] [Rank 0] PRINT: step:2800/10000 val_loss:4.4485 svd_entropy: attn_qk:H=0.6841,top10E=0.35,eRank=105.0,q75/q25=70.96 attn_vo:H=0.7855,top10E=0.19,eRank=210.9,q75/q25=95.41 mlp_w1:H=0.6513,top10E=0.45,eRank=108.3,q75/q25=6.76 mlp_w2:H=0.8048,top10E=0.20,eRank=218.3,q75/q25=17.14 vo_prod:H=0.6891,top10E=0.30,eRank=101.4,q75/q25=11326.31 train_time:200726ms step_avg:71.69ms +[2025-09-02 16:08:27] [Rank 0] PRINT: step:2800/10000 val_loss:4.4485 svd_entropy: attn_qk:H=0.6841,top10E=0.35,eRank=105.0,q75/q25=70.96 attn_vo:H=0.7855,top10E=0.19,eRank=210.9,q75/q25=95.41 mlp_w1:H=0.6513,top10E=0.45,eRank=108.3,q75/q25=6.76 mlp_w2:H=0.8048,top10E=0.20,eRank=218.3,q75/q25=17.14 vo_prod:H=0.6891,top10E=0.30,eRank=101.4,q75/q25=11326.31 train_time:200726ms step_avg:71.69ms +[2025-09-02 16:08:27] [Rank 0] step:2801/10000 train_time:200736ms step_avg:71.67ms +[2025-09-02 16:08:27] [Rank 0] step:2801/10000 train_time:200736ms step_avg:71.67ms +[2025-09-02 16:08:28] [Rank 0] step:2821/10000 train_time:202100ms step_avg:71.64ms +[2025-09-02 16:08:28] [Rank 0] step:2821/10000 train_time:202100ms step_avg:71.64ms +[2025-09-02 16:08:30] [Rank 0] step:2841/10000 train_time:203572ms step_avg:71.65ms +[2025-09-02 16:08:30] [Rank 0] step:2841/10000 train_time:203572ms step_avg:71.65ms +[2025-09-02 16:08:31] [Rank 0] step:2861/10000 train_time:205046ms step_avg:71.67ms +[2025-09-02 16:08:31] [Rank 0] step:2861/10000 train_time:205046ms step_avg:71.67ms +[2025-09-02 16:08:33] [Rank 0] step:2881/10000 train_time:206518ms step_avg:71.68ms +[2025-09-02 16:08:33] [Rank 0] step:2881/10000 train_time:206518ms step_avg:71.68ms +[2025-09-02 16:08:34] [Rank 0] step:2901/10000 train_time:207992ms step_avg:71.70ms +[2025-09-02 16:08:34] [Rank 0] step:2901/10000 train_time:207992ms step_avg:71.70ms +[2025-09-02 16:08:36] [Rank 0] step:2921/10000 train_time:209466ms step_avg:71.71ms +[2025-09-02 16:08:36] [Rank 0] step:2921/10000 train_time:209466ms step_avg:71.71ms +[2025-09-02 
16:08:37] [Rank 0] step:2941/10000 train_time:210941ms step_avg:71.72ms +[2025-09-02 16:08:37] [Rank 0] step:2941/10000 train_time:210941ms step_avg:71.72ms +[2025-09-02 16:08:39] [Rank 0] step:2961/10000 train_time:212416ms step_avg:71.74ms +[2025-09-02 16:08:39] [Rank 0] step:2961/10000 train_time:212416ms step_avg:71.74ms +[2025-09-02 16:08:40] [Rank 0] step:2981/10000 train_time:213897ms step_avg:71.75ms +[2025-09-02 16:08:40] [Rank 0] step:2981/10000 train_time:213897ms step_avg:71.75ms +[2025-09-02 16:08:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:08:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:08:54] [Rank 0] PRINT: step:3000/10000 val_loss:4.4086 svd_entropy: attn_qk:H=0.6894,top10E=0.34,eRank=108.0,q75/q25=75.89 attn_vo:H=0.7907,top10E=0.19,eRank=217.1,q75/q25=95.43 mlp_w1:H=0.6581,top10E=0.44,eRank=111.8,q75/q25=7.06 mlp_w2:H=0.8081,top10E=0.19,eRank=223.7,q75/q25=17.47 vo_prod:H=0.6953,top10E=0.29,eRank=105.7,q75/q25=11387.68 train_time:215529ms step_avg:71.84ms +[2025-09-02 16:08:54] [Rank 0] PRINT: step:3000/10000 val_loss:4.4086 svd_entropy: attn_qk:H=0.6894,top10E=0.34,eRank=108.0,q75/q25=75.89 attn_vo:H=0.7907,top10E=0.19,eRank=217.1,q75/q25=95.43 mlp_w1:H=0.6581,top10E=0.44,eRank=111.8,q75/q25=7.06 mlp_w2:H=0.8081,top10E=0.19,eRank=223.7,q75/q25=17.47 vo_prod:H=0.6953,top10E=0.29,eRank=105.7,q75/q25=11387.68 train_time:215529ms step_avg:71.84ms +[2025-09-02 16:08:54] [Rank 0] step:3001/10000 train_time:215540ms step_avg:71.82ms +[2025-09-02 16:08:54] [Rank 0] step:3001/10000 train_time:215540ms step_avg:71.82ms +[2025-09-02 16:08:55] [Rank 0] step:3021/10000 train_time:216902ms step_avg:71.80ms +[2025-09-02 16:08:55] [Rank 0] step:3021/10000 train_time:216902ms step_avg:71.80ms +[2025-09-02 16:08:57] [Rank 0] step:3041/10000 train_time:218382ms 
step_avg:71.81ms +[2025-09-02 16:08:57] [Rank 0] step:3041/10000 train_time:218382ms step_avg:71.81ms +[2025-09-02 16:08:58] [Rank 0] step:3061/10000 train_time:219864ms step_avg:71.83ms +[2025-09-02 16:08:58] [Rank 0] step:3061/10000 train_time:219864ms step_avg:71.83ms +[2025-09-02 16:09:00] [Rank 0] step:3081/10000 train_time:221346ms step_avg:71.84ms +[2025-09-02 16:09:00] [Rank 0] step:3081/10000 train_time:221346ms step_avg:71.84ms +[2025-09-02 16:09:01] [Rank 0] step:3101/10000 train_time:222830ms step_avg:71.86ms +[2025-09-02 16:09:01] [Rank 0] step:3101/10000 train_time:222830ms step_avg:71.86ms +[2025-09-02 16:09:03] [Rank 0] step:3121/10000 train_time:224313ms step_avg:71.87ms +[2025-09-02 16:09:03] [Rank 0] step:3121/10000 train_time:224313ms step_avg:71.87ms +[2025-09-02 16:09:04] [Rank 0] step:3141/10000 train_time:225797ms step_avg:71.89ms +[2025-09-02 16:09:04] [Rank 0] step:3141/10000 train_time:225797ms step_avg:71.89ms +[2025-09-02 16:09:06] [Rank 0] step:3161/10000 train_time:227281ms step_avg:71.90ms +[2025-09-02 16:09:06] [Rank 0] step:3161/10000 train_time:227281ms step_avg:71.90ms +[2025-09-02 16:09:07] [Rank 0] step:3181/10000 train_time:228766ms step_avg:71.92ms +[2025-09-02 16:09:07] [Rank 0] step:3181/10000 train_time:228766ms step_avg:71.92ms +[2025-09-02 16:09:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:09:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:09:20] [Rank 0] PRINT: step:3200/10000 val_loss:4.3743 svd_entropy: attn_qk:H=0.6941,top10E=0.34,eRank=110.9,q75/q25=80.19 attn_vo:H=0.7954,top10E=0.18,eRank=222.9,q75/q25=93.59 mlp_w1:H=0.6644,top10E=0.43,eRank=115.3,q75/q25=7.32 mlp_w2:H=0.8112,top10E=0.18,eRank=228.7,q75/q25=17.91 vo_prod:H=0.7010,top10E=0.28,eRank=109.8,q75/q25=11151.50 train_time:230399ms step_avg:72.00ms +[2025-09-02 16:09:20] [Rank 0] PRINT: step:3200/10000 val_loss:4.3743 svd_entropy: attn_qk:H=0.6941,top10E=0.34,eRank=110.9,q75/q25=80.19 attn_vo:H=0.7954,top10E=0.18,eRank=222.9,q75/q25=93.59 mlp_w1:H=0.6644,top10E=0.43,eRank=115.3,q75/q25=7.32 mlp_w2:H=0.8112,top10E=0.18,eRank=228.7,q75/q25=17.91 vo_prod:H=0.7010,top10E=0.28,eRank=109.8,q75/q25=11151.50 train_time:230399ms step_avg:72.00ms +[2025-09-02 16:09:20] [Rank 0] step:3201/10000 train_time:230409ms step_avg:71.98ms +[2025-09-02 16:09:20] [Rank 0] step:3201/10000 train_time:230409ms step_avg:71.98ms +[2025-09-02 16:09:22] [Rank 0] step:3221/10000 train_time:231746ms step_avg:71.95ms +[2025-09-02 16:09:22] [Rank 0] step:3221/10000 train_time:231746ms step_avg:71.95ms +[2025-09-02 16:09:23] [Rank 0] step:3241/10000 train_time:233227ms step_avg:71.96ms +[2025-09-02 16:09:23] [Rank 0] step:3241/10000 train_time:233227ms step_avg:71.96ms +[2025-09-02 16:09:25] [Rank 0] step:3261/10000 train_time:234708ms step_avg:71.97ms +[2025-09-02 16:09:25] [Rank 0] step:3261/10000 train_time:234708ms step_avg:71.97ms +[2025-09-02 16:09:26] [Rank 0] step:3281/10000 train_time:236190ms step_avg:71.99ms +[2025-09-02 16:09:26] [Rank 0] step:3281/10000 train_time:236190ms step_avg:71.99ms +[2025-09-02 16:09:28] [Rank 0] step:3301/10000 train_time:237672ms step_avg:72.00ms +[2025-09-02 16:09:28] [Rank 0] step:3301/10000 train_time:237672ms step_avg:72.00ms +[2025-09-02 16:09:29] [Rank 0] step:3321/10000 train_time:239154ms step_avg:72.01ms +[2025-09-02 16:09:29] [Rank 0] step:3321/10000 train_time:239154ms step_avg:72.01ms +[2025-09-02 
16:09:31] [Rank 0] step:3341/10000 train_time:240635ms step_avg:72.02ms +[2025-09-02 16:09:31] [Rank 0] step:3341/10000 train_time:240635ms step_avg:72.02ms +[2025-09-02 16:09:32] [Rank 0] step:3361/10000 train_time:242117ms step_avg:72.04ms +[2025-09-02 16:09:32] [Rank 0] step:3361/10000 train_time:242117ms step_avg:72.04ms +[2025-09-02 16:09:34] [Rank 0] step:3381/10000 train_time:243598ms step_avg:72.05ms +[2025-09-02 16:09:34] [Rank 0] step:3381/10000 train_time:243598ms step_avg:72.05ms +[2025-09-02 16:09:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:09:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:09:47] [Rank 0] PRINT: step:3400/10000 val_loss:4.3334 svd_entropy: attn_qk:H=0.6989,top10E=0.33,eRank=113.9,q75/q25=85.23 attn_vo:H=0.7998,top10E=0.18,eRank=228.5,q75/q25=91.83 mlp_w1:H=0.6706,top10E=0.42,eRank=118.9,q75/q25=7.61 mlp_w2:H=0.8140,top10E=0.18,eRank=233.5,q75/q25=18.30 vo_prod:H=0.7065,top10E=0.28,eRank=113.9,q75/q25=10746.91 train_time:245242ms step_avg:72.13ms +[2025-09-02 16:09:47] [Rank 0] PRINT: step:3400/10000 val_loss:4.3334 svd_entropy: attn_qk:H=0.6989,top10E=0.33,eRank=113.9,q75/q25=85.23 attn_vo:H=0.7998,top10E=0.18,eRank=228.5,q75/q25=91.83 mlp_w1:H=0.6706,top10E=0.42,eRank=118.9,q75/q25=7.61 mlp_w2:H=0.8140,top10E=0.18,eRank=233.5,q75/q25=18.30 vo_prod:H=0.7065,top10E=0.28,eRank=113.9,q75/q25=10746.91 train_time:245242ms step_avg:72.13ms +[2025-09-02 16:09:47] [Rank 0] step:3401/10000 train_time:245253ms step_avg:72.11ms +[2025-09-02 16:09:47] [Rank 0] step:3401/10000 train_time:245253ms step_avg:72.11ms +[2025-09-02 16:09:48] [Rank 0] step:3421/10000 train_time:246607ms step_avg:72.09ms +[2025-09-02 16:09:48] [Rank 0] step:3421/10000 train_time:246607ms step_avg:72.09ms +[2025-09-02 16:09:50] [Rank 0] step:3441/10000 train_time:248084ms 
step_avg:72.10ms +[2025-09-02 16:09:50] [Rank 0] step:3441/10000 train_time:248084ms step_avg:72.10ms +[2025-09-02 16:09:51] [Rank 0] step:3461/10000 train_time:249565ms step_avg:72.11ms +[2025-09-02 16:09:51] [Rank 0] step:3461/10000 train_time:249565ms step_avg:72.11ms +[2025-09-02 16:09:53] [Rank 0] step:3481/10000 train_time:251045ms step_avg:72.12ms +[2025-09-02 16:09:53] [Rank 0] step:3481/10000 train_time:251045ms step_avg:72.12ms +[2025-09-02 16:09:54] [Rank 0] step:3501/10000 train_time:252527ms step_avg:72.13ms +[2025-09-02 16:09:54] [Rank 0] step:3501/10000 train_time:252527ms step_avg:72.13ms +[2025-09-02 16:09:56] [Rank 0] step:3521/10000 train_time:254011ms step_avg:72.14ms +[2025-09-02 16:09:56] [Rank 0] step:3521/10000 train_time:254011ms step_avg:72.14ms +[2025-09-02 16:09:57] [Rank 0] step:3541/10000 train_time:255493ms step_avg:72.15ms +[2025-09-02 16:09:57] [Rank 0] step:3541/10000 train_time:255493ms step_avg:72.15ms +[2025-09-02 16:09:59] [Rank 0] step:3561/10000 train_time:256975ms step_avg:72.16ms +[2025-09-02 16:09:59] [Rank 0] step:3561/10000 train_time:256975ms step_avg:72.16ms +[2025-09-02 16:10:00] [Rank 0] step:3581/10000 train_time:258457ms step_avg:72.17ms +[2025-09-02 16:10:00] [Rank 0] step:3581/10000 train_time:258457ms step_avg:72.17ms +[2025-09-02 16:10:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:10:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:10:13] [Rank 0] PRINT: step:3600/10000 val_loss:4.3300 svd_entropy: attn_qk:H=0.7029,top10E=0.32,eRank=116.5,q75/q25=88.48 attn_vo:H=0.8037,top10E=0.17,eRank=233.6,q75/q25=90.11 mlp_w1:H=0.6763,top10E=0.41,eRank=122.2,q75/q25=7.85 mlp_w2:H=0.8162,top10E=0.18,eRank=237.5,q75/q25=18.64 vo_prod:H=0.7111,top10E=0.27,eRank=117.4,q75/q25=10370.98 train_time:260090ms step_avg:72.25ms +[2025-09-02 16:10:13] [Rank 0] PRINT: step:3600/10000 val_loss:4.3300 svd_entropy: attn_qk:H=0.7029,top10E=0.32,eRank=116.5,q75/q25=88.48 attn_vo:H=0.8037,top10E=0.17,eRank=233.6,q75/q25=90.11 mlp_w1:H=0.6763,top10E=0.41,eRank=122.2,q75/q25=7.85 mlp_w2:H=0.8162,top10E=0.18,eRank=237.5,q75/q25=18.64 vo_prod:H=0.7111,top10E=0.27,eRank=117.4,q75/q25=10370.98 train_time:260090ms step_avg:72.25ms +[2025-09-02 16:10:13] [Rank 0] step:3601/10000 train_time:260101ms step_avg:72.23ms +[2025-09-02 16:10:13] [Rank 0] step:3601/10000 train_time:260101ms step_avg:72.23ms +[2025-09-02 16:10:15] [Rank 0] step:3621/10000 train_time:261442ms step_avg:72.20ms +[2025-09-02 16:10:15] [Rank 0] step:3621/10000 train_time:261442ms step_avg:72.20ms +[2025-09-02 16:10:16] [Rank 0] step:3641/10000 train_time:262922ms step_avg:72.21ms +[2025-09-02 16:10:16] [Rank 0] step:3641/10000 train_time:262922ms step_avg:72.21ms +[2025-09-02 16:10:18] [Rank 0] step:3661/10000 train_time:264401ms step_avg:72.22ms +[2025-09-02 16:10:18] [Rank 0] step:3661/10000 train_time:264401ms step_avg:72.22ms +[2025-09-02 16:10:19] [Rank 0] step:3681/10000 train_time:265882ms step_avg:72.23ms +[2025-09-02 16:10:19] [Rank 0] step:3681/10000 train_time:265882ms step_avg:72.23ms +[2025-09-02 16:10:21] [Rank 0] step:3701/10000 train_time:267365ms step_avg:72.24ms +[2025-09-02 16:10:21] [Rank 0] step:3701/10000 train_time:267365ms step_avg:72.24ms +[2025-09-02 16:10:22] [Rank 0] step:3721/10000 train_time:268875ms step_avg:72.26ms +[2025-09-02 16:10:22] [Rank 0] step:3721/10000 train_time:268875ms step_avg:72.26ms +[2025-09-02 
16:10:24] [Rank 0] step:3741/10000 train_time:270392ms step_avg:72.28ms +[2025-09-02 16:10:24] [Rank 0] step:3741/10000 train_time:270392ms step_avg:72.28ms +[2025-09-02 16:10:25] [Rank 0] step:3761/10000 train_time:271912ms step_avg:72.30ms +[2025-09-02 16:10:25] [Rank 0] step:3761/10000 train_time:271912ms step_avg:72.30ms +[2025-09-02 16:10:27] [Rank 0] step:3781/10000 train_time:273431ms step_avg:72.32ms +[2025-09-02 16:10:27] [Rank 0] step:3781/10000 train_time:273431ms step_avg:72.32ms +[2025-09-02 16:10:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:10:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:10:40] [Rank 0] PRINT: step:3800/10000 val_loss:4.2691 svd_entropy: attn_qk:H=0.7066,top10E=0.32,eRank=119.0,q75/q25=92.11 attn_vo:H=0.8073,top10E=0.17,eRank=238.5,q75/q25=88.30 mlp_w1:H=0.6815,top10E=0.40,eRank=125.4,q75/q25=8.13 mlp_w2:H=0.8183,top10E=0.17,eRank=241.2,q75/q25=18.96 vo_prod:H=0.7155,top10E=0.27,eRank=120.9,q75/q25=9824.99 train_time:275103ms step_avg:72.40ms +[2025-09-02 16:10:40] [Rank 0] PRINT: step:3800/10000 val_loss:4.2691 svd_entropy: attn_qk:H=0.7066,top10E=0.32,eRank=119.0,q75/q25=92.11 attn_vo:H=0.8073,top10E=0.17,eRank=238.5,q75/q25=88.30 mlp_w1:H=0.6815,top10E=0.40,eRank=125.4,q75/q25=8.13 mlp_w2:H=0.8183,top10E=0.17,eRank=241.2,q75/q25=18.96 vo_prod:H=0.7155,top10E=0.27,eRank=120.9,q75/q25=9824.99 train_time:275103ms step_avg:72.40ms +[2025-09-02 16:10:40] [Rank 0] step:3801/10000 train_time:275114ms step_avg:72.38ms +[2025-09-02 16:10:40] [Rank 0] step:3801/10000 train_time:275114ms step_avg:72.38ms +[2025-09-02 16:10:42] [Rank 0] step:3821/10000 train_time:276490ms step_avg:72.36ms +[2025-09-02 16:10:42] [Rank 0] step:3821/10000 train_time:276490ms step_avg:72.36ms +[2025-09-02 16:10:43] [Rank 0] step:3841/10000 train_time:278012ms 
step_avg:72.38ms +[2025-09-02 16:10:43] [Rank 0] step:3841/10000 train_time:278012ms step_avg:72.38ms +[2025-09-02 16:10:45] [Rank 0] step:3861/10000 train_time:279533ms step_avg:72.40ms +[2025-09-02 16:10:45] [Rank 0] step:3861/10000 train_time:279533ms step_avg:72.40ms +[2025-09-02 16:10:46] [Rank 0] step:3881/10000 train_time:281052ms step_avg:72.42ms +[2025-09-02 16:10:46] [Rank 0] step:3881/10000 train_time:281052ms step_avg:72.42ms +[2025-09-02 16:10:48] [Rank 0] step:3901/10000 train_time:282571ms step_avg:72.44ms +[2025-09-02 16:10:48] [Rank 0] step:3901/10000 train_time:282571ms step_avg:72.44ms +[2025-09-02 16:10:49] [Rank 0] step:3921/10000 train_time:284092ms step_avg:72.45ms +[2025-09-02 16:10:49] [Rank 0] step:3921/10000 train_time:284092ms step_avg:72.45ms +[2025-09-02 16:10:51] [Rank 0] step:3941/10000 train_time:285615ms step_avg:72.47ms +[2025-09-02 16:10:51] [Rank 0] step:3941/10000 train_time:285615ms step_avg:72.47ms +[2025-09-02 16:10:53] [Rank 0] step:3961/10000 train_time:287133ms step_avg:72.49ms +[2025-09-02 16:10:53] [Rank 0] step:3961/10000 train_time:287133ms step_avg:72.49ms +[2025-09-02 16:10:54] [Rank 0] step:3981/10000 train_time:288659ms step_avg:72.51ms +[2025-09-02 16:10:54] [Rank 0] step:3981/10000 train_time:288659ms step_avg:72.51ms +[2025-09-02 16:10:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:10:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:11:07] [Rank 0] PRINT: step:4000/10000 val_loss:4.2450 svd_entropy: attn_qk:H=0.7102,top10E=0.31,eRank=121.5,q75/q25=94.75 attn_vo:H=0.8107,top10E=0.17,eRank=243.2,q75/q25=85.26 mlp_w1:H=0.6866,top10E=0.40,eRank=128.7,q75/q25=8.38 mlp_w2:H=0.8202,top10E=0.17,eRank=244.8,q75/q25=19.42 vo_prod:H=0.7195,top10E=0.26,eRank=124.3,q75/q25=9018.38 train_time:290331ms step_avg:72.58ms +[2025-09-02 16:11:07] [Rank 0] PRINT: step:4000/10000 val_loss:4.2450 svd_entropy: attn_qk:H=0.7102,top10E=0.31,eRank=121.5,q75/q25=94.75 attn_vo:H=0.8107,top10E=0.17,eRank=243.2,q75/q25=85.26 mlp_w1:H=0.6866,top10E=0.40,eRank=128.7,q75/q25=8.38 mlp_w2:H=0.8202,top10E=0.17,eRank=244.8,q75/q25=19.42 vo_prod:H=0.7195,top10E=0.26,eRank=124.3,q75/q25=9018.38 train_time:290331ms step_avg:72.58ms +[2025-09-02 16:11:07] [Rank 0] step:4001/10000 train_time:290342ms step_avg:72.57ms +[2025-09-02 16:11:07] [Rank 0] step:4001/10000 train_time:290342ms step_avg:72.57ms +[2025-09-02 16:11:09] [Rank 0] step:4021/10000 train_time:291728ms step_avg:72.55ms +[2025-09-02 16:11:09] [Rank 0] step:4021/10000 train_time:291728ms step_avg:72.55ms +[2025-09-02 16:11:10] [Rank 0] step:4041/10000 train_time:293245ms step_avg:72.57ms +[2025-09-02 16:11:10] [Rank 0] step:4041/10000 train_time:293245ms step_avg:72.57ms +[2025-09-02 16:11:12] [Rank 0] step:4061/10000 train_time:294762ms step_avg:72.58ms +[2025-09-02 16:11:12] [Rank 0] step:4061/10000 train_time:294762ms step_avg:72.58ms +[2025-09-02 16:11:14] [Rank 0] step:4081/10000 train_time:296390ms step_avg:72.63ms +[2025-09-02 16:11:14] [Rank 0] step:4081/10000 train_time:296390ms step_avg:72.63ms +[2025-09-02 16:11:15] [Rank 0] step:4101/10000 train_time:297906ms step_avg:72.64ms +[2025-09-02 16:11:15] [Rank 0] step:4101/10000 train_time:297906ms step_avg:72.64ms +[2025-09-02 16:11:17] [Rank 0] step:4121/10000 train_time:299428ms step_avg:72.66ms +[2025-09-02 16:11:17] [Rank 0] step:4121/10000 train_time:299428ms step_avg:72.66ms +[2025-09-02 
16:11:18] [Rank 0] step:4141/10000 train_time:300948ms step_avg:72.68ms +[2025-09-02 16:11:18] [Rank 0] step:4141/10000 train_time:300948ms step_avg:72.68ms +[2025-09-02 16:11:20] [Rank 0] step:4161/10000 train_time:302466ms step_avg:72.69ms +[2025-09-02 16:11:20] [Rank 0] step:4161/10000 train_time:302466ms step_avg:72.69ms +[2025-09-02 16:11:21] [Rank 0] step:4181/10000 train_time:303987ms step_avg:72.71ms +[2025-09-02 16:11:21] [Rank 0] step:4181/10000 train_time:303987ms step_avg:72.71ms +[2025-09-02 16:11:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:11:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:11:35] [Rank 0] PRINT: step:4200/10000 val_loss:4.2271 svd_entropy: attn_qk:H=0.7137,top10E=0.31,eRank=123.9,q75/q25=98.07 attn_vo:H=0.8139,top10E=0.16,eRank=247.6,q75/q25=82.78 mlp_w1:H=0.6912,top10E=0.39,eRank=131.7,q75/q25=8.64 mlp_w2:H=0.8221,top10E=0.16,eRank=248.3,q75/q25=19.70 vo_prod:H=0.7234,top10E=0.26,eRank=127.6,q75/q25=8281.31 train_time:305659ms step_avg:72.78ms +[2025-09-02 16:11:35] [Rank 0] PRINT: step:4200/10000 val_loss:4.2271 svd_entropy: attn_qk:H=0.7137,top10E=0.31,eRank=123.9,q75/q25=98.07 attn_vo:H=0.8139,top10E=0.16,eRank=247.6,q75/q25=82.78 mlp_w1:H=0.6912,top10E=0.39,eRank=131.7,q75/q25=8.64 mlp_w2:H=0.8221,top10E=0.16,eRank=248.3,q75/q25=19.70 vo_prod:H=0.7234,top10E=0.26,eRank=127.6,q75/q25=8281.31 train_time:305659ms step_avg:72.78ms +[2025-09-02 16:11:35] [Rank 0] step:4201/10000 train_time:305671ms step_avg:72.76ms +[2025-09-02 16:11:35] [Rank 0] step:4201/10000 train_time:305671ms step_avg:72.76ms +[2025-09-02 16:11:36] [Rank 0] step:4221/10000 train_time:307068ms step_avg:72.75ms +[2025-09-02 16:11:36] [Rank 0] step:4221/10000 train_time:307068ms step_avg:72.75ms +[2025-09-02 16:11:38] [Rank 0] step:4241/10000 train_time:308586ms 
step_avg:72.76ms +[2025-09-02 16:11:38] [Rank 0] step:4241/10000 train_time:308586ms step_avg:72.76ms +[2025-09-02 16:11:39] [Rank 0] step:4261/10000 train_time:310102ms step_avg:72.78ms +[2025-09-02 16:11:39] [Rank 0] step:4261/10000 train_time:310102ms step_avg:72.78ms +[2025-09-02 16:11:41] [Rank 0] step:4281/10000 train_time:311618ms step_avg:72.79ms +[2025-09-02 16:11:41] [Rank 0] step:4281/10000 train_time:311618ms step_avg:72.79ms +[2025-09-02 16:11:42] [Rank 0] step:4301/10000 train_time:313136ms step_avg:72.81ms +[2025-09-02 16:11:42] [Rank 0] step:4301/10000 train_time:313136ms step_avg:72.81ms +[2025-09-02 16:11:44] [Rank 0] step:4321/10000 train_time:314653ms step_avg:72.82ms +[2025-09-02 16:11:44] [Rank 0] step:4321/10000 train_time:314653ms step_avg:72.82ms +[2025-09-02 16:11:45] [Rank 0] step:4341/10000 train_time:316170ms step_avg:72.83ms +[2025-09-02 16:11:45] [Rank 0] step:4341/10000 train_time:316170ms step_avg:72.83ms +[2025-09-02 16:11:47] [Rank 0] step:4361/10000 train_time:317687ms step_avg:72.85ms +[2025-09-02 16:11:47] [Rank 0] step:4361/10000 train_time:317687ms step_avg:72.85ms +[2025-09-02 16:11:48] [Rank 0] step:4381/10000 train_time:319203ms step_avg:72.86ms +[2025-09-02 16:11:48] [Rank 0] step:4381/10000 train_time:319203ms step_avg:72.86ms +[2025-09-02 16:11:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:11:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:12:02] [Rank 0] PRINT: step:4400/10000 val_loss:4.2039 svd_entropy: attn_qk:H=0.7170,top10E=0.30,eRank=126.2,q75/q25=100.06 attn_vo:H=0.8168,top10E=0.16,eRank=251.8,q75/q25=80.37 mlp_w1:H=0.6956,top10E=0.38,eRank=134.6,q75/q25=8.89 mlp_w2:H=0.8237,top10E=0.16,eRank=251.3,q75/q25=19.99 vo_prod:H=0.7270,top10E=0.25,eRank=130.8,q75/q25=7865.71 train_time:320874ms step_avg:72.93ms +[2025-09-02 16:12:02] [Rank 0] PRINT: step:4400/10000 val_loss:4.2039 svd_entropy: attn_qk:H=0.7170,top10E=0.30,eRank=126.2,q75/q25=100.06 attn_vo:H=0.8168,top10E=0.16,eRank=251.8,q75/q25=80.37 mlp_w1:H=0.6956,top10E=0.38,eRank=134.6,q75/q25=8.89 mlp_w2:H=0.8237,top10E=0.16,eRank=251.3,q75/q25=19.99 vo_prod:H=0.7270,top10E=0.25,eRank=130.8,q75/q25=7865.71 train_time:320874ms step_avg:72.93ms +[2025-09-02 16:12:02] [Rank 0] step:4401/10000 train_time:320887ms step_avg:72.91ms +[2025-09-02 16:12:02] [Rank 0] step:4401/10000 train_time:320887ms step_avg:72.91ms +[2025-09-02 16:12:03] [Rank 0] step:4421/10000 train_time:322258ms step_avg:72.89ms +[2025-09-02 16:12:03] [Rank 0] step:4421/10000 train_time:322258ms step_avg:72.89ms +[2025-09-02 16:12:05] [Rank 0] step:4441/10000 train_time:323773ms step_avg:72.91ms +[2025-09-02 16:12:05] [Rank 0] step:4441/10000 train_time:323773ms step_avg:72.91ms +[2025-09-02 16:12:06] [Rank 0] step:4461/10000 train_time:325303ms step_avg:72.92ms +[2025-09-02 16:12:06] [Rank 0] step:4461/10000 train_time:325303ms step_avg:72.92ms +[2025-09-02 16:12:08] [Rank 0] step:4481/10000 train_time:326824ms step_avg:72.94ms +[2025-09-02 16:12:08] [Rank 0] step:4481/10000 train_time:326824ms step_avg:72.94ms +[2025-09-02 16:12:09] [Rank 0] step:4501/10000 train_time:328348ms step_avg:72.95ms +[2025-09-02 16:12:09] [Rank 0] step:4501/10000 train_time:328348ms step_avg:72.95ms +[2025-09-02 16:12:11] [Rank 0] step:4521/10000 train_time:329870ms step_avg:72.96ms +[2025-09-02 16:12:11] [Rank 0] step:4521/10000 train_time:329870ms step_avg:72.96ms +[2025-09-02 
16:12:12] [Rank 0] step:4541/10000 train_time:331394ms step_avg:72.98ms +[2025-09-02 16:12:12] [Rank 0] step:4541/10000 train_time:331394ms step_avg:72.98ms +[2025-09-02 16:12:14] [Rank 0] step:4561/10000 train_time:332925ms step_avg:72.99ms +[2025-09-02 16:12:14] [Rank 0] step:4561/10000 train_time:332925ms step_avg:72.99ms +[2025-09-02 16:12:15] [Rank 0] step:4581/10000 train_time:334451ms step_avg:73.01ms +[2025-09-02 16:12:15] [Rank 0] step:4581/10000 train_time:334451ms step_avg:73.01ms +[2025-09-02 16:12:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:12:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:12:29] [Rank 0] PRINT: step:4600/10000 val_loss:4.1761 svd_entropy: attn_qk:H=0.7203,top10E=0.30,eRank=128.6,q75/q25=102.81 attn_vo:H=0.8196,top10E=0.16,eRank=256.0,q75/q25=78.70 mlp_w1:H=0.6995,top10E=0.38,eRank=137.4,q75/q25=9.11 mlp_w2:H=0.8251,top10E=0.16,eRank=254.3,q75/q25=20.24 vo_prod:H=0.7306,top10E=0.25,eRank=134.0,q75/q25=7310.58 train_time:336130ms step_avg:73.07ms +[2025-09-02 16:12:29] [Rank 0] PRINT: step:4600/10000 val_loss:4.1761 svd_entropy: attn_qk:H=0.7203,top10E=0.30,eRank=128.6,q75/q25=102.81 attn_vo:H=0.8196,top10E=0.16,eRank=256.0,q75/q25=78.70 mlp_w1:H=0.6995,top10E=0.38,eRank=137.4,q75/q25=9.11 mlp_w2:H=0.8251,top10E=0.16,eRank=254.3,q75/q25=20.24 vo_prod:H=0.7306,top10E=0.25,eRank=134.0,q75/q25=7310.58 train_time:336130ms step_avg:73.07ms +[2025-09-02 16:12:29] [Rank 0] step:4601/10000 train_time:336142ms step_avg:73.06ms +[2025-09-02 16:12:29] [Rank 0] step:4601/10000 train_time:336142ms step_avg:73.06ms +[2025-09-02 16:12:30] [Rank 0] step:4621/10000 train_time:337535ms step_avg:73.04ms +[2025-09-02 16:12:30] [Rank 0] step:4621/10000 train_time:337535ms step_avg:73.04ms +[2025-09-02 16:12:32] [Rank 0] step:4641/10000 train_time:339060ms 
step_avg:73.06ms +[2025-09-02 16:12:32] [Rank 0] step:4641/10000 train_time:339060ms step_avg:73.06ms +[2025-09-02 16:12:34] [Rank 0] step:4661/10000 train_time:340585ms step_avg:73.07ms +[2025-09-02 16:12:34] [Rank 0] step:4661/10000 train_time:340585ms step_avg:73.07ms +[2025-09-02 16:12:35] [Rank 0] step:4681/10000 train_time:342111ms step_avg:73.08ms +[2025-09-02 16:12:35] [Rank 0] step:4681/10000 train_time:342111ms step_avg:73.08ms +[2025-09-02 16:12:37] [Rank 0] step:4701/10000 train_time:343636ms step_avg:73.10ms +[2025-09-02 16:12:37] [Rank 0] step:4701/10000 train_time:343636ms step_avg:73.10ms +[2025-09-02 16:12:38] [Rank 0] step:4721/10000 train_time:345160ms step_avg:73.11ms +[2025-09-02 16:12:38] [Rank 0] step:4721/10000 train_time:345160ms step_avg:73.11ms +[2025-09-02 16:12:40] [Rank 0] step:4741/10000 train_time:346684ms step_avg:73.12ms +[2025-09-02 16:12:40] [Rank 0] step:4741/10000 train_time:346684ms step_avg:73.12ms +[2025-09-02 16:12:41] [Rank 0] step:4761/10000 train_time:348211ms step_avg:73.14ms +[2025-09-02 16:12:41] [Rank 0] step:4761/10000 train_time:348211ms step_avg:73.14ms +[2025-09-02 16:12:43] [Rank 0] step:4781/10000 train_time:349735ms step_avg:73.15ms +[2025-09-02 16:12:43] [Rank 0] step:4781/10000 train_time:349735ms step_avg:73.15ms +[2025-09-02 16:12:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:12:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:12:56] [Rank 0] PRINT: step:4800/10000 val_loss:4.1616 svd_entropy: attn_qk:H=0.7233,top10E=0.30,eRank=130.9,q75/q25=104.42 attn_vo:H=0.8223,top10E=0.15,eRank=259.9,q75/q25=75.57 mlp_w1:H=0.7032,top10E=0.37,eRank=140.1,q75/q25=9.30 mlp_w2:H=0.8265,top10E=0.16,eRank=257.0,q75/q25=20.48 vo_prod:H=0.7338,top10E=0.24,eRank=136.9,q75/q25=6635.60 train_time:351415ms step_avg:73.21ms +[2025-09-02 16:12:56] [Rank 0] PRINT: step:4800/10000 val_loss:4.1616 svd_entropy: attn_qk:H=0.7233,top10E=0.30,eRank=130.9,q75/q25=104.42 attn_vo:H=0.8223,top10E=0.15,eRank=259.9,q75/q25=75.57 mlp_w1:H=0.7032,top10E=0.37,eRank=140.1,q75/q25=9.30 mlp_w2:H=0.8265,top10E=0.16,eRank=257.0,q75/q25=20.48 vo_prod:H=0.7338,top10E=0.24,eRank=136.9,q75/q25=6635.60 train_time:351415ms step_avg:73.21ms +[2025-09-02 16:12:56] [Rank 0] step:4801/10000 train_time:351427ms step_avg:73.20ms +[2025-09-02 16:12:56] [Rank 0] step:4801/10000 train_time:351427ms step_avg:73.20ms +[2025-09-02 16:12:58] [Rank 0] step:4821/10000 train_time:352814ms step_avg:73.18ms +[2025-09-02 16:12:58] [Rank 0] step:4821/10000 train_time:352814ms step_avg:73.18ms +[2025-09-02 16:12:59] [Rank 0] step:4841/10000 train_time:354339ms step_avg:73.20ms +[2025-09-02 16:12:59] [Rank 0] step:4841/10000 train_time:354339ms step_avg:73.20ms +[2025-09-02 16:13:01] [Rank 0] step:4861/10000 train_time:355872ms step_avg:73.21ms +[2025-09-02 16:13:01] [Rank 0] step:4861/10000 train_time:355872ms step_avg:73.21ms +[2025-09-02 16:13:02] [Rank 0] step:4881/10000 train_time:357398ms step_avg:73.22ms +[2025-09-02 16:13:02] [Rank 0] step:4881/10000 train_time:357398ms step_avg:73.22ms +[2025-09-02 16:13:04] [Rank 0] step:4901/10000 train_time:358926ms step_avg:73.24ms +[2025-09-02 16:13:04] [Rank 0] step:4901/10000 train_time:358926ms step_avg:73.24ms +[2025-09-02 16:13:05] [Rank 0] step:4921/10000 train_time:360456ms step_avg:73.25ms +[2025-09-02 16:13:05] [Rank 0] step:4921/10000 train_time:360456ms step_avg:73.25ms +[2025-09-02 
16:13:07] [Rank 0] step:4941/10000 train_time:361985ms step_avg:73.26ms +[2025-09-02 16:13:07] [Rank 0] step:4941/10000 train_time:361985ms step_avg:73.26ms +[2025-09-02 16:13:08] [Rank 0] step:4961/10000 train_time:363514ms step_avg:73.27ms +[2025-09-02 16:13:08] [Rank 0] step:4961/10000 train_time:363514ms step_avg:73.27ms +[2025-09-02 16:13:10] [Rank 0] step:4981/10000 train_time:365043ms step_avg:73.29ms +[2025-09-02 16:13:10] [Rank 0] step:4981/10000 train_time:365043ms step_avg:73.29ms +[2025-09-02 16:13:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:13:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:13:23] [Rank 0] PRINT: step:5000/10000 val_loss:4.1406 svd_entropy: attn_qk:H=0.7261,top10E=0.29,eRank=133.1,q75/q25=105.52 attn_vo:H=0.8247,top10E=0.15,eRank=263.5,q75/q25=73.26 mlp_w1:H=0.7067,top10E=0.37,eRank=142.7,q75/q25=9.55 mlp_w2:H=0.8278,top10E=0.15,eRank=259.5,q75/q25=20.64 vo_prod:H=0.7369,top10E=0.24,eRank=139.8,q75/q25=6023.06 train_time:366724ms step_avg:73.34ms +[2025-09-02 16:13:23] [Rank 0] PRINT: step:5000/10000 val_loss:4.1406 svd_entropy: attn_qk:H=0.7261,top10E=0.29,eRank=133.1,q75/q25=105.52 attn_vo:H=0.8247,top10E=0.15,eRank=263.5,q75/q25=73.26 mlp_w1:H=0.7067,top10E=0.37,eRank=142.7,q75/q25=9.55 mlp_w2:H=0.8278,top10E=0.15,eRank=259.5,q75/q25=20.64 vo_prod:H=0.7369,top10E=0.24,eRank=139.8,q75/q25=6023.06 train_time:366724ms step_avg:73.34ms +[2025-09-02 16:13:23] [Rank 0] step:5001/10000 train_time:366736ms step_avg:73.33ms +[2025-09-02 16:13:23] [Rank 0] step:5001/10000 train_time:366736ms step_avg:73.33ms +[2025-09-02 16:13:25] [Rank 0] step:5021/10000 train_time:368126ms step_avg:73.32ms +[2025-09-02 16:13:25] [Rank 0] step:5021/10000 train_time:368126ms step_avg:73.32ms +[2025-09-02 16:13:26] [Rank 0] step:5041/10000 train_time:369649ms 
step_avg:73.33ms +[2025-09-02 16:13:26] [Rank 0] step:5041/10000 train_time:369649ms step_avg:73.33ms +[2025-09-02 16:13:28] [Rank 0] step:5061/10000 train_time:371172ms step_avg:73.34ms +[2025-09-02 16:13:28] [Rank 0] step:5061/10000 train_time:371172ms step_avg:73.34ms +[2025-09-02 16:13:29] [Rank 0] step:5081/10000 train_time:372697ms step_avg:73.35ms +[2025-09-02 16:13:29] [Rank 0] step:5081/10000 train_time:372697ms step_avg:73.35ms +[2025-09-02 16:13:31] [Rank 0] step:5101/10000 train_time:374221ms step_avg:73.36ms +[2025-09-02 16:13:31] [Rank 0] step:5101/10000 train_time:374221ms step_avg:73.36ms +[2025-09-02 16:13:33] [Rank 0] step:5121/10000 train_time:375745ms step_avg:73.37ms +[2025-09-02 16:13:33] [Rank 0] step:5121/10000 train_time:375745ms step_avg:73.37ms +[2025-09-02 16:13:34] [Rank 0] step:5141/10000 train_time:377272ms step_avg:73.38ms +[2025-09-02 16:13:34] [Rank 0] step:5141/10000 train_time:377272ms step_avg:73.38ms +[2025-09-02 16:13:36] [Rank 0] step:5161/10000 train_time:378797ms step_avg:73.40ms +[2025-09-02 16:13:36] [Rank 0] step:5161/10000 train_time:378797ms step_avg:73.40ms +[2025-09-02 16:13:37] [Rank 0] step:5181/10000 train_time:380325ms step_avg:73.41ms +[2025-09-02 16:13:37] [Rank 0] step:5181/10000 train_time:380325ms step_avg:73.41ms +[2025-09-02 16:13:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:13:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:13:50] [Rank 0] PRINT: step:5200/10000 val_loss:4.1199 svd_entropy: attn_qk:H=0.7288,top10E=0.29,eRank=135.2,q75/q25=106.71 attn_vo:H=0.8270,top10E=0.15,eRank=267.1,q75/q25=71.13 mlp_w1:H=0.7104,top10E=0.36,eRank=145.4,q75/q25=9.79 mlp_w2:H=0.8291,top10E=0.15,eRank=262.1,q75/q25=20.83 vo_prod:H=0.7397,top10E=0.24,eRank=142.5,q75/q25=5532.37 train_time:382029ms step_avg:73.47ms +[2025-09-02 16:13:50] [Rank 0] PRINT: step:5200/10000 val_loss:4.1199 svd_entropy: attn_qk:H=0.7288,top10E=0.29,eRank=135.2,q75/q25=106.71 attn_vo:H=0.8270,top10E=0.15,eRank=267.1,q75/q25=71.13 mlp_w1:H=0.7104,top10E=0.36,eRank=145.4,q75/q25=9.79 mlp_w2:H=0.8291,top10E=0.15,eRank=262.1,q75/q25=20.83 vo_prod:H=0.7397,top10E=0.24,eRank=142.5,q75/q25=5532.37 train_time:382029ms step_avg:73.47ms +[2025-09-02 16:13:51] [Rank 0] step:5201/10000 train_time:382041ms step_avg:73.46ms +[2025-09-02 16:13:51] [Rank 0] step:5201/10000 train_time:382041ms step_avg:73.46ms +[2025-09-02 16:13:52] [Rank 0] step:5221/10000 train_time:383443ms step_avg:73.44ms +[2025-09-02 16:13:52] [Rank 0] step:5221/10000 train_time:383443ms step_avg:73.44ms +[2025-09-02 16:13:54] [Rank 0] step:5241/10000 train_time:384998ms step_avg:73.46ms +[2025-09-02 16:13:54] [Rank 0] step:5241/10000 train_time:384998ms step_avg:73.46ms +[2025-09-02 16:13:55] [Rank 0] step:5261/10000 train_time:386552ms step_avg:73.47ms +[2025-09-02 16:13:55] [Rank 0] step:5261/10000 train_time:386552ms step_avg:73.47ms +[2025-09-02 16:13:57] [Rank 0] step:5281/10000 train_time:388109ms step_avg:73.49ms +[2025-09-02 16:13:57] [Rank 0] step:5281/10000 train_time:388109ms step_avg:73.49ms +[2025-09-02 16:13:58] [Rank 0] step:5301/10000 train_time:389677ms step_avg:73.51ms +[2025-09-02 16:13:58] [Rank 0] step:5301/10000 train_time:389677ms step_avg:73.51ms +[2025-09-02 16:14:00] [Rank 0] step:5321/10000 train_time:391305ms step_avg:73.54ms +[2025-09-02 16:14:00] [Rank 0] step:5321/10000 train_time:391305ms step_avg:73.54ms +[2025-09-02 
16:14:02] [Rank 0] step:5341/10000 train_time:392859ms step_avg:73.56ms +[2025-09-02 16:14:02] [Rank 0] step:5341/10000 train_time:392859ms step_avg:73.56ms +[2025-09-02 16:14:03] [Rank 0] step:5361/10000 train_time:394420ms step_avg:73.57ms +[2025-09-02 16:14:03] [Rank 0] step:5361/10000 train_time:394420ms step_avg:73.57ms +[2025-09-02 16:14:05] [Rank 0] step:5381/10000 train_time:395982ms step_avg:73.59ms +[2025-09-02 16:14:05] [Rank 0] step:5381/10000 train_time:395982ms step_avg:73.59ms +[2025-09-02 16:14:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:14:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:14:18] [Rank 0] PRINT: step:5400/10000 val_loss:4.1008 svd_entropy: attn_qk:H=0.7313,top10E=0.28,eRank=137.2,q75/q25=107.64 attn_vo:H=0.8291,top10E=0.15,eRank=270.3,q75/q25=68.55 mlp_w1:H=0.7136,top10E=0.36,eRank=147.9,q75/q25=10.03 mlp_w2:H=0.8301,top10E=0.15,eRank=264.3,q75/q25=21.07 vo_prod:H=0.7423,top10E=0.23,eRank=145.0,q75/q25=5142.82 train_time:397694ms step_avg:73.65ms +[2025-09-02 16:14:18] [Rank 0] PRINT: step:5400/10000 val_loss:4.1008 svd_entropy: attn_qk:H=0.7313,top10E=0.28,eRank=137.2,q75/q25=107.64 attn_vo:H=0.8291,top10E=0.15,eRank=270.3,q75/q25=68.55 mlp_w1:H=0.7136,top10E=0.36,eRank=147.9,q75/q25=10.03 mlp_w2:H=0.8301,top10E=0.15,eRank=264.3,q75/q25=21.07 vo_prod:H=0.7423,top10E=0.23,eRank=145.0,q75/q25=5142.82 train_time:397694ms step_avg:73.65ms +[2025-09-02 16:14:18] [Rank 0] step:5401/10000 train_time:397705ms step_avg:73.64ms +[2025-09-02 16:14:18] [Rank 0] step:5401/10000 train_time:397705ms step_avg:73.64ms +[2025-09-02 16:14:20] [Rank 0] step:5421/10000 train_time:399132ms step_avg:73.63ms +[2025-09-02 16:14:20] [Rank 0] step:5421/10000 train_time:399132ms step_avg:73.63ms +[2025-09-02 16:14:21] [Rank 0] step:5441/10000 train_time:400684ms 
step_avg:73.64ms +[2025-09-02 16:14:21] [Rank 0] step:5441/10000 train_time:400684ms step_avg:73.64ms +[2025-09-02 16:14:23] [Rank 0] step:5461/10000 train_time:402241ms step_avg:73.66ms +[2025-09-02 16:14:23] [Rank 0] step:5461/10000 train_time:402241ms step_avg:73.66ms +[2025-09-02 16:14:24] [Rank 0] step:5481/10000 train_time:403800ms step_avg:73.67ms +[2025-09-02 16:14:24] [Rank 0] step:5481/10000 train_time:403800ms step_avg:73.67ms +[2025-09-02 16:14:26] [Rank 0] step:5501/10000 train_time:405361ms step_avg:73.69ms +[2025-09-02 16:14:26] [Rank 0] step:5501/10000 train_time:405361ms step_avg:73.69ms +[2025-09-02 16:14:27] [Rank 0] step:5521/10000 train_time:406925ms step_avg:73.70ms +[2025-09-02 16:14:27] [Rank 0] step:5521/10000 train_time:406925ms step_avg:73.70ms +[2025-09-02 16:14:29] [Rank 0] step:5541/10000 train_time:408481ms step_avg:73.72ms +[2025-09-02 16:14:29] [Rank 0] step:5541/10000 train_time:408481ms step_avg:73.72ms +[2025-09-02 16:14:31] [Rank 0] step:5561/10000 train_time:410040ms step_avg:73.73ms +[2025-09-02 16:14:31] [Rank 0] step:5561/10000 train_time:410040ms step_avg:73.73ms +[2025-09-02 16:14:32] [Rank 0] step:5581/10000 train_time:411597ms step_avg:73.75ms +[2025-09-02 16:14:32] [Rank 0] step:5581/10000 train_time:411597ms step_avg:73.75ms +[2025-09-02 16:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:14:45] [Rank 0] PRINT: step:5600/10000 val_loss:4.0883 svd_entropy: attn_qk:H=0.7338,top10E=0.28,eRank=139.2,q75/q25=108.28 attn_vo:H=0.8311,top10E=0.15,eRank=273.5,q75/q25=66.33 mlp_w1:H=0.7166,top10E=0.35,eRank=150.3,q75/q25=10.23 mlp_w2:H=0.8311,top10E=0.15,eRank=266.4,q75/q25=21.33 vo_prod:H=0.7448,top10E=0.23,eRank=147.5,q75/q25=4541.43 train_time:413311ms step_avg:73.81ms +[2025-09-02 16:14:45] [Rank 0] PRINT: step:5600/10000 val_loss:4.0883 svd_entropy: attn_qk:H=0.7338,top10E=0.28,eRank=139.2,q75/q25=108.28 attn_vo:H=0.8311,top10E=0.15,eRank=273.5,q75/q25=66.33 mlp_w1:H=0.7166,top10E=0.35,eRank=150.3,q75/q25=10.23 mlp_w2:H=0.8311,top10E=0.15,eRank=266.4,q75/q25=21.33 vo_prod:H=0.7448,top10E=0.23,eRank=147.5,q75/q25=4541.43 train_time:413311ms step_avg:73.81ms +[2025-09-02 16:14:45] [Rank 0] step:5601/10000 train_time:413323ms step_avg:73.79ms +[2025-09-02 16:14:45] [Rank 0] step:5601/10000 train_time:413323ms step_avg:73.79ms +[2025-09-02 16:14:47] [Rank 0] step:5621/10000 train_time:414728ms step_avg:73.78ms +[2025-09-02 16:14:47] [Rank 0] step:5621/10000 train_time:414728ms step_avg:73.78ms +[2025-09-02 16:14:49] [Rank 0] step:5641/10000 train_time:416286ms step_avg:73.80ms +[2025-09-02 16:14:49] [Rank 0] step:5641/10000 train_time:416286ms step_avg:73.80ms +[2025-09-02 16:14:50] [Rank 0] step:5661/10000 train_time:417839ms step_avg:73.81ms +[2025-09-02 16:14:50] [Rank 0] step:5661/10000 train_time:417839ms step_avg:73.81ms +[2025-09-02 16:14:52] [Rank 0] step:5681/10000 train_time:419398ms step_avg:73.82ms +[2025-09-02 16:14:52] [Rank 0] step:5681/10000 train_time:419398ms step_avg:73.82ms +[2025-09-02 16:14:53] [Rank 0] step:5701/10000 train_time:420954ms step_avg:73.84ms +[2025-09-02 16:14:53] [Rank 0] step:5701/10000 train_time:420954ms step_avg:73.84ms +[2025-09-02 16:14:55] [Rank 0] step:5721/10000 train_time:422515ms step_avg:73.85ms +[2025-09-02 16:14:55] [Rank 0] step:5721/10000 train_time:422515ms step_avg:73.85ms +[2025-09-02 
16:14:56] [Rank 0] step:5741/10000 train_time:424074ms step_avg:73.87ms +[2025-09-02 16:14:56] [Rank 0] step:5741/10000 train_time:424074ms step_avg:73.87ms +[2025-09-02 16:14:58] [Rank 0] step:5761/10000 train_time:425633ms step_avg:73.88ms +[2025-09-02 16:14:58] [Rank 0] step:5761/10000 train_time:425633ms step_avg:73.88ms +[2025-09-02 16:14:59] [Rank 0] step:5781/10000 train_time:427191ms step_avg:73.90ms +[2025-09-02 16:14:59] [Rank 0] step:5781/10000 train_time:427191ms step_avg:73.90ms +[2025-09-02 16:15:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:15:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:15:13] [Rank 0] PRINT: step:5800/10000 val_loss:4.0766 svd_entropy: attn_qk:H=0.7361,top10E=0.28,eRank=141.2,q75/q25=109.19 attn_vo:H=0.8330,top10E=0.14,eRank=276.5,q75/q25=64.48 mlp_w1:H=0.7194,top10E=0.35,eRank=152.5,q75/q25=10.46 mlp_w2:H=0.8319,top10E=0.15,eRank=268.3,q75/q25=21.56 vo_prod:H=0.7470,top10E=0.23,eRank=149.8,q75/q25=4206.06 train_time:428908ms step_avg:73.95ms +[2025-09-02 16:15:13] [Rank 0] PRINT: step:5800/10000 val_loss:4.0766 svd_entropy: attn_qk:H=0.7361,top10E=0.28,eRank=141.2,q75/q25=109.19 attn_vo:H=0.8330,top10E=0.14,eRank=276.5,q75/q25=64.48 mlp_w1:H=0.7194,top10E=0.35,eRank=152.5,q75/q25=10.46 mlp_w2:H=0.8319,top10E=0.15,eRank=268.3,q75/q25=21.56 vo_prod:H=0.7470,top10E=0.23,eRank=149.8,q75/q25=4206.06 train_time:428908ms step_avg:73.95ms +[2025-09-02 16:15:13] [Rank 0] step:5801/10000 train_time:428919ms step_avg:73.94ms +[2025-09-02 16:15:13] [Rank 0] step:5801/10000 train_time:428919ms step_avg:73.94ms +[2025-09-02 16:15:14] [Rank 0] step:5821/10000 train_time:430322ms step_avg:73.93ms +[2025-09-02 16:15:14] [Rank 0] step:5821/10000 train_time:430322ms step_avg:73.93ms +[2025-09-02 16:15:16] [Rank 0] step:5841/10000 train_time:431876ms 
step_avg:73.94ms +[2025-09-02 16:15:16] [Rank 0] step:5841/10000 train_time:431876ms step_avg:73.94ms +[2025-09-02 16:15:18] [Rank 0] step:5861/10000 train_time:433437ms step_avg:73.95ms +[2025-09-02 16:15:18] [Rank 0] step:5861/10000 train_time:433437ms step_avg:73.95ms +[2025-09-02 16:15:19] [Rank 0] step:5881/10000 train_time:434998ms step_avg:73.97ms +[2025-09-02 16:15:19] [Rank 0] step:5881/10000 train_time:434998ms step_avg:73.97ms +[2025-09-02 16:15:21] [Rank 0] step:5901/10000 train_time:436558ms step_avg:73.98ms +[2025-09-02 16:15:21] [Rank 0] step:5901/10000 train_time:436558ms step_avg:73.98ms +[2025-09-02 16:15:22] [Rank 0] step:5921/10000 train_time:438117ms step_avg:73.99ms +[2025-09-02 16:15:22] [Rank 0] step:5921/10000 train_time:438117ms step_avg:73.99ms +[2025-09-02 16:15:24] [Rank 0] step:5941/10000 train_time:439681ms step_avg:74.01ms +[2025-09-02 16:15:24] [Rank 0] step:5941/10000 train_time:439681ms step_avg:74.01ms +[2025-09-02 16:15:25] [Rank 0] step:5961/10000 train_time:441245ms step_avg:74.02ms +[2025-09-02 16:15:25] [Rank 0] step:5961/10000 train_time:441245ms step_avg:74.02ms +[2025-09-02 16:15:27] [Rank 0] step:5981/10000 train_time:442810ms step_avg:74.04ms +[2025-09-02 16:15:27] [Rank 0] step:5981/10000 train_time:442810ms step_avg:74.04ms +[2025-09-02 16:15:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:15:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:15:40] [Rank 0] PRINT: step:6000/10000 val_loss:4.0536 svd_entropy: attn_qk:H=0.7383,top10E=0.28,eRank=143.0,q75/q25=110.24 attn_vo:H=0.8348,top10E=0.14,eRank=279.4,q75/q25=62.28 mlp_w1:H=0.7223,top10E=0.35,eRank=154.9,q75/q25=10.66 mlp_w2:H=0.8329,top10E=0.15,eRank=270.4,q75/q25=21.69 vo_prod:H=0.7493,top10E=0.23,eRank=152.1,q75/q25=3841.85 train_time:444524ms step_avg:74.09ms +[2025-09-02 16:15:40] [Rank 0] PRINT: step:6000/10000 val_loss:4.0536 svd_entropy: attn_qk:H=0.7383,top10E=0.28,eRank=143.0,q75/q25=110.24 attn_vo:H=0.8348,top10E=0.14,eRank=279.4,q75/q25=62.28 mlp_w1:H=0.7223,top10E=0.35,eRank=154.9,q75/q25=10.66 mlp_w2:H=0.8329,top10E=0.15,eRank=270.4,q75/q25=21.69 vo_prod:H=0.7493,top10E=0.23,eRank=152.1,q75/q25=3841.85 train_time:444524ms step_avg:74.09ms +[2025-09-02 16:15:40] [Rank 0] step:6001/10000 train_time:444535ms step_avg:74.08ms +[2025-09-02 16:15:40] [Rank 0] step:6001/10000 train_time:444535ms step_avg:74.08ms +[2025-09-02 16:15:42] [Rank 0] step:6021/10000 train_time:445952ms step_avg:74.07ms +[2025-09-02 16:15:42] [Rank 0] step:6021/10000 train_time:445952ms step_avg:74.07ms +[2025-09-02 16:15:43] [Rank 0] step:6041/10000 train_time:447514ms step_avg:74.08ms +[2025-09-02 16:15:43] [Rank 0] step:6041/10000 train_time:447514ms step_avg:74.08ms +[2025-09-02 16:15:45] [Rank 0] step:6061/10000 train_time:449082ms step_avg:74.09ms +[2025-09-02 16:15:45] [Rank 0] step:6061/10000 train_time:449082ms step_avg:74.09ms +[2025-09-02 16:15:47] [Rank 0] step:6081/10000 train_time:450645ms step_avg:74.11ms +[2025-09-02 16:15:47] [Rank 0] step:6081/10000 train_time:450645ms step_avg:74.11ms +[2025-09-02 16:15:48] [Rank 0] step:6101/10000 train_time:452209ms step_avg:74.12ms +[2025-09-02 16:15:48] [Rank 0] step:6101/10000 train_time:452209ms step_avg:74.12ms +[2025-09-02 16:15:50] [Rank 0] step:6121/10000 train_time:454039ms step_avg:74.18ms +[2025-09-02 16:15:50] [Rank 0] step:6121/10000 train_time:454039ms step_avg:74.18ms +[2025-09-02 
16:15:52] [Rank 0] step:6141/10000 train_time:455610ms step_avg:74.19ms +[2025-09-02 16:15:52] [Rank 0] step:6141/10000 train_time:455610ms step_avg:74.19ms +[2025-09-02 16:15:53] [Rank 0] step:6161/10000 train_time:457175ms step_avg:74.20ms +[2025-09-02 16:15:53] [Rank 0] step:6161/10000 train_time:457175ms step_avg:74.20ms +[2025-09-02 16:15:55] [Rank 0] step:6181/10000 train_time:458738ms step_avg:74.22ms +[2025-09-02 16:15:55] [Rank 0] step:6181/10000 train_time:458738ms step_avg:74.22ms +[2025-09-02 16:15:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:15:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:16:08] [Rank 0] PRINT: step:6200/10000 val_loss:4.0375 svd_entropy: attn_qk:H=0.7404,top10E=0.27,eRank=144.8,q75/q25=109.57 attn_vo:H=0.8365,top10E=0.14,eRank=282.2,q75/q25=60.19 mlp_w1:H=0.7249,top10E=0.34,eRank=157.1,q75/q25=10.83 mlp_w2:H=0.8338,top10E=0.14,eRank=272.3,q75/q25=21.81 vo_prod:H=0.7515,top10E=0.22,eRank=154.3,q75/q25=3518.19 train_time:460458ms step_avg:74.27ms +[2025-09-02 16:16:08] [Rank 0] PRINT: step:6200/10000 val_loss:4.0375 svd_entropy: attn_qk:H=0.7404,top10E=0.27,eRank=144.8,q75/q25=109.57 attn_vo:H=0.8365,top10E=0.14,eRank=282.2,q75/q25=60.19 mlp_w1:H=0.7249,top10E=0.34,eRank=157.1,q75/q25=10.83 mlp_w2:H=0.8338,top10E=0.14,eRank=272.3,q75/q25=21.81 vo_prod:H=0.7515,top10E=0.22,eRank=154.3,q75/q25=3518.19 train_time:460458ms step_avg:74.27ms +[2025-09-02 16:16:08] [Rank 0] step:6201/10000 train_time:460469ms step_avg:74.26ms +[2025-09-02 16:16:08] [Rank 0] step:6201/10000 train_time:460469ms step_avg:74.26ms +[2025-09-02 16:16:10] [Rank 0] step:6221/10000 train_time:461894ms step_avg:74.25ms +[2025-09-02 16:16:10] [Rank 0] step:6221/10000 train_time:461894ms step_avg:74.25ms +[2025-09-02 16:16:11] [Rank 0] step:6241/10000 train_time:463449ms 
step_avg:74.26ms +[2025-09-02 16:16:11] [Rank 0] step:6241/10000 train_time:463449ms step_avg:74.26ms +[2025-09-02 16:16:13] [Rank 0] step:6261/10000 train_time:465009ms step_avg:74.27ms +[2025-09-02 16:16:13] [Rank 0] step:6261/10000 train_time:465009ms step_avg:74.27ms +[2025-09-02 16:16:14] [Rank 0] step:6281/10000 train_time:466575ms step_avg:74.28ms +[2025-09-02 16:16:14] [Rank 0] step:6281/10000 train_time:466575ms step_avg:74.28ms +[2025-09-02 16:16:16] [Rank 0] step:6301/10000 train_time:468142ms step_avg:74.30ms +[2025-09-02 16:16:16] [Rank 0] step:6301/10000 train_time:468142ms step_avg:74.30ms +[2025-09-02 16:16:17] [Rank 0] step:6321/10000 train_time:469704ms step_avg:74.31ms +[2025-09-02 16:16:17] [Rank 0] step:6321/10000 train_time:469704ms step_avg:74.31ms +[2025-09-02 16:16:19] [Rank 0] step:6341/10000 train_time:471273ms step_avg:74.32ms +[2025-09-02 16:16:19] [Rank 0] step:6341/10000 train_time:471273ms step_avg:74.32ms +[2025-09-02 16:16:21] [Rank 0] step:6361/10000 train_time:472842ms step_avg:74.33ms +[2025-09-02 16:16:21] [Rank 0] step:6361/10000 train_time:472842ms step_avg:74.33ms +[2025-09-02 16:16:22] [Rank 0] step:6381/10000 train_time:474410ms step_avg:74.35ms +[2025-09-02 16:16:22] [Rank 0] step:6381/10000 train_time:474410ms step_avg:74.35ms +[2025-09-02 16:16:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:16:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:16:35] [Rank 0] PRINT: step:6400/10000 val_loss:4.0233 svd_entropy: attn_qk:H=0.7423,top10E=0.27,eRank=146.4,q75/q25=110.80 attn_vo:H=0.8381,top10E=0.14,eRank=284.7,q75/q25=58.67 mlp_w1:H=0.7274,top10E=0.34,eRank=159.2,q75/q25=10.99 mlp_w2:H=0.8345,top10E=0.14,eRank=274.0,q75/q25=21.95 vo_prod:H=0.7535,top10E=0.22,eRank=156.4,q75/q25=3288.06 train_time:476130ms step_avg:74.40ms +[2025-09-02 16:16:35] [Rank 0] PRINT: step:6400/10000 val_loss:4.0233 svd_entropy: attn_qk:H=0.7423,top10E=0.27,eRank=146.4,q75/q25=110.80 attn_vo:H=0.8381,top10E=0.14,eRank=284.7,q75/q25=58.67 mlp_w1:H=0.7274,top10E=0.34,eRank=159.2,q75/q25=10.99 mlp_w2:H=0.8345,top10E=0.14,eRank=274.0,q75/q25=21.95 vo_prod:H=0.7535,top10E=0.22,eRank=156.4,q75/q25=3288.06 train_time:476130ms step_avg:74.40ms +[2025-09-02 16:16:36] [Rank 0] step:6401/10000 train_time:476142ms step_avg:74.39ms +[2025-09-02 16:16:36] [Rank 0] step:6401/10000 train_time:476142ms step_avg:74.39ms +[2025-09-02 16:16:37] [Rank 0] step:6421/10000 train_time:477567ms step_avg:74.38ms +[2025-09-02 16:16:37] [Rank 0] step:6421/10000 train_time:477567ms step_avg:74.38ms +[2025-09-02 16:16:39] [Rank 0] step:6441/10000 train_time:479130ms step_avg:74.39ms +[2025-09-02 16:16:39] [Rank 0] step:6441/10000 train_time:479130ms step_avg:74.39ms +[2025-09-02 16:16:40] [Rank 0] step:6461/10000 train_time:480697ms step_avg:74.40ms +[2025-09-02 16:16:40] [Rank 0] step:6461/10000 train_time:480697ms step_avg:74.40ms +[2025-09-02 16:16:42] [Rank 0] step:6481/10000 train_time:482267ms step_avg:74.41ms +[2025-09-02 16:16:42] [Rank 0] step:6481/10000 train_time:482267ms step_avg:74.41ms +[2025-09-02 16:16:43] [Rank 0] step:6501/10000 train_time:483828ms step_avg:74.42ms +[2025-09-02 16:16:43] [Rank 0] step:6501/10000 train_time:483828ms step_avg:74.42ms +[2025-09-02 16:16:45] [Rank 0] step:6521/10000 train_time:485389ms step_avg:74.43ms +[2025-09-02 16:16:45] [Rank 0] step:6521/10000 train_time:485389ms step_avg:74.43ms +[2025-09-02 
16:16:46] [Rank 0] step:6541/10000 train_time:486954ms step_avg:74.45ms +[2025-09-02 16:16:46] [Rank 0] step:6541/10000 train_time:486954ms step_avg:74.45ms +[2025-09-02 16:16:48] [Rank 0] step:6561/10000 train_time:488521ms step_avg:74.46ms +[2025-09-02 16:16:48] [Rank 0] step:6561/10000 train_time:488521ms step_avg:74.46ms +[2025-09-02 16:16:50] [Rank 0] step:6581/10000 train_time:490083ms step_avg:74.47ms +[2025-09-02 16:16:50] [Rank 0] step:6581/10000 train_time:490083ms step_avg:74.47ms +[2025-09-02 16:16:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:16:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:17:03] [Rank 0] PRINT: step:6600/10000 val_loss:4.0081 svd_entropy: attn_qk:H=0.7441,top10E=0.27,eRank=148.0,q75/q25=110.94 attn_vo:H=0.8395,top10E=0.14,eRank=287.1,q75/q25=57.10 mlp_w1:H=0.7296,top10E=0.33,eRank=161.2,q75/q25=11.12 mlp_w2:H=0.8353,top10E=0.14,eRank=275.8,q75/q25=21.99 vo_prod:H=0.7555,top10E=0.22,eRank=158.5,q75/q25=2987.80 train_time:491806ms step_avg:74.52ms +[2025-09-02 16:17:03] [Rank 0] PRINT: step:6600/10000 val_loss:4.0081 svd_entropy: attn_qk:H=0.7441,top10E=0.27,eRank=148.0,q75/q25=110.94 attn_vo:H=0.8395,top10E=0.14,eRank=287.1,q75/q25=57.10 mlp_w1:H=0.7296,top10E=0.33,eRank=161.2,q75/q25=11.12 mlp_w2:H=0.8353,top10E=0.14,eRank=275.8,q75/q25=21.99 vo_prod:H=0.7555,top10E=0.22,eRank=158.5,q75/q25=2987.80 train_time:491806ms step_avg:74.52ms +[2025-09-02 16:17:03] [Rank 0] step:6601/10000 train_time:491818ms step_avg:74.51ms +[2025-09-02 16:17:03] [Rank 0] step:6601/10000 train_time:491818ms step_avg:74.51ms +[2025-09-02 16:17:05] [Rank 0] step:6621/10000 train_time:493247ms step_avg:74.50ms +[2025-09-02 16:17:05] [Rank 0] step:6621/10000 train_time:493247ms step_avg:74.50ms +[2025-09-02 16:17:06] [Rank 0] step:6641/10000 train_time:494818ms 
step_avg:74.51ms +[2025-09-02 16:17:06] [Rank 0] step:6641/10000 train_time:494818ms step_avg:74.51ms +[2025-09-02 16:17:08] [Rank 0] step:6661/10000 train_time:496383ms step_avg:74.52ms +[2025-09-02 16:17:08] [Rank 0] step:6661/10000 train_time:496383ms step_avg:74.52ms +[2025-09-02 16:17:09] [Rank 0] step:6681/10000 train_time:497965ms step_avg:74.53ms +[2025-09-02 16:17:09] [Rank 0] step:6681/10000 train_time:497965ms step_avg:74.53ms +[2025-09-02 16:17:11] [Rank 0] step:6701/10000 train_time:499566ms step_avg:74.55ms +[2025-09-02 16:17:11] [Rank 0] step:6701/10000 train_time:499566ms step_avg:74.55ms +[2025-09-02 16:17:12] [Rank 0] step:6721/10000 train_time:501160ms step_avg:74.57ms +[2025-09-02 16:17:12] [Rank 0] step:6721/10000 train_time:501160ms step_avg:74.57ms +[2025-09-02 16:17:14] [Rank 0] step:6741/10000 train_time:502748ms step_avg:74.58ms +[2025-09-02 16:17:14] [Rank 0] step:6741/10000 train_time:502748ms step_avg:74.58ms +[2025-09-02 16:17:16] [Rank 0] step:6761/10000 train_time:504340ms step_avg:74.60ms +[2025-09-02 16:17:16] [Rank 0] step:6761/10000 train_time:504340ms step_avg:74.60ms +[2025-09-02 16:17:17] [Rank 0] step:6781/10000 train_time:505937ms step_avg:74.61ms +[2025-09-02 16:17:17] [Rank 0] step:6781/10000 train_time:505937ms step_avg:74.61ms +[2025-09-02 16:17:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:17:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:17:30] [Rank 0] PRINT: step:6800/10000 val_loss:3.9923 svd_entropy: attn_qk:H=0.7457,top10E=0.27,eRank=149.5,q75/q25=111.27 attn_vo:H=0.8408,top10E=0.14,eRank=289.2,q75/q25=55.23 mlp_w1:H=0.7317,top10E=0.33,eRank=163.0,q75/q25=11.28 mlp_w2:H=0.8361,top10E=0.14,eRank=277.5,q75/q25=22.03 vo_prod:H=0.7572,top10E=0.22,eRank=160.3,q75/q25=2753.48 train_time:507693ms step_avg:74.66ms +[2025-09-02 16:17:30] [Rank 0] PRINT: step:6800/10000 val_loss:3.9923 svd_entropy: attn_qk:H=0.7457,top10E=0.27,eRank=149.5,q75/q25=111.27 attn_vo:H=0.8408,top10E=0.14,eRank=289.2,q75/q25=55.23 mlp_w1:H=0.7317,top10E=0.33,eRank=163.0,q75/q25=11.28 mlp_w2:H=0.8361,top10E=0.14,eRank=277.5,q75/q25=22.03 vo_prod:H=0.7572,top10E=0.22,eRank=160.3,q75/q25=2753.48 train_time:507693ms step_avg:74.66ms +[2025-09-02 16:17:31] [Rank 0] step:6801/10000 train_time:507705ms step_avg:74.65ms +[2025-09-02 16:17:31] [Rank 0] step:6801/10000 train_time:507705ms step_avg:74.65ms +[2025-09-02 16:17:32] [Rank 0] step:6821/10000 train_time:509160ms step_avg:74.65ms +[2025-09-02 16:17:32] [Rank 0] step:6821/10000 train_time:509160ms step_avg:74.65ms +[2025-09-02 16:17:34] [Rank 0] step:6841/10000 train_time:510749ms step_avg:74.66ms +[2025-09-02 16:17:34] [Rank 0] step:6841/10000 train_time:510749ms step_avg:74.66ms +[2025-09-02 16:17:35] [Rank 0] step:6861/10000 train_time:512342ms step_avg:74.67ms +[2025-09-02 16:17:35] [Rank 0] step:6861/10000 train_time:512342ms step_avg:74.67ms +[2025-09-02 16:17:37] [Rank 0] step:6881/10000 train_time:513934ms step_avg:74.69ms +[2025-09-02 16:17:37] [Rank 0] step:6881/10000 train_time:513934ms step_avg:74.69ms +[2025-09-02 16:17:39] [Rank 0] step:6901/10000 train_time:515523ms step_avg:74.70ms +[2025-09-02 16:17:39] [Rank 0] step:6901/10000 train_time:515523ms step_avg:74.70ms +[2025-09-02 16:17:40] [Rank 0] step:6921/10000 train_time:517121ms step_avg:74.72ms +[2025-09-02 16:17:40] [Rank 0] step:6921/10000 train_time:517121ms step_avg:74.72ms +[2025-09-02 
16:17:42] [Rank 0] step:6941/10000 train_time:518720ms step_avg:74.73ms +[2025-09-02 16:17:42] [Rank 0] step:6941/10000 train_time:518720ms step_avg:74.73ms +[2025-09-02 16:17:43] [Rank 0] step:6961/10000 train_time:520329ms step_avg:74.75ms +[2025-09-02 16:17:43] [Rank 0] step:6961/10000 train_time:520329ms step_avg:74.75ms +[2025-09-02 16:17:45] [Rank 0] step:6981/10000 train_time:521931ms step_avg:74.76ms +[2025-09-02 16:17:45] [Rank 0] step:6981/10000 train_time:521931ms step_avg:74.76ms +[2025-09-02 16:17:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:17:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:17:58] [Rank 0] PRINT: step:7000/10000 val_loss:3.9760 svd_entropy: attn_qk:H=0.7472,top10E=0.26,eRank=150.8,q75/q25=111.97 attn_vo:H=0.8420,top10E=0.14,eRank=291.2,q75/q25=54.20 mlp_w1:H=0.7336,top10E=0.33,eRank=164.7,q75/q25=11.46 mlp_w2:H=0.8368,top10E=0.14,eRank=278.9,q75/q25=22.23 vo_prod:H=0.7589,top10E=0.22,eRank=162.2,q75/q25=2576.83 train_time:523689ms step_avg:74.81ms +[2025-09-02 16:17:58] [Rank 0] PRINT: step:7000/10000 val_loss:3.9760 svd_entropy: attn_qk:H=0.7472,top10E=0.26,eRank=150.8,q75/q25=111.97 attn_vo:H=0.8420,top10E=0.14,eRank=291.2,q75/q25=54.20 mlp_w1:H=0.7336,top10E=0.33,eRank=164.7,q75/q25=11.46 mlp_w2:H=0.8368,top10E=0.14,eRank=278.9,q75/q25=22.23 vo_prod:H=0.7589,top10E=0.22,eRank=162.2,q75/q25=2576.83 train_time:523689ms step_avg:74.81ms +[2025-09-02 16:17:58] [Rank 0] step:7001/10000 train_time:523700ms step_avg:74.80ms +[2025-09-02 16:17:58] [Rank 0] step:7001/10000 train_time:523700ms step_avg:74.80ms +[2025-09-02 16:18:00] [Rank 0] step:7021/10000 train_time:525142ms step_avg:74.80ms +[2025-09-02 16:18:00] [Rank 0] step:7021/10000 train_time:525142ms step_avg:74.80ms +[2025-09-02 16:18:02] [Rank 0] step:7041/10000 train_time:526735ms 
step_avg:74.81ms +[2025-09-02 16:18:02] [Rank 0] step:7041/10000 train_time:526735ms step_avg:74.81ms +[2025-09-02 16:18:03] [Rank 0] step:7061/10000 train_time:528327ms step_avg:74.82ms +[2025-09-02 16:18:03] [Rank 0] step:7061/10000 train_time:528327ms step_avg:74.82ms +[2025-09-02 16:18:05] [Rank 0] step:7081/10000 train_time:529922ms step_avg:74.84ms +[2025-09-02 16:18:05] [Rank 0] step:7081/10000 train_time:529922ms step_avg:74.84ms +[2025-09-02 16:18:06] [Rank 0] step:7101/10000 train_time:531514ms step_avg:74.85ms +[2025-09-02 16:18:06] [Rank 0] step:7101/10000 train_time:531514ms step_avg:74.85ms +[2025-09-02 16:18:08] [Rank 0] step:7121/10000 train_time:533107ms step_avg:74.86ms +[2025-09-02 16:18:08] [Rank 0] step:7121/10000 train_time:533107ms step_avg:74.86ms +[2025-09-02 16:18:09] [Rank 0] step:7141/10000 train_time:534701ms step_avg:74.88ms +[2025-09-02 16:18:09] [Rank 0] step:7141/10000 train_time:534701ms step_avg:74.88ms +[2025-09-02 16:18:11] [Rank 0] step:7161/10000 train_time:536297ms step_avg:74.89ms +[2025-09-02 16:18:11] [Rank 0] step:7161/10000 train_time:536297ms step_avg:74.89ms +[2025-09-02 16:18:13] [Rank 0] step:7181/10000 train_time:537891ms step_avg:74.90ms +[2025-09-02 16:18:13] [Rank 0] step:7181/10000 train_time:537891ms step_avg:74.90ms +[2025-09-02 16:18:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:18:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:18:26] [Rank 0] PRINT: step:7200/10000 val_loss:3.9680 svd_entropy: attn_qk:H=0.7486,top10E=0.26,eRank=152.1,q75/q25=111.42 attn_vo:H=0.8431,top10E=0.13,eRank=293.0,q75/q25=52.61 mlp_w1:H=0.7353,top10E=0.33,eRank=166.3,q75/q25=11.60 mlp_w2:H=0.8374,top10E=0.14,eRank=280.3,q75/q25=22.22 vo_prod:H=0.7605,top10E=0.21,eRank=163.9,q75/q25=2433.10 train_time:539645ms step_avg:74.95ms +[2025-09-02 16:18:26] [Rank 0] PRINT: step:7200/10000 val_loss:3.9680 svd_entropy: attn_qk:H=0.7486,top10E=0.26,eRank=152.1,q75/q25=111.42 attn_vo:H=0.8431,top10E=0.13,eRank=293.0,q75/q25=52.61 mlp_w1:H=0.7353,top10E=0.33,eRank=166.3,q75/q25=11.60 mlp_w2:H=0.8374,top10E=0.14,eRank=280.3,q75/q25=22.22 vo_prod:H=0.7605,top10E=0.21,eRank=163.9,q75/q25=2433.10 train_time:539645ms step_avg:74.95ms +[2025-09-02 16:18:26] [Rank 0] step:7201/10000 train_time:539657ms step_avg:74.94ms +[2025-09-02 16:18:26] [Rank 0] step:7201/10000 train_time:539657ms step_avg:74.94ms +[2025-09-02 16:18:28] [Rank 0] step:7221/10000 train_time:541124ms step_avg:74.94ms +[2025-09-02 16:18:28] [Rank 0] step:7221/10000 train_time:541124ms step_avg:74.94ms +[2025-09-02 16:18:29] [Rank 0] step:7241/10000 train_time:542707ms step_avg:74.95ms +[2025-09-02 16:18:29] [Rank 0] step:7241/10000 train_time:542707ms step_avg:74.95ms +[2025-09-02 16:18:31] [Rank 0] step:7261/10000 train_time:544297ms step_avg:74.96ms +[2025-09-02 16:18:31] [Rank 0] step:7261/10000 train_time:544297ms step_avg:74.96ms +[2025-09-02 16:18:33] [Rank 0] step:7281/10000 train_time:545899ms step_avg:74.98ms +[2025-09-02 16:18:33] [Rank 0] step:7281/10000 train_time:545899ms step_avg:74.98ms +[2025-09-02 16:18:34] [Rank 0] step:7301/10000 train_time:547493ms step_avg:74.99ms +[2025-09-02 16:18:34] [Rank 0] step:7301/10000 train_time:547493ms step_avg:74.99ms +[2025-09-02 16:18:36] [Rank 0] step:7321/10000 train_time:549093ms step_avg:75.00ms +[2025-09-02 16:18:36] [Rank 0] step:7321/10000 train_time:549093ms step_avg:75.00ms +[2025-09-02 
16:18:37] [Rank 0] step:7341/10000 train_time:550689ms step_avg:75.02ms +[2025-09-02 16:18:37] [Rank 0] step:7341/10000 train_time:550689ms step_avg:75.02ms +[2025-09-02 16:18:39] [Rank 0] step:7361/10000 train_time:552292ms step_avg:75.03ms +[2025-09-02 16:18:39] [Rank 0] step:7361/10000 train_time:552292ms step_avg:75.03ms +[2025-09-02 16:18:41] [Rank 0] step:7381/10000 train_time:553891ms step_avg:75.04ms +[2025-09-02 16:18:41] [Rank 0] step:7381/10000 train_time:553891ms step_avg:75.04ms +[2025-09-02 16:18:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:18:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:18:54] [Rank 0] PRINT: step:7400/10000 val_loss:3.9486 svd_entropy: attn_qk:H=0.7498,top10E=0.26,eRank=153.2,q75/q25=111.41 attn_vo:H=0.8440,top10E=0.13,eRank=294.6,q75/q25=51.31 mlp_w1:H=0.7369,top10E=0.32,eRank=167.8,q75/q25=11.75 mlp_w2:H=0.8380,top10E=0.14,eRank=281.5,q75/q25=22.36 vo_prod:H=0.7617,top10E=0.21,eRank=165.3,q75/q25=2294.04 train_time:555632ms step_avg:75.09ms +[2025-09-02 16:18:54] [Rank 0] PRINT: step:7400/10000 val_loss:3.9486 svd_entropy: attn_qk:H=0.7498,top10E=0.26,eRank=153.2,q75/q25=111.41 attn_vo:H=0.8440,top10E=0.13,eRank=294.6,q75/q25=51.31 mlp_w1:H=0.7369,top10E=0.32,eRank=167.8,q75/q25=11.75 mlp_w2:H=0.8380,top10E=0.14,eRank=281.5,q75/q25=22.36 vo_prod:H=0.7617,top10E=0.21,eRank=165.3,q75/q25=2294.04 train_time:555632ms step_avg:75.09ms +[2025-09-02 16:18:54] [Rank 0] step:7401/10000 train_time:555644ms step_avg:75.08ms +[2025-09-02 16:18:54] [Rank 0] step:7401/10000 train_time:555644ms step_avg:75.08ms +[2025-09-02 16:18:56] [Rank 0] step:7421/10000 train_time:557116ms step_avg:75.07ms +[2025-09-02 16:18:56] [Rank 0] step:7421/10000 train_time:557116ms step_avg:75.07ms +[2025-09-02 16:18:57] [Rank 0] step:7441/10000 train_time:558710ms 
step_avg:75.09ms +[2025-09-02 16:18:57] [Rank 0] step:7441/10000 train_time:558710ms step_avg:75.09ms +[2025-09-02 16:18:59] [Rank 0] step:7461/10000 train_time:560301ms step_avg:75.10ms +[2025-09-02 16:18:59] [Rank 0] step:7461/10000 train_time:560301ms step_avg:75.10ms +[2025-09-02 16:19:01] [Rank 0] step:7481/10000 train_time:561899ms step_avg:75.11ms +[2025-09-02 16:19:01] [Rank 0] step:7481/10000 train_time:561899ms step_avg:75.11ms +[2025-09-02 16:19:02] [Rank 0] step:7501/10000 train_time:563498ms step_avg:75.12ms +[2025-09-02 16:19:02] [Rank 0] step:7501/10000 train_time:563498ms step_avg:75.12ms +[2025-09-02 16:19:04] [Rank 0] step:7521/10000 train_time:565092ms step_avg:75.14ms +[2025-09-02 16:19:04] [Rank 0] step:7521/10000 train_time:565092ms step_avg:75.14ms +[2025-09-02 16:19:05] [Rank 0] step:7541/10000 train_time:566701ms step_avg:75.15ms +[2025-09-02 16:19:05] [Rank 0] step:7541/10000 train_time:566701ms step_avg:75.15ms +[2025-09-02 16:19:07] [Rank 0] step:7561/10000 train_time:568285ms step_avg:75.16ms +[2025-09-02 16:19:07] [Rank 0] step:7561/10000 train_time:568285ms step_avg:75.16ms +[2025-09-02 16:19:09] [Rank 0] step:7581/10000 train_time:569891ms step_avg:75.17ms +[2025-09-02 16:19:09] [Rank 0] step:7581/10000 train_time:569891ms step_avg:75.17ms +[2025-09-02 16:19:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:19:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:19:22] [Rank 0] PRINT: step:7600/10000 val_loss:3.9451 svd_entropy: attn_qk:H=0.7509,top10E=0.26,eRank=154.2,q75/q25=111.34 attn_vo:H=0.8449,top10E=0.13,eRank=296.1,q75/q25=50.40 mlp_w1:H=0.7382,top10E=0.32,eRank=169.1,q75/q25=11.87 mlp_w2:H=0.8385,top10E=0.14,eRank=282.7,q75/q25=22.49 vo_prod:H=0.7630,top10E=0.21,eRank=166.6,q75/q25=2157.76 train_time:571657ms step_avg:75.22ms +[2025-09-02 16:19:22] [Rank 0] PRINT: step:7600/10000 val_loss:3.9451 svd_entropy: attn_qk:H=0.7509,top10E=0.26,eRank=154.2,q75/q25=111.34 attn_vo:H=0.8449,top10E=0.13,eRank=296.1,q75/q25=50.40 mlp_w1:H=0.7382,top10E=0.32,eRank=169.1,q75/q25=11.87 mlp_w2:H=0.8385,top10E=0.14,eRank=282.7,q75/q25=22.49 vo_prod:H=0.7630,top10E=0.21,eRank=166.6,q75/q25=2157.76 train_time:571657ms step_avg:75.22ms +[2025-09-02 16:19:22] [Rank 0] step:7601/10000 train_time:571668ms step_avg:75.21ms +[2025-09-02 16:19:22] [Rank 0] step:7601/10000 train_time:571668ms step_avg:75.21ms +[2025-09-02 16:19:24] [Rank 0] step:7621/10000 train_time:573113ms step_avg:75.20ms +[2025-09-02 16:19:24] [Rank 0] step:7621/10000 train_time:573113ms step_avg:75.20ms +[2025-09-02 16:19:25] [Rank 0] step:7641/10000 train_time:574706ms step_avg:75.21ms +[2025-09-02 16:19:25] [Rank 0] step:7641/10000 train_time:574706ms step_avg:75.21ms +[2025-09-02 16:19:27] [Rank 0] step:7661/10000 train_time:576302ms step_avg:75.23ms +[2025-09-02 16:19:27] [Rank 0] step:7661/10000 train_time:576302ms step_avg:75.23ms +[2025-09-02 16:19:29] [Rank 0] step:7681/10000 train_time:577895ms step_avg:75.24ms +[2025-09-02 16:19:29] [Rank 0] step:7681/10000 train_time:577895ms step_avg:75.24ms +[2025-09-02 16:19:30] [Rank 0] step:7701/10000 train_time:579487ms step_avg:75.25ms +[2025-09-02 16:19:30] [Rank 0] step:7701/10000 train_time:579487ms step_avg:75.25ms +[2025-09-02 16:19:32] [Rank 0] step:7721/10000 train_time:581091ms step_avg:75.26ms +[2025-09-02 16:19:32] [Rank 0] step:7721/10000 train_time:581091ms step_avg:75.26ms +[2025-09-02 
16:19:33] [Rank 0] step:7741/10000 train_time:582689ms step_avg:75.27ms +[2025-09-02 16:19:33] [Rank 0] step:7741/10000 train_time:582689ms step_avg:75.27ms +[2025-09-02 16:19:35] [Rank 0] step:7761/10000 train_time:584294ms step_avg:75.29ms +[2025-09-02 16:19:35] [Rank 0] step:7761/10000 train_time:584294ms step_avg:75.29ms +[2025-09-02 16:19:37] [Rank 0] step:7781/10000 train_time:585899ms step_avg:75.30ms +[2025-09-02 16:19:37] [Rank 0] step:7781/10000 train_time:585899ms step_avg:75.30ms +[2025-09-02 16:19:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:19:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:19:50] [Rank 0] PRINT: step:7800/10000 val_loss:3.9295 svd_entropy: attn_qk:H=0.7519,top10E=0.26,eRank=155.1,q75/q25=111.29 attn_vo:H=0.8457,top10E=0.13,eRank=297.4,q75/q25=49.64 mlp_w1:H=0.7396,top10E=0.32,eRank=170.4,q75/q25=11.97 mlp_w2:H=0.8390,top10E=0.14,eRank=283.7,q75/q25=22.53 vo_prod:H=0.7641,top10E=0.21,eRank=167.9,q75/q25=2029.48 train_time:587668ms step_avg:75.34ms +[2025-09-02 16:19:50] [Rank 0] PRINT: step:7800/10000 val_loss:3.9295 svd_entropy: attn_qk:H=0.7519,top10E=0.26,eRank=155.1,q75/q25=111.29 attn_vo:H=0.8457,top10E=0.13,eRank=297.4,q75/q25=49.64 mlp_w1:H=0.7396,top10E=0.32,eRank=170.4,q75/q25=11.97 mlp_w2:H=0.8390,top10E=0.14,eRank=283.7,q75/q25=22.53 vo_prod:H=0.7641,top10E=0.21,eRank=167.9,q75/q25=2029.48 train_time:587668ms step_avg:75.34ms +[2025-09-02 16:19:50] [Rank 0] step:7801/10000 train_time:587679ms step_avg:75.33ms +[2025-09-02 16:19:50] [Rank 0] step:7801/10000 train_time:587679ms step_avg:75.33ms +[2025-09-02 16:19:52] [Rank 0] step:7821/10000 train_time:589136ms step_avg:75.33ms +[2025-09-02 16:19:52] [Rank 0] step:7821/10000 train_time:589136ms step_avg:75.33ms +[2025-09-02 16:19:53] [Rank 0] step:7841/10000 train_time:590728ms 
step_avg:75.34ms +[2025-09-02 16:19:53] [Rank 0] step:7841/10000 train_time:590728ms step_avg:75.34ms +[2025-09-02 16:19:55] [Rank 0] step:7861/10000 train_time:592329ms step_avg:75.35ms +[2025-09-02 16:19:55] [Rank 0] step:7861/10000 train_time:592329ms step_avg:75.35ms +[2025-09-02 16:19:57] [Rank 0] step:7881/10000 train_time:593935ms step_avg:75.36ms +[2025-09-02 16:19:57] [Rank 0] step:7881/10000 train_time:593935ms step_avg:75.36ms +[2025-09-02 16:19:58] [Rank 0] step:7901/10000 train_time:595533ms step_avg:75.37ms +[2025-09-02 16:19:58] [Rank 0] step:7901/10000 train_time:595533ms step_avg:75.37ms +[2025-09-02 16:20:00] [Rank 0] step:7921/10000 train_time:597134ms step_avg:75.39ms +[2025-09-02 16:20:00] [Rank 0] step:7921/10000 train_time:597134ms step_avg:75.39ms +[2025-09-02 16:20:01] [Rank 0] step:7941/10000 train_time:598742ms step_avg:75.40ms +[2025-09-02 16:20:01] [Rank 0] step:7941/10000 train_time:598742ms step_avg:75.40ms +[2025-09-02 16:20:03] [Rank 0] step:7961/10000 train_time:600350ms step_avg:75.41ms +[2025-09-02 16:20:03] [Rank 0] step:7961/10000 train_time:600350ms step_avg:75.41ms +[2025-09-02 16:20:05] [Rank 0] step:7981/10000 train_time:601949ms step_avg:75.42ms +[2025-09-02 16:20:05] [Rank 0] step:7981/10000 train_time:601949ms step_avg:75.42ms +[2025-09-02 16:20:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:20:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:20:18] [Rank 0] PRINT: step:8000/10000 val_loss:3.9144 svd_entropy: attn_qk:H=0.7529,top10E=0.26,eRank=156.0,q75/q25=111.62 attn_vo:H=0.8464,top10E=0.13,eRank=298.7,q75/q25=48.75 mlp_w1:H=0.7408,top10E=0.32,eRank=171.6,q75/q25=12.10 mlp_w2:H=0.8394,top10E=0.14,eRank=284.7,q75/q25=22.61 vo_prod:H=0.7652,top10E=0.21,eRank=169.1,q75/q25=1941.75 train_time:603713ms step_avg:75.46ms +[2025-09-02 16:20:18] [Rank 0] PRINT: step:8000/10000 val_loss:3.9144 svd_entropy: attn_qk:H=0.7529,top10E=0.26,eRank=156.0,q75/q25=111.62 attn_vo:H=0.8464,top10E=0.13,eRank=298.7,q75/q25=48.75 mlp_w1:H=0.7408,top10E=0.32,eRank=171.6,q75/q25=12.10 mlp_w2:H=0.8394,top10E=0.14,eRank=284.7,q75/q25=22.61 vo_prod:H=0.7652,top10E=0.21,eRank=169.1,q75/q25=1941.75 train_time:603713ms step_avg:75.46ms +[2025-09-02 16:20:18] [Rank 0] step:8001/10000 train_time:603725ms step_avg:75.46ms +[2025-09-02 16:20:18] [Rank 0] step:8001/10000 train_time:603725ms step_avg:75.46ms +[2025-09-02 16:20:20] [Rank 0] step:8021/10000 train_time:605170ms step_avg:75.45ms +[2025-09-02 16:20:20] [Rank 0] step:8021/10000 train_time:605170ms step_avg:75.45ms +[2025-09-02 16:20:22] [Rank 0] step:8041/10000 train_time:606836ms step_avg:75.47ms +[2025-09-02 16:20:22] [Rank 0] step:8041/10000 train_time:606836ms step_avg:75.47ms +[2025-09-02 16:20:23] [Rank 0] step:8061/10000 train_time:608437ms step_avg:75.48ms +[2025-09-02 16:20:23] [Rank 0] step:8061/10000 train_time:608437ms step_avg:75.48ms +[2025-09-02 16:20:25] [Rank 0] step:8081/10000 train_time:610032ms step_avg:75.49ms +[2025-09-02 16:20:25] [Rank 0] step:8081/10000 train_time:610032ms step_avg:75.49ms +[2025-09-02 16:20:26] [Rank 0] step:8101/10000 train_time:611637ms step_avg:75.50ms +[2025-09-02 16:20:26] [Rank 0] step:8101/10000 train_time:611637ms step_avg:75.50ms +[2025-09-02 16:20:28] [Rank 0] step:8121/10000 train_time:613239ms step_avg:75.51ms +[2025-09-02 16:20:28] [Rank 0] step:8121/10000 train_time:613239ms step_avg:75.51ms +[2025-09-02 
16:20:30] [Rank 0] step:8141/10000 train_time:614946ms step_avg:75.54ms +[2025-09-02 16:20:30] [Rank 0] step:8141/10000 train_time:614946ms step_avg:75.54ms +[2025-09-02 16:20:31] [Rank 0] step:8161/10000 train_time:616561ms step_avg:75.55ms +[2025-09-02 16:20:31] [Rank 0] step:8161/10000 train_time:616561ms step_avg:75.55ms +[2025-09-02 16:20:33] [Rank 0] step:8181/10000 train_time:618192ms step_avg:75.56ms +[2025-09-02 16:20:33] [Rank 0] step:8181/10000 train_time:618192ms step_avg:75.56ms +[2025-09-02 16:20:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:20:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:20:47] [Rank 0] PRINT: step:8200/10000 val_loss:3.9040 svd_entropy: attn_qk:H=0.7537,top10E=0.26,eRank=156.8,q75/q25=111.48 attn_vo:H=0.8471,top10E=0.13,eRank=299.8,q75/q25=48.00 mlp_w1:H=0.7418,top10E=0.32,eRank=172.7,q75/q25=12.17 mlp_w2:H=0.8398,top10E=0.13,eRank=285.7,q75/q25=22.66 vo_prod:H=0.7662,top10E=0.21,eRank=170.2,q75/q25=1836.71 train_time:620012ms step_avg:75.61ms +[2025-09-02 16:20:47] [Rank 0] PRINT: step:8200/10000 val_loss:3.9040 svd_entropy: attn_qk:H=0.7537,top10E=0.26,eRank=156.8,q75/q25=111.48 attn_vo:H=0.8471,top10E=0.13,eRank=299.8,q75/q25=48.00 mlp_w1:H=0.7418,top10E=0.32,eRank=172.7,q75/q25=12.17 mlp_w2:H=0.8398,top10E=0.13,eRank=285.7,q75/q25=22.66 vo_prod:H=0.7662,top10E=0.21,eRank=170.2,q75/q25=1836.71 train_time:620012ms step_avg:75.61ms +[2025-09-02 16:20:47] [Rank 0] step:8201/10000 train_time:620024ms step_avg:75.60ms +[2025-09-02 16:20:47] [Rank 0] step:8201/10000 train_time:620024ms step_avg:75.60ms +[2025-09-02 16:20:48] [Rank 0] step:8221/10000 train_time:621518ms step_avg:75.60ms +[2025-09-02 16:20:48] [Rank 0] step:8221/10000 train_time:621518ms step_avg:75.60ms +[2025-09-02 16:20:50] [Rank 0] step:8241/10000 train_time:623149ms 
step_avg:75.62ms +[2025-09-02 16:20:50] [Rank 0] step:8241/10000 train_time:623149ms step_avg:75.62ms +[2025-09-02 16:20:52] [Rank 0] step:8261/10000 train_time:624780ms step_avg:75.63ms +[2025-09-02 16:20:52] [Rank 0] step:8261/10000 train_time:624780ms step_avg:75.63ms +[2025-09-02 16:20:53] [Rank 0] step:8281/10000 train_time:626411ms step_avg:75.64ms +[2025-09-02 16:20:53] [Rank 0] step:8281/10000 train_time:626411ms step_avg:75.64ms +[2025-09-02 16:20:55] [Rank 0] step:8301/10000 train_time:628039ms step_avg:75.66ms +[2025-09-02 16:20:55] [Rank 0] step:8301/10000 train_time:628039ms step_avg:75.66ms +[2025-09-02 16:20:56] [Rank 0] step:8321/10000 train_time:629658ms step_avg:75.67ms +[2025-09-02 16:20:56] [Rank 0] step:8321/10000 train_time:629658ms step_avg:75.67ms +[2025-09-02 16:20:58] [Rank 0] step:8341/10000 train_time:631288ms step_avg:75.68ms +[2025-09-02 16:20:58] [Rank 0] step:8341/10000 train_time:631288ms step_avg:75.68ms +[2025-09-02 16:21:00] [Rank 0] step:8361/10000 train_time:632911ms step_avg:75.70ms +[2025-09-02 16:21:00] [Rank 0] step:8361/10000 train_time:632911ms step_avg:75.70ms +[2025-09-02 16:21:01] [Rank 0] step:8381/10000 train_time:634540ms step_avg:75.71ms +[2025-09-02 16:21:01] [Rank 0] step:8381/10000 train_time:634540ms step_avg:75.71ms +[2025-09-02 16:21:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:21:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:21:15] [Rank 0] PRINT: step:8400/10000 val_loss:3.8936 svd_entropy: attn_qk:H=0.7545,top10E=0.26,eRank=157.6,q75/q25=111.36 attn_vo:H=0.8477,top10E=0.13,eRank=300.9,q75/q25=47.24 mlp_w1:H=0.7428,top10E=0.32,eRank=173.6,q75/q25=12.22 mlp_w2:H=0.8402,top10E=0.13,eRank=286.5,q75/q25=22.64 vo_prod:H=0.7671,top10E=0.21,eRank=171.3,q75/q25=1771.78 train_time:636326ms step_avg:75.75ms +[2025-09-02 16:21:15] [Rank 0] PRINT: step:8400/10000 val_loss:3.8936 svd_entropy: attn_qk:H=0.7545,top10E=0.26,eRank=157.6,q75/q25=111.36 attn_vo:H=0.8477,top10E=0.13,eRank=300.9,q75/q25=47.24 mlp_w1:H=0.7428,top10E=0.32,eRank=173.6,q75/q25=12.22 mlp_w2:H=0.8402,top10E=0.13,eRank=286.5,q75/q25=22.64 vo_prod:H=0.7671,top10E=0.21,eRank=171.3,q75/q25=1771.78 train_time:636326ms step_avg:75.75ms +[2025-09-02 16:21:15] [Rank 0] step:8401/10000 train_time:636338ms step_avg:75.75ms +[2025-09-02 16:21:15] [Rank 0] step:8401/10000 train_time:636338ms step_avg:75.75ms +[2025-09-02 16:21:17] [Rank 0] step:8421/10000 train_time:637809ms step_avg:75.74ms +[2025-09-02 16:21:17] [Rank 0] step:8421/10000 train_time:637809ms step_avg:75.74ms +[2025-09-02 16:21:18] [Rank 0] step:8441/10000 train_time:639433ms step_avg:75.75ms +[2025-09-02 16:21:18] [Rank 0] step:8441/10000 train_time:639433ms step_avg:75.75ms +[2025-09-02 16:21:20] [Rank 0] step:8461/10000 train_time:641049ms step_avg:75.77ms +[2025-09-02 16:21:20] [Rank 0] step:8461/10000 train_time:641049ms step_avg:75.77ms +[2025-09-02 16:21:22] [Rank 0] step:8481/10000 train_time:642678ms step_avg:75.78ms +[2025-09-02 16:21:22] [Rank 0] step:8481/10000 train_time:642678ms step_avg:75.78ms +[2025-09-02 16:21:23] [Rank 0] step:8501/10000 train_time:644325ms step_avg:75.79ms +[2025-09-02 16:21:23] [Rank 0] step:8501/10000 train_time:644325ms step_avg:75.79ms +[2025-09-02 16:21:25] [Rank 0] step:8521/10000 train_time:645960ms step_avg:75.81ms +[2025-09-02 16:21:25] [Rank 0] step:8521/10000 train_time:645960ms step_avg:75.81ms +[2025-09-02 
16:21:26] [Rank 0] step:8541/10000 train_time:647600ms step_avg:75.82ms +[2025-09-02 16:21:26] [Rank 0] step:8541/10000 train_time:647600ms step_avg:75.82ms +[2025-09-02 16:21:28] [Rank 0] step:8561/10000 train_time:649232ms step_avg:75.84ms +[2025-09-02 16:21:28] [Rank 0] step:8561/10000 train_time:649232ms step_avg:75.84ms +[2025-09-02 16:21:30] [Rank 0] step:8581/10000 train_time:650861ms step_avg:75.85ms +[2025-09-02 16:21:30] [Rank 0] step:8581/10000 train_time:650861ms step_avg:75.85ms +[2025-09-02 16:21:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:21:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:21:43] [Rank 0] PRINT: step:8600/10000 val_loss:3.8849 svd_entropy: attn_qk:H=0.7552,top10E=0.25,eRank=158.3,q75/q25=111.13 attn_vo:H=0.8482,top10E=0.13,eRank=301.7,q75/q25=46.69 mlp_w1:H=0.7437,top10E=0.31,eRank=174.5,q75/q25=12.29 mlp_w2:H=0.8406,top10E=0.13,eRank=287.2,q75/q25=22.66 vo_prod:H=0.7679,top10E=0.21,eRank=172.2,q75/q25=1720.14 train_time:652646ms step_avg:75.89ms +[2025-09-02 16:21:43] [Rank 0] PRINT: step:8600/10000 val_loss:3.8849 svd_entropy: attn_qk:H=0.7552,top10E=0.25,eRank=158.3,q75/q25=111.13 attn_vo:H=0.8482,top10E=0.13,eRank=301.7,q75/q25=46.69 mlp_w1:H=0.7437,top10E=0.31,eRank=174.5,q75/q25=12.29 mlp_w2:H=0.8406,top10E=0.13,eRank=287.2,q75/q25=22.66 vo_prod:H=0.7679,top10E=0.21,eRank=172.2,q75/q25=1720.14 train_time:652646ms step_avg:75.89ms +[2025-09-02 16:21:43] [Rank 0] step:8601/10000 train_time:652658ms step_avg:75.88ms +[2025-09-02 16:21:43] [Rank 0] step:8601/10000 train_time:652658ms step_avg:75.88ms +[2025-09-02 16:21:45] [Rank 0] step:8621/10000 train_time:654134ms step_avg:75.88ms +[2025-09-02 16:21:45] [Rank 0] step:8621/10000 train_time:654134ms step_avg:75.88ms +[2025-09-02 16:21:47] [Rank 0] step:8641/10000 train_time:655759ms 
step_avg:75.89ms +[2025-09-02 16:21:47] [Rank 0] step:8641/10000 train_time:655759ms step_avg:75.89ms +[2025-09-02 16:21:48] [Rank 0] step:8661/10000 train_time:657388ms step_avg:75.90ms +[2025-09-02 16:21:48] [Rank 0] step:8661/10000 train_time:657388ms step_avg:75.90ms +[2025-09-02 16:21:50] [Rank 0] step:8681/10000 train_time:659010ms step_avg:75.91ms +[2025-09-02 16:21:50] [Rank 0] step:8681/10000 train_time:659010ms step_avg:75.91ms +[2025-09-02 16:21:51] [Rank 0] step:8701/10000 train_time:660631ms step_avg:75.93ms +[2025-09-02 16:21:51] [Rank 0] step:8701/10000 train_time:660631ms step_avg:75.93ms +[2025-09-02 16:21:53] [Rank 0] step:8721/10000 train_time:662261ms step_avg:75.94ms +[2025-09-02 16:21:53] [Rank 0] step:8721/10000 train_time:662261ms step_avg:75.94ms +[2025-09-02 16:21:55] [Rank 0] step:8741/10000 train_time:663876ms step_avg:75.95ms +[2025-09-02 16:21:55] [Rank 0] step:8741/10000 train_time:663876ms step_avg:75.95ms +[2025-09-02 16:21:56] [Rank 0] step:8761/10000 train_time:665496ms step_avg:75.96ms +[2025-09-02 16:21:56] [Rank 0] step:8761/10000 train_time:665496ms step_avg:75.96ms +[2025-09-02 16:21:58] [Rank 0] step:8781/10000 train_time:667125ms step_avg:75.97ms +[2025-09-02 16:21:58] [Rank 0] step:8781/10000 train_time:667125ms step_avg:75.97ms +[2025-09-02 16:21:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:21:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:22:11] [Rank 0] PRINT: step:8800/10000 val_loss:3.8755 svd_entropy: attn_qk:H=0.7558,top10E=0.25,eRank=158.8,q75/q25=111.04 attn_vo:H=0.8487,top10E=0.13,eRank=302.5,q75/q25=46.16 mlp_w1:H=0.7445,top10E=0.31,eRank=175.4,q75/q25=12.32 mlp_w2:H=0.8409,top10E=0.13,eRank=288.0,q75/q25=22.68 vo_prod:H=0.7686,top10E=0.21,eRank=173.0,q75/q25=1629.63 train_time:668921ms step_avg:76.01ms +[2025-09-02 16:22:11] [Rank 0] PRINT: step:8800/10000 val_loss:3.8755 svd_entropy: attn_qk:H=0.7558,top10E=0.25,eRank=158.8,q75/q25=111.04 attn_vo:H=0.8487,top10E=0.13,eRank=302.5,q75/q25=46.16 mlp_w1:H=0.7445,top10E=0.31,eRank=175.4,q75/q25=12.32 mlp_w2:H=0.8409,top10E=0.13,eRank=288.0,q75/q25=22.68 vo_prod:H=0.7686,top10E=0.21,eRank=173.0,q75/q25=1629.63 train_time:668921ms step_avg:76.01ms +[2025-09-02 16:22:12] [Rank 0] step:8801/10000 train_time:668933ms step_avg:76.01ms +[2025-09-02 16:22:12] [Rank 0] step:8801/10000 train_time:668933ms step_avg:76.01ms +[2025-09-02 16:22:13] [Rank 0] step:8821/10000 train_time:670394ms step_avg:76.00ms +[2025-09-02 16:22:13] [Rank 0] step:8821/10000 train_time:670394ms step_avg:76.00ms +[2025-09-02 16:22:15] [Rank 0] step:8841/10000 train_time:672040ms step_avg:76.01ms +[2025-09-02 16:22:15] [Rank 0] step:8841/10000 train_time:672040ms step_avg:76.01ms +[2025-09-02 16:22:16] [Rank 0] step:8861/10000 train_time:673665ms step_avg:76.03ms +[2025-09-02 16:22:16] [Rank 0] step:8861/10000 train_time:673665ms step_avg:76.03ms +[2025-09-02 16:22:18] [Rank 0] step:8881/10000 train_time:675290ms step_avg:76.04ms +[2025-09-02 16:22:18] [Rank 0] step:8881/10000 train_time:675290ms step_avg:76.04ms +[2025-09-02 16:22:20] [Rank 0] step:8901/10000 train_time:676922ms step_avg:76.05ms +[2025-09-02 16:22:20] [Rank 0] step:8901/10000 train_time:676922ms step_avg:76.05ms +[2025-09-02 16:22:21] [Rank 0] step:8921/10000 train_time:678557ms step_avg:76.06ms +[2025-09-02 16:22:21] [Rank 0] step:8921/10000 train_time:678557ms step_avg:76.06ms +[2025-09-02 
16:22:23] [Rank 0] step:8941/10000 train_time:680192ms step_avg:76.08ms +[2025-09-02 16:22:23] [Rank 0] step:8941/10000 train_time:680192ms step_avg:76.08ms +[2025-09-02 16:22:25] [Rank 0] step:8961/10000 train_time:681818ms step_avg:76.09ms +[2025-09-02 16:22:25] [Rank 0] step:8961/10000 train_time:681818ms step_avg:76.09ms +[2025-09-02 16:22:26] [Rank 0] step:8981/10000 train_time:683446ms step_avg:76.10ms +[2025-09-02 16:22:26] [Rank 0] step:8981/10000 train_time:683446ms step_avg:76.10ms +[2025-09-02 16:22:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:22:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:22:40] [Rank 0] PRINT: step:9000/10000 val_loss:3.8661 svd_entropy: attn_qk:H=0.7563,top10E=0.25,eRank=159.3,q75/q25=111.07 attn_vo:H=0.8491,top10E=0.13,eRank=303.2,q75/q25=45.61 mlp_w1:H=0.7452,top10E=0.31,eRank=176.1,q75/q25=12.40 mlp_w2:H=0.8412,top10E=0.13,eRank=288.6,q75/q25=22.67 vo_prod:H=0.7692,top10E=0.20,eRank=173.8,q75/q25=1583.37 train_time:685242ms step_avg:76.14ms +[2025-09-02 16:22:40] [Rank 0] PRINT: step:9000/10000 val_loss:3.8661 svd_entropy: attn_qk:H=0.7563,top10E=0.25,eRank=159.3,q75/q25=111.07 attn_vo:H=0.8491,top10E=0.13,eRank=303.2,q75/q25=45.61 mlp_w1:H=0.7452,top10E=0.31,eRank=176.1,q75/q25=12.40 mlp_w2:H=0.8412,top10E=0.13,eRank=288.6,q75/q25=22.67 vo_prod:H=0.7692,top10E=0.20,eRank=173.8,q75/q25=1583.37 train_time:685242ms step_avg:76.14ms +[2025-09-02 16:22:40] [Rank 0] step:9001/10000 train_time:685254ms step_avg:76.13ms +[2025-09-02 16:22:40] [Rank 0] step:9001/10000 train_time:685254ms step_avg:76.13ms +[2025-09-02 16:22:41] [Rank 0] step:9021/10000 train_time:686731ms step_avg:76.13ms +[2025-09-02 16:22:41] [Rank 0] step:9021/10000 train_time:686731ms step_avg:76.13ms +[2025-09-02 16:22:43] [Rank 0] step:9041/10000 train_time:688352ms 
step_avg:76.14ms +[2025-09-02 16:22:43] [Rank 0] step:9041/10000 train_time:688352ms step_avg:76.14ms +[2025-09-02 16:22:45] [Rank 0] step:9061/10000 train_time:689990ms step_avg:76.15ms +[2025-09-02 16:22:45] [Rank 0] step:9061/10000 train_time:689990ms step_avg:76.15ms +[2025-09-02 16:22:46] [Rank 0] step:9081/10000 train_time:691623ms step_avg:76.16ms +[2025-09-02 16:22:46] [Rank 0] step:9081/10000 train_time:691623ms step_avg:76.16ms +[2025-09-02 16:22:48] [Rank 0] step:9101/10000 train_time:693274ms step_avg:76.18ms +[2025-09-02 16:22:48] [Rank 0] step:9101/10000 train_time:693274ms step_avg:76.18ms +[2025-09-02 16:22:50] [Rank 0] step:9121/10000 train_time:694905ms step_avg:76.19ms +[2025-09-02 16:22:50] [Rank 0] step:9121/10000 train_time:694905ms step_avg:76.19ms +[2025-09-02 16:22:51] [Rank 0] step:9141/10000 train_time:696523ms step_avg:76.20ms +[2025-09-02 16:22:51] [Rank 0] step:9141/10000 train_time:696523ms step_avg:76.20ms +[2025-09-02 16:22:53] [Rank 0] step:9161/10000 train_time:698136ms step_avg:76.21ms +[2025-09-02 16:22:53] [Rank 0] step:9161/10000 train_time:698136ms step_avg:76.21ms +[2025-09-02 16:22:55] [Rank 0] step:9181/10000 train_time:699794ms step_avg:76.22ms +[2025-09-02 16:22:55] [Rank 0] step:9181/10000 train_time:699794ms step_avg:76.22ms +[2025-09-02 16:22:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:22:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:23:08] [Rank 0] PRINT: step:9200/10000 val_loss:3.8587 svd_entropy: attn_qk:H=0.7568,top10E=0.25,eRank=159.8,q75/q25=111.27 attn_vo:H=0.8495,top10E=0.13,eRank=303.9,q75/q25=45.19 mlp_w1:H=0.7458,top10E=0.31,eRank=176.7,q75/q25=12.42 mlp_w2:H=0.8414,top10E=0.13,eRank=289.1,q75/q25=22.69 vo_prod:H=0.7698,top10E=0.20,eRank=174.4,q75/q25=1524.72 train_time:701586ms step_avg:76.26ms +[2025-09-02 16:23:08] [Rank 0] PRINT: step:9200/10000 val_loss:3.8587 svd_entropy: attn_qk:H=0.7568,top10E=0.25,eRank=159.8,q75/q25=111.27 attn_vo:H=0.8495,top10E=0.13,eRank=303.9,q75/q25=45.19 mlp_w1:H=0.7458,top10E=0.31,eRank=176.7,q75/q25=12.42 mlp_w2:H=0.8414,top10E=0.13,eRank=289.1,q75/q25=22.69 vo_prod:H=0.7698,top10E=0.20,eRank=174.4,q75/q25=1524.72 train_time:701586ms step_avg:76.26ms +[2025-09-02 16:23:08] [Rank 0] step:9201/10000 train_time:701597ms step_avg:76.25ms +[2025-09-02 16:23:08] [Rank 0] step:9201/10000 train_time:701597ms step_avg:76.25ms +[2025-09-02 16:23:10] [Rank 0] step:9221/10000 train_time:703094ms step_avg:76.25ms +[2025-09-02 16:23:10] [Rank 0] step:9221/10000 train_time:703094ms step_avg:76.25ms +[2025-09-02 16:23:11] [Rank 0] step:9241/10000 train_time:704732ms step_avg:76.26ms +[2025-09-02 16:23:11] [Rank 0] step:9241/10000 train_time:704732ms step_avg:76.26ms +[2025-09-02 16:23:13] [Rank 0] step:9261/10000 train_time:706369ms step_avg:76.27ms +[2025-09-02 16:23:13] [Rank 0] step:9261/10000 train_time:706369ms step_avg:76.27ms +[2025-09-02 16:23:15] [Rank 0] step:9281/10000 train_time:707989ms step_avg:76.28ms +[2025-09-02 16:23:15] [Rank 0] step:9281/10000 train_time:707989ms step_avg:76.28ms +[2025-09-02 16:23:16] [Rank 0] step:9301/10000 train_time:709616ms step_avg:76.29ms +[2025-09-02 16:23:16] [Rank 0] step:9301/10000 train_time:709616ms step_avg:76.29ms +[2025-09-02 16:23:18] [Rank 0] step:9321/10000 train_time:711247ms step_avg:76.31ms +[2025-09-02 16:23:18] [Rank 0] step:9321/10000 train_time:711247ms step_avg:76.31ms +[2025-09-02 
16:23:20] [Rank 0] step:9341/10000 train_time:712876ms step_avg:76.32ms +[2025-09-02 16:23:20] [Rank 0] step:9341/10000 train_time:712876ms step_avg:76.32ms +[2025-09-02 16:23:21] [Rank 0] step:9361/10000 train_time:714536ms step_avg:76.33ms +[2025-09-02 16:23:21] [Rank 0] step:9361/10000 train_time:714536ms step_avg:76.33ms +[2025-09-02 16:23:23] [Rank 0] step:9381/10000 train_time:716182ms step_avg:76.34ms +[2025-09-02 16:23:23] [Rank 0] step:9381/10000 train_time:716182ms step_avg:76.34ms +[2025-09-02 16:23:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:23:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:23:36] [Rank 0] PRINT: step:9400/10000 val_loss:3.8511 svd_entropy: attn_qk:H=0.7572,top10E=0.25,eRank=160.1,q75/q25=111.09 attn_vo:H=0.8498,top10E=0.13,eRank=304.4,q75/q25=44.89 mlp_w1:H=0.7463,top10E=0.31,eRank=177.3,q75/q25=12.44 mlp_w2:H=0.8416,top10E=0.13,eRank=289.6,q75/q25=22.70 vo_prod:H=0.7703,top10E=0.20,eRank=175.0,q75/q25=1510.62 train_time:717982ms step_avg:76.38ms +[2025-09-02 16:23:36] [Rank 0] PRINT: step:9400/10000 val_loss:3.8511 svd_entropy: attn_qk:H=0.7572,top10E=0.25,eRank=160.1,q75/q25=111.09 attn_vo:H=0.8498,top10E=0.13,eRank=304.4,q75/q25=44.89 mlp_w1:H=0.7463,top10E=0.31,eRank=177.3,q75/q25=12.44 mlp_w2:H=0.8416,top10E=0.13,eRank=289.6,q75/q25=22.70 vo_prod:H=0.7703,top10E=0.20,eRank=175.0,q75/q25=1510.62 train_time:717982ms step_avg:76.38ms +[2025-09-02 16:23:36] [Rank 0] step:9401/10000 train_time:717993ms step_avg:76.37ms +[2025-09-02 16:23:36] [Rank 0] step:9401/10000 train_time:717993ms step_avg:76.37ms +[2025-09-02 16:23:38] [Rank 0] step:9421/10000 train_time:719462ms step_avg:76.37ms +[2025-09-02 16:23:38] [Rank 0] step:9421/10000 train_time:719462ms step_avg:76.37ms +[2025-09-02 16:23:40] [Rank 0] step:9441/10000 train_time:721094ms 
step_avg:76.38ms +[2025-09-02 16:23:40] [Rank 0] step:9441/10000 train_time:721094ms step_avg:76.38ms +[2025-09-02 16:23:41] [Rank 0] step:9461/10000 train_time:722729ms step_avg:76.39ms +[2025-09-02 16:23:41] [Rank 0] step:9461/10000 train_time:722729ms step_avg:76.39ms +[2025-09-02 16:23:43] [Rank 0] step:9481/10000 train_time:724360ms step_avg:76.40ms +[2025-09-02 16:23:43] [Rank 0] step:9481/10000 train_time:724360ms step_avg:76.40ms +[2025-09-02 16:23:45] [Rank 0] step:9501/10000 train_time:726003ms step_avg:76.41ms +[2025-09-02 16:23:45] [Rank 0] step:9501/10000 train_time:726003ms step_avg:76.41ms +[2025-09-02 16:23:46] [Rank 0] step:9521/10000 train_time:727628ms step_avg:76.42ms +[2025-09-02 16:23:46] [Rank 0] step:9521/10000 train_time:727628ms step_avg:76.42ms +[2025-09-02 16:23:48] [Rank 0] step:9541/10000 train_time:729258ms step_avg:76.43ms +[2025-09-02 16:23:48] [Rank 0] step:9541/10000 train_time:729258ms step_avg:76.43ms +[2025-09-02 16:23:50] [Rank 0] step:9561/10000 train_time:730886ms step_avg:76.44ms +[2025-09-02 16:23:50] [Rank 0] step:9561/10000 train_time:730886ms step_avg:76.44ms +[2025-09-02 16:23:51] [Rank 0] step:9581/10000 train_time:732511ms step_avg:76.45ms +[2025-09-02 16:23:51] [Rank 0] step:9581/10000 train_time:732511ms step_avg:76.45ms +[2025-09-02 16:23:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:23:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:24:05] [Rank 0] PRINT: step:9600/10000 val_loss:3.8454 svd_entropy: attn_qk:H=0.7575,top10E=0.25,eRank=160.5,q75/q25=111.00 attn_vo:H=0.8500,top10E=0.13,eRank=304.8,q75/q25=44.67 mlp_w1:H=0.7468,top10E=0.31,eRank=177.7,q75/q25=12.47 mlp_w2:H=0.8418,top10E=0.13,eRank=290.0,q75/q25=22.69 vo_prod:H=0.7707,top10E=0.20,eRank=175.4,q75/q25=1483.62 train_time:734316ms step_avg:76.49ms +[2025-09-02 16:24:05] [Rank 0] PRINT: step:9600/10000 val_loss:3.8454 svd_entropy: attn_qk:H=0.7575,top10E=0.25,eRank=160.5,q75/q25=111.00 attn_vo:H=0.8500,top10E=0.13,eRank=304.8,q75/q25=44.67 mlp_w1:H=0.7468,top10E=0.31,eRank=177.7,q75/q25=12.47 mlp_w2:H=0.8418,top10E=0.13,eRank=290.0,q75/q25=22.69 vo_prod:H=0.7707,top10E=0.20,eRank=175.4,q75/q25=1483.62 train_time:734316ms step_avg:76.49ms +[2025-09-02 16:24:05] [Rank 0] step:9601/10000 train_time:734327ms step_avg:76.48ms +[2025-09-02 16:24:05] [Rank 0] step:9601/10000 train_time:734327ms step_avg:76.48ms +[2025-09-02 16:24:06] [Rank 0] step:9621/10000 train_time:735824ms step_avg:76.48ms +[2025-09-02 16:24:06] [Rank 0] step:9621/10000 train_time:735824ms step_avg:76.48ms +[2025-09-02 16:24:08] [Rank 0] step:9641/10000 train_time:737456ms step_avg:76.49ms +[2025-09-02 16:24:08] [Rank 0] step:9641/10000 train_time:737456ms step_avg:76.49ms +[2025-09-02 16:24:10] [Rank 0] step:9661/10000 train_time:739110ms step_avg:76.50ms +[2025-09-02 16:24:10] [Rank 0] step:9661/10000 train_time:739110ms step_avg:76.50ms +[2025-09-02 16:24:11] [Rank 0] step:9681/10000 train_time:740760ms step_avg:76.52ms +[2025-09-02 16:24:11] [Rank 0] step:9681/10000 train_time:740760ms step_avg:76.52ms +[2025-09-02 16:24:13] [Rank 0] step:9701/10000 train_time:742431ms step_avg:76.53ms +[2025-09-02 16:24:13] [Rank 0] step:9701/10000 train_time:742431ms step_avg:76.53ms +[2025-09-02 16:24:15] [Rank 0] step:9721/10000 train_time:744079ms step_avg:76.54ms +[2025-09-02 16:24:15] [Rank 0] step:9721/10000 train_time:744079ms step_avg:76.54ms +[2025-09-02 
16:24:16] [Rank 0] step:9741/10000 train_time:745753ms step_avg:76.56ms +[2025-09-02 16:24:16] [Rank 0] step:9741/10000 train_time:745753ms step_avg:76.56ms +[2025-09-02 16:24:18] [Rank 0] step:9761/10000 train_time:747411ms step_avg:76.57ms +[2025-09-02 16:24:18] [Rank 0] step:9761/10000 train_time:747411ms step_avg:76.57ms +[2025-09-02 16:24:20] [Rank 0] step:9781/10000 train_time:749081ms step_avg:76.59ms +[2025-09-02 16:24:20] [Rank 0] step:9781/10000 train_time:749081ms step_avg:76.59ms +[2025-09-02 16:24:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:24:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:24:33] [Rank 0] PRINT: step:9800/10000 val_loss:3.8390 svd_entropy: attn_qk:H=0.7577,top10E=0.25,eRank=160.7,q75/q25=110.93 attn_vo:H=0.8502,top10E=0.13,eRank=305.1,q75/q25=44.44 mlp_w1:H=0.7471,top10E=0.31,eRank=178.1,q75/q25=12.50 mlp_w2:H=0.8420,top10E=0.13,eRank=290.3,q75/q25=22.69 vo_prod:H=0.7710,top10E=0.20,eRank=175.8,q75/q25=1462.84 train_time:750920ms step_avg:76.62ms +[2025-09-02 16:24:33] [Rank 0] PRINT: step:9800/10000 val_loss:3.8390 svd_entropy: attn_qk:H=0.7577,top10E=0.25,eRank=160.7,q75/q25=110.93 attn_vo:H=0.8502,top10E=0.13,eRank=305.1,q75/q25=44.44 mlp_w1:H=0.7471,top10E=0.31,eRank=178.1,q75/q25=12.50 mlp_w2:H=0.8420,top10E=0.13,eRank=290.3,q75/q25=22.69 vo_prod:H=0.7710,top10E=0.20,eRank=175.8,q75/q25=1462.84 train_time:750920ms step_avg:76.62ms +[2025-09-02 16:24:33] [Rank 0] step:9801/10000 train_time:750931ms step_avg:76.62ms +[2025-09-02 16:24:33] [Rank 0] step:9801/10000 train_time:750931ms step_avg:76.62ms +[2025-09-02 16:24:35] [Rank 0] step:9821/10000 train_time:752436ms step_avg:76.61ms +[2025-09-02 16:24:35] [Rank 0] step:9821/10000 train_time:752436ms step_avg:76.61ms +[2025-09-02 16:24:37] [Rank 0] step:9841/10000 train_time:754106ms 
step_avg:76.63ms +[2025-09-02 16:24:37] [Rank 0] step:9841/10000 train_time:754106ms step_avg:76.63ms +[2025-09-02 16:24:38] [Rank 0] step:9861/10000 train_time:755754ms step_avg:76.64ms +[2025-09-02 16:24:38] [Rank 0] step:9861/10000 train_time:755754ms step_avg:76.64ms +[2025-09-02 16:24:40] [Rank 0] step:9881/10000 train_time:757401ms step_avg:76.65ms +[2025-09-02 16:24:40] [Rank 0] step:9881/10000 train_time:757401ms step_avg:76.65ms +[2025-09-02 16:24:42] [Rank 0] step:9901/10000 train_time:759065ms step_avg:76.67ms +[2025-09-02 16:24:42] [Rank 0] step:9901/10000 train_time:759065ms step_avg:76.67ms +[2025-09-02 16:24:43] [Rank 0] step:9921/10000 train_time:760718ms step_avg:76.68ms +[2025-09-02 16:24:43] [Rank 0] step:9921/10000 train_time:760718ms step_avg:76.68ms +[2025-09-02 16:24:45] [Rank 0] step:9941/10000 train_time:762379ms step_avg:76.69ms +[2025-09-02 16:24:45] [Rank 0] step:9941/10000 train_time:762379ms step_avg:76.69ms +[2025-09-02 16:24:47] [Rank 0] step:9961/10000 train_time:764034ms step_avg:76.70ms +[2025-09-02 16:24:47] [Rank 0] step:9961/10000 train_time:764034ms step_avg:76.70ms +[2025-09-02 16:24:48] [Rank 0] step:9981/10000 train_time:765689ms step_avg:76.71ms +[2025-09-02 16:24:48] [Rank 0] step:9981/10000 train_time:765689ms step_avg:76.71ms +[2025-09-02 16:24:50] [Rank 0] step:10000/10000 train_time:767268ms step_avg:76.73ms +[2025-09-02 16:24:50] [Rank 0] step:10000/10000 train_time:767268ms step_avg:76.73ms +[2025-09-02 16:24:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 16:24:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 16:25:02] [Rank 0] PRINT: step:10000/10000 val_loss:3.8336 svd_entropy: attn_qk:H=0.7579,top10E=0.25,eRank=160.8,q75/q25=110.90 attn_vo:H=0.8503,top10E=0.13,eRank=305.4,q75/q25=44.23 mlp_w1:H=0.7474,top10E=0.31,eRank=178.3,q75/q25=12.50 mlp_w2:H=0.8421,top10E=0.13,eRank=290.5,q75/q25=22.69 vo_prod:H=0.7713,top10E=0.20,eRank=176.1,q75/q25=1446.12 train_time:767526ms step_avg:76.75ms +[2025-09-02 16:25:02] [Rank 0] PRINT: step:10000/10000 val_loss:3.8336 svd_entropy: attn_qk:H=0.7579,top10E=0.25,eRank=160.8,q75/q25=110.90 attn_vo:H=0.8503,top10E=0.13,eRank=305.4,q75/q25=44.23 mlp_w1:H=0.7474,top10E=0.31,eRank=178.3,q75/q25=12.50 mlp_w2:H=0.8421,top10E=0.13,eRank=290.5,q75/q25=22.69 vo_prod:H=0.7713,top10E=0.20,eRank=176.1,q75/q25=1446.12 train_time:767526ms step_avg:76.75ms +[2025-09-02 16:25:02] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 16:25:02 2025 --- +[2025-09-02 16:25:02] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 16:25:02 2025 --- +[2025-09-02 16:25:02] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB +[2025-09-02 16:25:02] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_48/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f417cfadfc2368daff62f74b0d42473df95053fd --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_48/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "fe5b26d5-d8c6-496d-ac51-0ea10bbb582f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_48/training_log_fe5b26d5-d8c6-496d-ac51-0ea10bbb582f.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_48/training_log_fe5b26d5-d8c6-496d-ac51-0ea10bbb582f.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8d8d79731348302fff28b2a0d7e4dde22424320 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_48/training_log_fe5b26d5-d8c6-496d-ac51-0ea10bbb582f.txt @@ -0,0 +1,2984 @@ +[2025-09-02 17:13:56] [Rank 0] PRINT: --- Script Start: Tue Sep 2 17:13:56 2025 --- +[2025-09-02 17:13:56] [Rank 0] PRINT: --- Script Start: Tue Sep 2 17:13:56 2025 --- +[2025-09-02 17:13:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 17:13:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 17:13:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 17:13:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 17:13:56] [Rank 0] PRINT: Using fixed seed: 48 +[2025-09-02 17:13:56] [Rank 0] PRINT: Using fixed seed: 48 +[2025-09-02 17:13:56] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_48 +[2025-09-02 17:13:56] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_48 +[2025-09-02 17:13:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 17:13:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 17:13:56] [Rank 0] PRINT: Constructing model... +[2025-09-02 17:13:56] [Rank 0] PRINT: Constructing model... +[2025-09-02 17:13:58] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 17:13:58] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 17:13:58] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 17:13:58] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 17:13:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 17:13:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 17:13:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 17:13:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 17:13:58] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 17:13:58] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 17:13:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 17:13:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 17:13:58] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 17:13:58] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 17:13:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 17:13:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 17:13:58] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 17:13:58] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 17:13:58] [Rank 0] PRINT: Starting warmup... +[2025-09-02 17:13:58] [Rank 0] PRINT: Starting warmup... +[2025-09-02 17:14:41] [Rank 0] PRINT: Warmup complete. +[2025-09-02 17:14:41] [Rank 0] PRINT: Warmup complete. +[2025-09-02 17:14:41] [Rank 0] PRINT: Starting training... +[2025-09-02 17:14:41] [Rank 0] PRINT: Starting training... 
+[2025-09-02 17:14:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:14:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:14:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 17:14:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.8,q75/q25=10.25 attn_vo:H=0.4624,top10E=0.02,eRank=233.0,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 17:14:58] [Rank 0] step:21/10000 train_time:1314ms step_avg:62.55ms +[2025-09-02 17:14:58] [Rank 0] step:21/10000 train_time:1314ms step_avg:62.55ms +[2025-09-02 17:15:00] [Rank 0] step:41/10000 train_time:2710ms step_avg:66.11ms +[2025-09-02 17:15:00] [Rank 0] step:41/10000 train_time:2710ms step_avg:66.11ms +[2025-09-02 17:15:01] [Rank 0] step:61/10000 train_time:4112ms step_avg:67.40ms +[2025-09-02 17:15:01] [Rank 0] step:61/10000 train_time:4112ms step_avg:67.40ms +[2025-09-02 17:15:02] [Rank 0] step:81/10000 train_time:5516ms step_avg:68.09ms +[2025-09-02 17:15:02] [Rank 0] step:81/10000 train_time:5516ms step_avg:68.09ms +[2025-09-02 17:15:04] [Rank 0] step:101/10000 train_time:6919ms step_avg:68.50ms +[2025-09-02 17:15:04] [Rank 0] step:101/10000 train_time:6919ms step_avg:68.50ms +[2025-09-02 17:15:05] [Rank 0] step:121/10000 train_time:8323ms step_avg:68.79ms +[2025-09-02 17:15:05] [Rank 0] step:121/10000 
train_time:8323ms step_avg:68.79ms +[2025-09-02 17:15:07] [Rank 0] step:141/10000 train_time:9727ms step_avg:68.99ms +[2025-09-02 17:15:07] [Rank 0] step:141/10000 train_time:9727ms step_avg:68.99ms +[2025-09-02 17:15:08] [Rank 0] step:161/10000 train_time:11263ms step_avg:69.96ms +[2025-09-02 17:15:08] [Rank 0] step:161/10000 train_time:11263ms step_avg:69.96ms +[2025-09-02 17:15:09] [Rank 0] step:181/10000 train_time:12580ms step_avg:69.50ms +[2025-09-02 17:15:09] [Rank 0] step:181/10000 train_time:12580ms step_avg:69.50ms +[2025-09-02 17:15:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:15:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:15:22] [Rank 0] PRINT: step:200/10000 val_loss:6.5304 svd_entropy: attn_qk:H=0.4420,top10E=0.81,eRank=34.5,q75/q25=12.32 attn_vo:H=0.5278,top10E=0.66,eRank=99.9,q75/q25=121.68 mlp_w1:H=0.4235,top10E=0.76,eRank=18.5,q75/q25=2.71 mlp_w2:H=0.1684,top10E=0.96,eRank=4.0,q75/q25=402.67 vo_prod:H=0.2349,top10E=0.97,eRank=6.1,q75/q25=794.82 train_time:14128ms step_avg:70.64ms +[2025-09-02 17:15:22] [Rank 0] PRINT: step:200/10000 val_loss:6.5304 svd_entropy: attn_qk:H=0.4420,top10E=0.81,eRank=34.5,q75/q25=12.32 attn_vo:H=0.5278,top10E=0.66,eRank=99.9,q75/q25=121.68 mlp_w1:H=0.4235,top10E=0.76,eRank=18.5,q75/q25=2.71 mlp_w2:H=0.1684,top10E=0.96,eRank=4.0,q75/q25=402.67 vo_prod:H=0.2349,top10E=0.97,eRank=6.1,q75/q25=794.82 train_time:14128ms step_avg:70.64ms +[2025-09-02 17:15:23] [Rank 0] step:201/10000 train_time:14139ms step_avg:70.34ms +[2025-09-02 17:15:23] [Rank 0] step:201/10000 train_time:14139ms step_avg:70.34ms +[2025-09-02 17:15:24] [Rank 0] step:221/10000 train_time:15416ms step_avg:69.76ms +[2025-09-02 17:15:24] [Rank 0] step:221/10000 train_time:15416ms step_avg:69.76ms +[2025-09-02 17:15:25] [Rank 0] step:241/10000 
train_time:16820ms step_avg:69.79ms +[2025-09-02 17:15:25] [Rank 0] step:241/10000 train_time:16820ms step_avg:69.79ms +[2025-09-02 17:15:27] [Rank 0] step:261/10000 train_time:18226ms step_avg:69.83ms +[2025-09-02 17:15:27] [Rank 0] step:261/10000 train_time:18226ms step_avg:69.83ms +[2025-09-02 17:15:28] [Rank 0] step:281/10000 train_time:19632ms step_avg:69.86ms +[2025-09-02 17:15:28] [Rank 0] step:281/10000 train_time:19632ms step_avg:69.86ms +[2025-09-02 17:15:30] [Rank 0] step:301/10000 train_time:21039ms step_avg:69.90ms +[2025-09-02 17:15:30] [Rank 0] step:301/10000 train_time:21039ms step_avg:69.90ms +[2025-09-02 17:15:31] [Rank 0] step:321/10000 train_time:22445ms step_avg:69.92ms +[2025-09-02 17:15:31] [Rank 0] step:321/10000 train_time:22445ms step_avg:69.92ms +[2025-09-02 17:15:32] [Rank 0] step:341/10000 train_time:23852ms step_avg:69.95ms +[2025-09-02 17:15:32] [Rank 0] step:341/10000 train_time:23852ms step_avg:69.95ms +[2025-09-02 17:15:34] [Rank 0] step:361/10000 train_time:25257ms step_avg:69.97ms +[2025-09-02 17:15:34] [Rank 0] step:361/10000 train_time:25257ms step_avg:69.97ms +[2025-09-02 17:15:35] [Rank 0] step:381/10000 train_time:26665ms step_avg:69.99ms +[2025-09-02 17:15:35] [Rank 0] step:381/10000 train_time:26665ms step_avg:69.99ms +[2025-09-02 17:15:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:15:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:15:48] [Rank 0] PRINT: step:400/10000 val_loss:6.0262 svd_entropy: attn_qk:H=0.4988,top10E=0.71,eRank=42.8,q75/q25=13.74 attn_vo:H=0.5635,top10E=0.57,eRank=83.3,q75/q25=45.55 mlp_w1:H=0.4503,top10E=0.70,eRank=27.4,q75/q25=3.27 mlp_w2:H=0.5268,top10E=0.62,eRank=34.4,q75/q25=17.95 vo_prod:H=0.3817,top10E=0.86,eRank=14.1,q75/q25=325.93 train_time:28213ms step_avg:70.53ms +[2025-09-02 17:15:48] [Rank 0] PRINT: step:400/10000 val_loss:6.0262 svd_entropy: attn_qk:H=0.4988,top10E=0.71,eRank=42.8,q75/q25=13.74 attn_vo:H=0.5635,top10E=0.57,eRank=83.3,q75/q25=45.55 mlp_w1:H=0.4503,top10E=0.70,eRank=27.4,q75/q25=3.27 mlp_w2:H=0.5268,top10E=0.62,eRank=34.4,q75/q25=17.95 vo_prod:H=0.3817,top10E=0.86,eRank=14.1,q75/q25=325.93 train_time:28213ms step_avg:70.53ms +[2025-09-02 17:15:48] [Rank 0] step:401/10000 train_time:28224ms step_avg:70.38ms +[2025-09-02 17:15:48] [Rank 0] step:401/10000 train_time:28224ms step_avg:70.38ms +[2025-09-02 17:15:50] [Rank 0] step:421/10000 train_time:29518ms step_avg:70.11ms +[2025-09-02 17:15:50] [Rank 0] step:421/10000 train_time:29518ms step_avg:70.11ms +[2025-09-02 17:15:51] [Rank 0] step:441/10000 train_time:30923ms step_avg:70.12ms +[2025-09-02 17:15:51] [Rank 0] step:441/10000 train_time:30923ms step_avg:70.12ms +[2025-09-02 17:15:53] [Rank 0] step:461/10000 train_time:32328ms step_avg:70.13ms +[2025-09-02 17:15:53] [Rank 0] step:461/10000 train_time:32328ms step_avg:70.13ms +[2025-09-02 17:15:54] [Rank 0] step:481/10000 train_time:33734ms step_avg:70.13ms +[2025-09-02 17:15:54] [Rank 0] step:481/10000 train_time:33734ms step_avg:70.13ms +[2025-09-02 17:15:56] [Rank 0] step:501/10000 train_time:35141ms step_avg:70.14ms +[2025-09-02 17:15:56] [Rank 0] step:501/10000 train_time:35141ms step_avg:70.14ms +[2025-09-02 17:15:57] [Rank 0] step:521/10000 train_time:36548ms step_avg:70.15ms +[2025-09-02 17:15:57] [Rank 0] step:521/10000 train_time:36548ms step_avg:70.15ms +[2025-09-02 17:15:58] [Rank 0] step:541/10000 
train_time:37955ms step_avg:70.16ms +[2025-09-02 17:15:58] [Rank 0] step:541/10000 train_time:37955ms step_avg:70.16ms +[2025-09-02 17:16:00] [Rank 0] step:561/10000 train_time:39362ms step_avg:70.16ms +[2025-09-02 17:16:00] [Rank 0] step:561/10000 train_time:39362ms step_avg:70.16ms +[2025-09-02 17:16:01] [Rank 0] step:581/10000 train_time:40770ms step_avg:70.17ms +[2025-09-02 17:16:01] [Rank 0] step:581/10000 train_time:40770ms step_avg:70.17ms +[2025-09-02 17:16:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:16:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:16:14] [Rank 0] PRINT: step:600/10000 val_loss:5.7235 svd_entropy: attn_qk:H=0.5353,top10E=0.63,eRank=50.0,q75/q25=15.29 attn_vo:H=0.5981,top10E=0.48,eRank=90.0,q75/q25=32.37 mlp_w1:H=0.4890,top10E=0.65,eRank=37.3,q75/q25=3.65 mlp_w2:H=0.6076,top10E=0.48,eRank=58.7,q75/q25=15.65 vo_prod:H=0.4576,top10E=0.72,eRank=22.6,q75/q25=264.49 train_time:42320ms step_avg:70.53ms +[2025-09-02 17:16:14] [Rank 0] PRINT: step:600/10000 val_loss:5.7235 svd_entropy: attn_qk:H=0.5353,top10E=0.63,eRank=50.0,q75/q25=15.29 attn_vo:H=0.5981,top10E=0.48,eRank=90.0,q75/q25=32.37 mlp_w1:H=0.4890,top10E=0.65,eRank=37.3,q75/q25=3.65 mlp_w2:H=0.6076,top10E=0.48,eRank=58.7,q75/q25=15.65 vo_prod:H=0.4576,top10E=0.72,eRank=22.6,q75/q25=264.49 train_time:42320ms step_avg:70.53ms +[2025-09-02 17:16:14] [Rank 0] step:601/10000 train_time:42331ms step_avg:70.43ms +[2025-09-02 17:16:14] [Rank 0] step:601/10000 train_time:42331ms step_avg:70.43ms +[2025-09-02 17:16:16] [Rank 0] step:621/10000 train_time:43603ms step_avg:70.21ms +[2025-09-02 17:16:16] [Rank 0] step:621/10000 train_time:43603ms step_avg:70.21ms +[2025-09-02 17:16:17] [Rank 0] step:641/10000 train_time:45008ms step_avg:70.21ms +[2025-09-02 17:16:17] [Rank 0] step:641/10000 
train_time:45008ms step_avg:70.21ms +[2025-09-02 17:16:19] [Rank 0] step:661/10000 train_time:46414ms step_avg:70.22ms +[2025-09-02 17:16:19] [Rank 0] step:661/10000 train_time:46414ms step_avg:70.22ms +[2025-09-02 17:16:20] [Rank 0] step:681/10000 train_time:47820ms step_avg:70.22ms +[2025-09-02 17:16:20] [Rank 0] step:681/10000 train_time:47820ms step_avg:70.22ms +[2025-09-02 17:16:21] [Rank 0] step:701/10000 train_time:49227ms step_avg:70.22ms +[2025-09-02 17:16:21] [Rank 0] step:701/10000 train_time:49227ms step_avg:70.22ms +[2025-09-02 17:16:23] [Rank 0] step:721/10000 train_time:50636ms step_avg:70.23ms +[2025-09-02 17:16:23] [Rank 0] step:721/10000 train_time:50636ms step_avg:70.23ms +[2025-09-02 17:16:24] [Rank 0] step:741/10000 train_time:52044ms step_avg:70.24ms +[2025-09-02 17:16:24] [Rank 0] step:741/10000 train_time:52044ms step_avg:70.24ms +[2025-09-02 17:16:26] [Rank 0] step:761/10000 train_time:53464ms step_avg:70.25ms +[2025-09-02 17:16:26] [Rank 0] step:761/10000 train_time:53464ms step_avg:70.25ms +[2025-09-02 17:16:27] [Rank 0] step:781/10000 train_time:54882ms step_avg:70.27ms +[2025-09-02 17:16:27] [Rank 0] step:781/10000 train_time:54882ms step_avg:70.27ms +[2025-09-02 17:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:16:40] [Rank 0] PRINT: step:800/10000 val_loss:5.4906 svd_entropy: attn_qk:H=0.5614,top10E=0.57,eRank=55.8,q75/q25=17.09 attn_vo:H=0.6252,top10E=0.42,eRank=98.2,q75/q25=29.60 mlp_w1:H=0.5211,top10E=0.61,eRank=45.9,q75/q25=4.00 mlp_w2:H=0.6542,top10E=0.40,eRank=79.5,q75/q25=15.61 vo_prod:H=0.5014,top10E=0.63,eRank=29.9,q75/q25=307.59 train_time:56445ms step_avg:70.56ms +[2025-09-02 17:16:40] [Rank 0] PRINT: step:800/10000 val_loss:5.4906 svd_entropy: attn_qk:H=0.5614,top10E=0.57,eRank=55.8,q75/q25=17.09 attn_vo:H=0.6252,top10E=0.42,eRank=98.2,q75/q25=29.60 mlp_w1:H=0.5211,top10E=0.61,eRank=45.9,q75/q25=4.00 mlp_w2:H=0.6542,top10E=0.40,eRank=79.5,q75/q25=15.61 vo_prod:H=0.5014,top10E=0.63,eRank=29.9,q75/q25=307.59 train_time:56445ms step_avg:70.56ms +[2025-09-02 17:16:40] [Rank 0] step:801/10000 train_time:56456ms step_avg:70.48ms +[2025-09-02 17:16:40] [Rank 0] step:801/10000 train_time:56456ms step_avg:70.48ms +[2025-09-02 17:16:42] [Rank 0] step:821/10000 train_time:57743ms step_avg:70.33ms +[2025-09-02 17:16:42] [Rank 0] step:821/10000 train_time:57743ms step_avg:70.33ms +[2025-09-02 17:16:43] [Rank 0] step:841/10000 train_time:59163ms step_avg:70.35ms +[2025-09-02 17:16:43] [Rank 0] step:841/10000 train_time:59163ms step_avg:70.35ms +[2025-09-02 17:16:45] [Rank 0] step:861/10000 train_time:60584ms step_avg:70.37ms +[2025-09-02 17:16:45] [Rank 0] step:861/10000 train_time:60584ms step_avg:70.37ms +[2025-09-02 17:16:46] [Rank 0] step:881/10000 train_time:62009ms step_avg:70.39ms +[2025-09-02 17:16:46] [Rank 0] step:881/10000 train_time:62009ms step_avg:70.39ms +[2025-09-02 17:16:47] [Rank 0] step:901/10000 train_time:63432ms step_avg:70.40ms +[2025-09-02 17:16:47] [Rank 0] step:901/10000 train_time:63432ms step_avg:70.40ms +[2025-09-02 17:16:49] [Rank 0] step:921/10000 train_time:64855ms step_avg:70.42ms +[2025-09-02 17:16:49] [Rank 0] step:921/10000 train_time:64855ms step_avg:70.42ms +[2025-09-02 17:16:50] [Rank 0] step:941/10000 
train_time:66279ms step_avg:70.43ms +[2025-09-02 17:16:50] [Rank 0] step:941/10000 train_time:66279ms step_avg:70.43ms +[2025-09-02 17:16:52] [Rank 0] step:961/10000 train_time:67703ms step_avg:70.45ms +[2025-09-02 17:16:52] [Rank 0] step:961/10000 train_time:67703ms step_avg:70.45ms +[2025-09-02 17:16:53] [Rank 0] step:981/10000 train_time:69126ms step_avg:70.47ms +[2025-09-02 17:16:53] [Rank 0] step:981/10000 train_time:69126ms step_avg:70.47ms +[2025-09-02 17:16:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:16:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:17:06] [Rank 0] PRINT: step:1000/10000 val_loss:5.3198 svd_entropy: attn_qk:H=0.5826,top10E=0.53,eRank=61.5,q75/q25=19.30 attn_vo:H=0.6475,top10E=0.39,eRank=106.9,q75/q25=32.37 mlp_w1:H=0.5490,top10E=0.58,eRank=53.9,q75/q25=4.40 mlp_w2:H=0.6871,top10E=0.35,eRank=98.4,q75/q25=16.76 vo_prod:H=0.5315,top10E=0.57,eRank=36.3,q75/q25=516.55 train_time:70692ms step_avg:70.69ms +[2025-09-02 17:17:06] [Rank 0] PRINT: step:1000/10000 val_loss:5.3198 svd_entropy: attn_qk:H=0.5826,top10E=0.53,eRank=61.5,q75/q25=19.30 attn_vo:H=0.6475,top10E=0.39,eRank=106.9,q75/q25=32.37 mlp_w1:H=0.5490,top10E=0.58,eRank=53.9,q75/q25=4.40 mlp_w2:H=0.6871,top10E=0.35,eRank=98.4,q75/q25=16.76 vo_prod:H=0.5315,top10E=0.57,eRank=36.3,q75/q25=516.55 train_time:70692ms step_avg:70.69ms +[2025-09-02 17:17:06] [Rank 0] step:1001/10000 train_time:70703ms step_avg:70.63ms +[2025-09-02 17:17:06] [Rank 0] step:1001/10000 train_time:70703ms step_avg:70.63ms +[2025-09-02 17:17:08] [Rank 0] step:1021/10000 train_time:72012ms step_avg:70.53ms +[2025-09-02 17:17:08] [Rank 0] step:1021/10000 train_time:72012ms step_avg:70.53ms +[2025-09-02 17:17:09] [Rank 0] step:1041/10000 train_time:73431ms step_avg:70.54ms +[2025-09-02 17:17:09] [Rank 0] step:1041/10000 
train_time:73431ms step_avg:70.54ms +[2025-09-02 17:17:10] [Rank 0] step:1061/10000 train_time:74850ms step_avg:70.55ms +[2025-09-02 17:17:10] [Rank 0] step:1061/10000 train_time:74850ms step_avg:70.55ms +[2025-09-02 17:17:12] [Rank 0] step:1081/10000 train_time:76268ms step_avg:70.55ms +[2025-09-02 17:17:12] [Rank 0] step:1081/10000 train_time:76268ms step_avg:70.55ms +[2025-09-02 17:17:13] [Rank 0] step:1101/10000 train_time:77688ms step_avg:70.56ms +[2025-09-02 17:17:13] [Rank 0] step:1101/10000 train_time:77688ms step_avg:70.56ms +[2025-09-02 17:17:15] [Rank 0] step:1121/10000 train_time:79109ms step_avg:70.57ms +[2025-09-02 17:17:15] [Rank 0] step:1121/10000 train_time:79109ms step_avg:70.57ms +[2025-09-02 17:17:16] [Rank 0] step:1141/10000 train_time:80529ms step_avg:70.58ms +[2025-09-02 17:17:16] [Rank 0] step:1141/10000 train_time:80529ms step_avg:70.58ms +[2025-09-02 17:17:18] [Rank 0] step:1161/10000 train_time:81950ms step_avg:70.59ms +[2025-09-02 17:17:18] [Rank 0] step:1161/10000 train_time:81950ms step_avg:70.59ms +[2025-09-02 17:17:19] [Rank 0] step:1181/10000 train_time:83371ms step_avg:70.59ms +[2025-09-02 17:17:19] [Rank 0] step:1181/10000 train_time:83371ms step_avg:70.59ms +[2025-09-02 17:17:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:17:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:17:32] [Rank 0] PRINT: step:1200/10000 val_loss:5.1781 svd_entropy: attn_qk:H=0.6000,top10E=0.50,eRank=67.0,q75/q25=22.06 attn_vo:H=0.6669,top10E=0.36,eRank=115.9,q75/q25=39.87 mlp_w1:H=0.5722,top10E=0.55,eRank=61.5,q75/q25=4.84 mlp_w2:H=0.7109,top10E=0.31,eRank=115.2,q75/q25=20.41 vo_prod:H=0.5552,top10E=0.53,eRank=42.3,q75/q25=1054.23 train_time:84935ms step_avg:70.78ms +[2025-09-02 17:17:32] [Rank 0] PRINT: step:1200/10000 val_loss:5.1781 svd_entropy: attn_qk:H=0.6000,top10E=0.50,eRank=67.0,q75/q25=22.06 attn_vo:H=0.6669,top10E=0.36,eRank=115.9,q75/q25=39.87 mlp_w1:H=0.5722,top10E=0.55,eRank=61.5,q75/q25=4.84 mlp_w2:H=0.7109,top10E=0.31,eRank=115.2,q75/q25=20.41 vo_prod:H=0.5552,top10E=0.53,eRank=42.3,q75/q25=1054.23 train_time:84935ms step_avg:70.78ms +[2025-09-02 17:17:32] [Rank 0] step:1201/10000 train_time:84946ms step_avg:70.73ms +[2025-09-02 17:17:32] [Rank 0] step:1201/10000 train_time:84946ms step_avg:70.73ms +[2025-09-02 17:17:34] [Rank 0] step:1221/10000 train_time:86233ms step_avg:70.63ms +[2025-09-02 17:17:34] [Rank 0] step:1221/10000 train_time:86233ms step_avg:70.63ms +[2025-09-02 17:17:35] [Rank 0] step:1241/10000 train_time:87652ms step_avg:70.63ms +[2025-09-02 17:17:35] [Rank 0] step:1241/10000 train_time:87652ms step_avg:70.63ms +[2025-09-02 17:17:36] [Rank 0] step:1261/10000 train_time:89072ms step_avg:70.64ms +[2025-09-02 17:17:36] [Rank 0] step:1261/10000 train_time:89072ms step_avg:70.64ms +[2025-09-02 17:17:38] [Rank 0] step:1281/10000 train_time:90492ms step_avg:70.64ms +[2025-09-02 17:17:38] [Rank 0] step:1281/10000 train_time:90492ms step_avg:70.64ms +[2025-09-02 17:17:39] [Rank 0] step:1301/10000 train_time:91913ms step_avg:70.65ms +[2025-09-02 17:17:39] [Rank 0] step:1301/10000 train_time:91913ms step_avg:70.65ms +[2025-09-02 17:17:41] [Rank 0] step:1321/10000 train_time:93334ms step_avg:70.65ms +[2025-09-02 17:17:41] [Rank 0] step:1321/10000 train_time:93334ms step_avg:70.65ms +[2025-09-02 17:17:42] [Rank 0] 
step:1341/10000 train_time:94756ms step_avg:70.66ms +[2025-09-02 17:17:42] [Rank 0] step:1341/10000 train_time:94756ms step_avg:70.66ms +[2025-09-02 17:17:43] [Rank 0] step:1361/10000 train_time:96178ms step_avg:70.67ms +[2025-09-02 17:17:43] [Rank 0] step:1361/10000 train_time:96178ms step_avg:70.67ms +[2025-09-02 17:17:45] [Rank 0] step:1381/10000 train_time:97602ms step_avg:70.67ms +[2025-09-02 17:17:45] [Rank 0] step:1381/10000 train_time:97602ms step_avg:70.67ms +[2025-09-02 17:17:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:17:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:17:58] [Rank 0] PRINT: step:1400/10000 val_loss:5.0467 svd_entropy: attn_qk:H=0.6146,top10E=0.47,eRank=72.3,q75/q25=25.53 attn_vo:H=0.6836,top10E=0.33,eRank=125.0,q75/q25=50.43 mlp_w1:H=0.5919,top10E=0.52,eRank=68.7,q75/q25=5.36 mlp_w2:H=0.7311,top10E=0.28,eRank=131.5,q75/q25=23.16 vo_prod:H=0.5742,top10E=0.49,eRank=48.0,q75/q25=1988.02 train_time:99167ms step_avg:70.83ms +[2025-09-02 17:17:58] [Rank 0] PRINT: step:1400/10000 val_loss:5.0467 svd_entropy: attn_qk:H=0.6146,top10E=0.47,eRank=72.3,q75/q25=25.53 attn_vo:H=0.6836,top10E=0.33,eRank=125.0,q75/q25=50.43 mlp_w1:H=0.5919,top10E=0.52,eRank=68.7,q75/q25=5.36 mlp_w2:H=0.7311,top10E=0.28,eRank=131.5,q75/q25=23.16 vo_prod:H=0.5742,top10E=0.49,eRank=48.0,q75/q25=1988.02 train_time:99167ms step_avg:70.83ms +[2025-09-02 17:17:58] [Rank 0] step:1401/10000 train_time:99178ms step_avg:70.79ms +[2025-09-02 17:17:58] [Rank 0] step:1401/10000 train_time:99178ms step_avg:70.79ms +[2025-09-02 17:18:00] [Rank 0] step:1421/10000 train_time:100468ms step_avg:70.70ms +[2025-09-02 17:18:00] [Rank 0] step:1421/10000 train_time:100468ms step_avg:70.70ms +[2025-09-02 17:18:01] [Rank 0] step:1441/10000 train_time:101888ms step_avg:70.71ms +[2025-09-02 17:18:01] 
[Rank 0] step:1441/10000 train_time:101888ms step_avg:70.71ms +[2025-09-02 17:18:02] [Rank 0] step:1461/10000 train_time:103315ms step_avg:70.72ms +[2025-09-02 17:18:02] [Rank 0] step:1461/10000 train_time:103315ms step_avg:70.72ms +[2025-09-02 17:18:04] [Rank 0] step:1481/10000 train_time:104736ms step_avg:70.72ms +[2025-09-02 17:18:04] [Rank 0] step:1481/10000 train_time:104736ms step_avg:70.72ms +[2025-09-02 17:18:05] [Rank 0] step:1501/10000 train_time:106167ms step_avg:70.73ms +[2025-09-02 17:18:05] [Rank 0] step:1501/10000 train_time:106167ms step_avg:70.73ms +[2025-09-02 17:18:07] [Rank 0] step:1521/10000 train_time:107604ms step_avg:70.75ms +[2025-09-02 17:18:07] [Rank 0] step:1521/10000 train_time:107604ms step_avg:70.75ms +[2025-09-02 17:18:08] [Rank 0] step:1541/10000 train_time:109038ms step_avg:70.76ms +[2025-09-02 17:18:08] [Rank 0] step:1541/10000 train_time:109038ms step_avg:70.76ms +[2025-09-02 17:18:10] [Rank 0] step:1561/10000 train_time:110472ms step_avg:70.77ms +[2025-09-02 17:18:10] [Rank 0] step:1561/10000 train_time:110472ms step_avg:70.77ms +[2025-09-02 17:18:11] [Rank 0] step:1581/10000 train_time:111905ms step_avg:70.78ms +[2025-09-02 17:18:11] [Rank 0] step:1581/10000 train_time:111905ms step_avg:70.78ms +[2025-09-02 17:18:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:18:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:18:24] [Rank 0] PRINT: step:1600/10000 val_loss:4.9109 svd_entropy: attn_qk:H=0.6272,top10E=0.45,eRank=77.0,q75/q25=29.94 attn_vo:H=0.6981,top10E=0.31,eRank=133.9,q75/q25=61.73 mlp_w1:H=0.6088,top10E=0.50,eRank=75.6,q75/q25=5.88 mlp_w2:H=0.7468,top10E=0.26,eRank=146.1,q75/q25=26.56 vo_prod:H=0.5903,top10E=0.46,eRank=53.3,q75/q25=3214.87 train_time:113482ms step_avg:70.93ms +[2025-09-02 17:18:24] [Rank 0] PRINT: step:1600/10000 val_loss:4.9109 svd_entropy: attn_qk:H=0.6272,top10E=0.45,eRank=77.0,q75/q25=29.94 attn_vo:H=0.6981,top10E=0.31,eRank=133.9,q75/q25=61.73 mlp_w1:H=0.6088,top10E=0.50,eRank=75.6,q75/q25=5.88 mlp_w2:H=0.7468,top10E=0.26,eRank=146.1,q75/q25=26.56 vo_prod:H=0.5903,top10E=0.46,eRank=53.3,q75/q25=3214.87 train_time:113482ms step_avg:70.93ms +[2025-09-02 17:18:24] [Rank 0] step:1601/10000 train_time:113493ms step_avg:70.89ms +[2025-09-02 17:18:24] [Rank 0] step:1601/10000 train_time:113493ms step_avg:70.89ms +[2025-09-02 17:18:26] [Rank 0] step:1621/10000 train_time:114792ms step_avg:70.82ms +[2025-09-02 17:18:26] [Rank 0] step:1621/10000 train_time:114792ms step_avg:70.82ms +[2025-09-02 17:18:27] [Rank 0] step:1641/10000 train_time:116224ms step_avg:70.82ms +[2025-09-02 17:18:27] [Rank 0] step:1641/10000 train_time:116224ms step_avg:70.82ms +[2025-09-02 17:18:29] [Rank 0] step:1661/10000 train_time:117656ms step_avg:70.83ms +[2025-09-02 17:18:29] [Rank 0] step:1661/10000 train_time:117656ms step_avg:70.83ms +[2025-09-02 17:18:30] [Rank 0] step:1681/10000 train_time:119089ms step_avg:70.84ms +[2025-09-02 17:18:30] [Rank 0] step:1681/10000 train_time:119089ms step_avg:70.84ms +[2025-09-02 17:18:32] [Rank 0] step:1701/10000 train_time:120521ms step_avg:70.85ms +[2025-09-02 17:18:32] [Rank 0] step:1701/10000 train_time:120521ms step_avg:70.85ms +[2025-09-02 17:18:33] [Rank 0] step:1721/10000 train_time:121954ms step_avg:70.86ms +[2025-09-02 17:18:33] [Rank 0] step:1721/10000 train_time:121954ms step_avg:70.86ms +[2025-09-02 17:18:34] 
[Rank 0] step:1741/10000 train_time:123389ms step_avg:70.87ms +[2025-09-02 17:18:34] [Rank 0] step:1741/10000 train_time:123389ms step_avg:70.87ms +[2025-09-02 17:18:36] [Rank 0] step:1761/10000 train_time:124822ms step_avg:70.88ms +[2025-09-02 17:18:36] [Rank 0] step:1761/10000 train_time:124822ms step_avg:70.88ms +[2025-09-02 17:18:37] [Rank 0] step:1781/10000 train_time:126256ms step_avg:70.89ms +[2025-09-02 17:18:37] [Rank 0] step:1781/10000 train_time:126256ms step_avg:70.89ms +[2025-09-02 17:18:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:18:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:18:50] [Rank 0] PRINT: step:1800/10000 val_loss:4.8063 svd_entropy: attn_qk:H=0.6381,top10E=0.43,eRank=81.4,q75/q25=34.63 attn_vo:H=0.7106,top10E=0.29,eRank=142.5,q75/q25=72.69 mlp_w1:H=0.6236,top10E=0.48,eRank=82.1,q75/q25=6.49 mlp_w2:H=0.7587,top10E=0.24,eRank=158.2,q75/q25=30.41 vo_prod:H=0.6041,top10E=0.44,eRank=58.4,q75/q25=4766.12 train_time:127833ms step_avg:71.02ms +[2025-09-02 17:18:50] [Rank 0] PRINT: step:1800/10000 val_loss:4.8063 svd_entropy: attn_qk:H=0.6381,top10E=0.43,eRank=81.4,q75/q25=34.63 attn_vo:H=0.7106,top10E=0.29,eRank=142.5,q75/q25=72.69 mlp_w1:H=0.6236,top10E=0.48,eRank=82.1,q75/q25=6.49 mlp_w2:H=0.7587,top10E=0.24,eRank=158.2,q75/q25=30.41 vo_prod:H=0.6041,top10E=0.44,eRank=58.4,q75/q25=4766.12 train_time:127833ms step_avg:71.02ms +[2025-09-02 17:18:51] [Rank 0] step:1801/10000 train_time:127844ms step_avg:70.99ms +[2025-09-02 17:18:51] [Rank 0] step:1801/10000 train_time:127844ms step_avg:70.99ms +[2025-09-02 17:18:52] [Rank 0] step:1821/10000 train_time:129145ms step_avg:70.92ms +[2025-09-02 17:18:52] [Rank 0] step:1821/10000 train_time:129145ms step_avg:70.92ms +[2025-09-02 17:18:53] [Rank 0] step:1841/10000 train_time:130616ms step_avg:70.95ms 
+[2025-09-02 17:18:53] [Rank 0] step:1841/10000 train_time:130616ms step_avg:70.95ms +[2025-09-02 17:18:55] [Rank 0] step:1861/10000 train_time:132047ms step_avg:70.95ms +[2025-09-02 17:18:55] [Rank 0] step:1861/10000 train_time:132047ms step_avg:70.95ms +[2025-09-02 17:18:56] [Rank 0] step:1881/10000 train_time:133478ms step_avg:70.96ms +[2025-09-02 17:18:56] [Rank 0] step:1881/10000 train_time:133478ms step_avg:70.96ms +[2025-09-02 17:18:58] [Rank 0] step:1901/10000 train_time:134910ms step_avg:70.97ms +[2025-09-02 17:18:58] [Rank 0] step:1901/10000 train_time:134910ms step_avg:70.97ms +[2025-09-02 17:18:59] [Rank 0] step:1921/10000 train_time:136342ms step_avg:70.97ms +[2025-09-02 17:18:59] [Rank 0] step:1921/10000 train_time:136342ms step_avg:70.97ms +[2025-09-02 17:19:01] [Rank 0] step:1941/10000 train_time:137774ms step_avg:70.98ms +[2025-09-02 17:19:01] [Rank 0] step:1941/10000 train_time:137774ms step_avg:70.98ms +[2025-09-02 17:19:02] [Rank 0] step:1961/10000 train_time:139207ms step_avg:70.99ms +[2025-09-02 17:19:02] [Rank 0] step:1961/10000 train_time:139207ms step_avg:70.99ms +[2025-09-02 17:19:04] [Rank 0] step:1981/10000 train_time:140640ms step_avg:70.99ms +[2025-09-02 17:19:04] [Rank 0] step:1981/10000 train_time:140640ms step_avg:70.99ms +[2025-09-02 17:19:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:19:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:19:17] [Rank 0] PRINT: step:2000/10000 val_loss:4.7415 svd_entropy: attn_qk:H=0.6476,top10E=0.41,eRank=85.6,q75/q25=39.90 attn_vo:H=0.7215,top10E=0.28,eRank=150.5,q75/q25=81.64 mlp_w1:H=0.6359,top10E=0.46,eRank=88.0,q75/q25=7.10 mlp_w2:H=0.7683,top10E=0.23,eRank=169.0,q75/q25=34.04 vo_prod:H=0.6161,top10E=0.41,eRank=63.3,q75/q25=6476.84 train_time:142217ms step_avg:71.11ms +[2025-09-02 17:19:17] [Rank 0] PRINT: step:2000/10000 val_loss:4.7415 svd_entropy: attn_qk:H=0.6476,top10E=0.41,eRank=85.6,q75/q25=39.90 attn_vo:H=0.7215,top10E=0.28,eRank=150.5,q75/q25=81.64 mlp_w1:H=0.6359,top10E=0.46,eRank=88.0,q75/q25=7.10 mlp_w2:H=0.7683,top10E=0.23,eRank=169.0,q75/q25=34.04 vo_prod:H=0.6161,top10E=0.41,eRank=63.3,q75/q25=6476.84 train_time:142217ms step_avg:71.11ms +[2025-09-02 17:19:17] [Rank 0] step:2001/10000 train_time:142228ms step_avg:71.08ms +[2025-09-02 17:19:17] [Rank 0] step:2001/10000 train_time:142228ms step_avg:71.08ms +[2025-09-02 17:19:18] [Rank 0] step:2021/10000 train_time:143542ms step_avg:71.03ms +[2025-09-02 17:19:18] [Rank 0] step:2021/10000 train_time:143542ms step_avg:71.03ms +[2025-09-02 17:19:20] [Rank 0] step:2041/10000 train_time:145094ms step_avg:71.09ms +[2025-09-02 17:19:20] [Rank 0] step:2041/10000 train_time:145094ms step_avg:71.09ms +[2025-09-02 17:19:21] [Rank 0] step:2061/10000 train_time:146527ms step_avg:71.10ms +[2025-09-02 17:19:21] [Rank 0] step:2061/10000 train_time:146527ms step_avg:71.10ms +[2025-09-02 17:19:23] [Rank 0] step:2081/10000 train_time:147958ms step_avg:71.10ms +[2025-09-02 17:19:23] [Rank 0] step:2081/10000 train_time:147958ms step_avg:71.10ms +[2025-09-02 17:19:24] [Rank 0] step:2101/10000 train_time:149391ms step_avg:71.10ms +[2025-09-02 17:19:24] [Rank 0] step:2101/10000 train_time:149391ms step_avg:71.10ms +[2025-09-02 17:19:25] [Rank 0] step:2121/10000 train_time:150823ms step_avg:71.11ms +[2025-09-02 17:19:25] [Rank 0] step:2121/10000 train_time:150823ms step_avg:71.11ms +[2025-09-02 17:19:27] 
[Rank 0] step:2141/10000 train_time:152255ms step_avg:71.11ms +[2025-09-02 17:19:27] [Rank 0] step:2141/10000 train_time:152255ms step_avg:71.11ms +[2025-09-02 17:19:28] [Rank 0] step:2161/10000 train_time:153689ms step_avg:71.12ms +[2025-09-02 17:19:28] [Rank 0] step:2161/10000 train_time:153689ms step_avg:71.12ms +[2025-09-02 17:19:30] [Rank 0] step:2181/10000 train_time:155124ms step_avg:71.12ms +[2025-09-02 17:19:30] [Rank 0] step:2181/10000 train_time:155124ms step_avg:71.12ms +[2025-09-02 17:19:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:19:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:19:43] [Rank 0] PRINT: step:2200/10000 val_loss:4.6637 svd_entropy: attn_qk:H=0.6555,top10E=0.39,eRank=89.3,q75/q25=45.12 attn_vo:H=0.7306,top10E=0.27,eRank=157.7,q75/q25=88.62 mlp_w1:H=0.6467,top10E=0.45,eRank=93.4,q75/q25=7.75 mlp_w2:H=0.7761,top10E=0.22,eRank=178.2,q75/q25=37.65 vo_prod:H=0.6261,top10E=0.40,eRank=67.8,q75/q25=8148.74 train_time:156698ms step_avg:71.23ms +[2025-09-02 17:19:43] [Rank 0] PRINT: step:2200/10000 val_loss:4.6637 svd_entropy: attn_qk:H=0.6555,top10E=0.39,eRank=89.3,q75/q25=45.12 attn_vo:H=0.7306,top10E=0.27,eRank=157.7,q75/q25=88.62 mlp_w1:H=0.6467,top10E=0.45,eRank=93.4,q75/q25=7.75 mlp_w2:H=0.7761,top10E=0.22,eRank=178.2,q75/q25=37.65 vo_prod:H=0.6261,top10E=0.40,eRank=67.8,q75/q25=8148.74 train_time:156698ms step_avg:71.23ms +[2025-09-02 17:19:43] [Rank 0] step:2201/10000 train_time:156709ms step_avg:71.20ms +[2025-09-02 17:19:43] [Rank 0] step:2201/10000 train_time:156709ms step_avg:71.20ms +[2025-09-02 17:19:45] [Rank 0] step:2221/10000 train_time:158001ms step_avg:71.14ms +[2025-09-02 17:19:45] [Rank 0] step:2221/10000 train_time:158001ms step_avg:71.14ms +[2025-09-02 17:19:46] [Rank 0] step:2241/10000 train_time:159466ms step_avg:71.16ms 
+[2025-09-02 17:19:46] [Rank 0] step:2241/10000 train_time:159466ms step_avg:71.16ms +[2025-09-02 17:19:47] [Rank 0] step:2261/10000 train_time:160941ms step_avg:71.18ms +[2025-09-02 17:19:47] [Rank 0] step:2261/10000 train_time:160941ms step_avg:71.18ms +[2025-09-02 17:19:49] [Rank 0] step:2281/10000 train_time:162419ms step_avg:71.21ms +[2025-09-02 17:19:49] [Rank 0] step:2281/10000 train_time:162419ms step_avg:71.21ms +[2025-09-02 17:19:50] [Rank 0] step:2301/10000 train_time:163896ms step_avg:71.23ms +[2025-09-02 17:19:50] [Rank 0] step:2301/10000 train_time:163896ms step_avg:71.23ms +[2025-09-02 17:19:52] [Rank 0] step:2321/10000 train_time:165374ms step_avg:71.25ms +[2025-09-02 17:19:52] [Rank 0] step:2321/10000 train_time:165374ms step_avg:71.25ms +[2025-09-02 17:19:53] [Rank 0] step:2341/10000 train_time:166851ms step_avg:71.27ms +[2025-09-02 17:19:53] [Rank 0] step:2341/10000 train_time:166851ms step_avg:71.27ms +[2025-09-02 17:19:55] [Rank 0] step:2361/10000 train_time:168330ms step_avg:71.30ms +[2025-09-02 17:19:55] [Rank 0] step:2361/10000 train_time:168330ms step_avg:71.30ms +[2025-09-02 17:19:56] [Rank 0] step:2381/10000 train_time:169807ms step_avg:71.32ms +[2025-09-02 17:19:56] [Rank 0] step:2381/10000 train_time:169807ms step_avg:71.32ms +[2025-09-02 17:19:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:19:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:20:09] [Rank 0] PRINT: step:2400/10000 val_loss:4.5864 svd_entropy: attn_qk:H=0.6622,top10E=0.38,eRank=92.6,q75/q25=50.79 attn_vo:H=0.7389,top10E=0.25,eRank=164.8,q75/q25=95.28 mlp_w1:H=0.6564,top10E=0.44,eRank=98.5,q75/q25=8.41 mlp_w2:H=0.7832,top10E=0.21,eRank=187.0,q75/q25=40.93 vo_prod:H=0.6354,top10E=0.38,eRank=72.1,q75/q25=9600.80 train_time:171433ms step_avg:71.43ms +[2025-09-02 17:20:09] [Rank 0] PRINT: step:2400/10000 val_loss:4.5864 svd_entropy: attn_qk:H=0.6622,top10E=0.38,eRank=92.6,q75/q25=50.79 attn_vo:H=0.7389,top10E=0.25,eRank=164.8,q75/q25=95.28 mlp_w1:H=0.6564,top10E=0.44,eRank=98.5,q75/q25=8.41 mlp_w2:H=0.7832,top10E=0.21,eRank=187.0,q75/q25=40.93 vo_prod:H=0.6354,top10E=0.38,eRank=72.1,q75/q25=9600.80 train_time:171433ms step_avg:71.43ms +[2025-09-02 17:20:10] [Rank 0] step:2401/10000 train_time:171445ms step_avg:71.41ms +[2025-09-02 17:20:10] [Rank 0] step:2401/10000 train_time:171445ms step_avg:71.41ms +[2025-09-02 17:20:11] [Rank 0] step:2421/10000 train_time:172792ms step_avg:71.37ms +[2025-09-02 17:20:11] [Rank 0] step:2421/10000 train_time:172792ms step_avg:71.37ms +[2025-09-02 17:20:13] [Rank 0] step:2441/10000 train_time:174267ms step_avg:71.39ms +[2025-09-02 17:20:13] [Rank 0] step:2441/10000 train_time:174267ms step_avg:71.39ms +[2025-09-02 17:20:14] [Rank 0] step:2461/10000 train_time:175743ms step_avg:71.41ms +[2025-09-02 17:20:14] [Rank 0] step:2461/10000 train_time:175743ms step_avg:71.41ms +[2025-09-02 17:20:15] [Rank 0] step:2481/10000 train_time:177219ms step_avg:71.43ms +[2025-09-02 17:20:15] [Rank 0] step:2481/10000 train_time:177219ms step_avg:71.43ms +[2025-09-02 17:20:17] [Rank 0] step:2501/10000 train_time:178696ms step_avg:71.45ms +[2025-09-02 17:20:17] [Rank 0] step:2501/10000 train_time:178696ms step_avg:71.45ms +[2025-09-02 17:20:18] [Rank 0] step:2521/10000 train_time:180174ms step_avg:71.47ms +[2025-09-02 17:20:18] [Rank 0] step:2521/10000 train_time:180174ms step_avg:71.47ms +[2025-09-02 17:20:20] 
[Rank 0] step:2541/10000 train_time:181653ms step_avg:71.49ms +[2025-09-02 17:20:20] [Rank 0] step:2541/10000 train_time:181653ms step_avg:71.49ms +[2025-09-02 17:20:21] [Rank 0] step:2561/10000 train_time:183131ms step_avg:71.51ms +[2025-09-02 17:20:21] [Rank 0] step:2561/10000 train_time:183131ms step_avg:71.51ms +[2025-09-02 17:20:23] [Rank 0] step:2581/10000 train_time:184609ms step_avg:71.53ms +[2025-09-02 17:20:23] [Rank 0] step:2581/10000 train_time:184609ms step_avg:71.53ms +[2025-09-02 17:20:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:20:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:20:36] [Rank 0] PRINT: step:2600/10000 val_loss:4.5272 svd_entropy: attn_qk:H=0.6688,top10E=0.37,eRank=95.9,q75/q25=56.51 attn_vo:H=0.7464,top10E=0.25,eRank=171.3,q75/q25=98.92 mlp_w1:H=0.6650,top10E=0.42,eRank=103.3,q75/q25=9.12 mlp_w2:H=0.7891,top10E=0.20,eRank=194.7,q75/q25=44.45 vo_prod:H=0.6435,top10E=0.37,eRank=76.0,q75/q25=10870.05 train_time:186235ms step_avg:71.63ms +[2025-09-02 17:20:36] [Rank 0] PRINT: step:2600/10000 val_loss:4.5272 svd_entropy: attn_qk:H=0.6688,top10E=0.37,eRank=95.9,q75/q25=56.51 attn_vo:H=0.7464,top10E=0.25,eRank=171.3,q75/q25=98.92 mlp_w1:H=0.6650,top10E=0.42,eRank=103.3,q75/q25=9.12 mlp_w2:H=0.7891,top10E=0.20,eRank=194.7,q75/q25=44.45 vo_prod:H=0.6435,top10E=0.37,eRank=76.0,q75/q25=10870.05 train_time:186235ms step_avg:71.63ms +[2025-09-02 17:20:36] [Rank 0] step:2601/10000 train_time:186246ms step_avg:71.61ms +[2025-09-02 17:20:36] [Rank 0] step:2601/10000 train_time:186246ms step_avg:71.61ms +[2025-09-02 17:20:37] [Rank 0] step:2621/10000 train_time:187573ms step_avg:71.57ms +[2025-09-02 17:20:37] [Rank 0] step:2621/10000 train_time:187573ms step_avg:71.57ms +[2025-09-02 17:20:39] [Rank 0] step:2641/10000 train_time:189046ms step_avg:71.58ms 
+[2025-09-02 17:20:39] [Rank 0] step:2641/10000 train_time:189046ms step_avg:71.58ms +[2025-09-02 17:20:40] [Rank 0] step:2661/10000 train_time:190521ms step_avg:71.60ms +[2025-09-02 17:20:40] [Rank 0] step:2661/10000 train_time:190521ms step_avg:71.60ms +[2025-09-02 17:20:42] [Rank 0] step:2681/10000 train_time:191997ms step_avg:71.61ms +[2025-09-02 17:20:42] [Rank 0] step:2681/10000 train_time:191997ms step_avg:71.61ms +[2025-09-02 17:20:43] [Rank 0] step:2701/10000 train_time:193473ms step_avg:71.63ms +[2025-09-02 17:20:43] [Rank 0] step:2701/10000 train_time:193473ms step_avg:71.63ms +[2025-09-02 17:20:45] [Rank 0] step:2721/10000 train_time:194951ms step_avg:71.65ms +[2025-09-02 17:20:45] [Rank 0] step:2721/10000 train_time:194951ms step_avg:71.65ms +[2025-09-02 17:20:46] [Rank 0] step:2741/10000 train_time:196428ms step_avg:71.66ms +[2025-09-02 17:20:46] [Rank 0] step:2741/10000 train_time:196428ms step_avg:71.66ms +[2025-09-02 17:20:48] [Rank 0] step:2761/10000 train_time:197904ms step_avg:71.68ms +[2025-09-02 17:20:48] [Rank 0] step:2761/10000 train_time:197904ms step_avg:71.68ms +[2025-09-02 17:20:49] [Rank 0] step:2781/10000 train_time:199381ms step_avg:71.69ms +[2025-09-02 17:20:49] [Rank 0] step:2781/10000 train_time:199381ms step_avg:71.69ms +[2025-09-02 17:20:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:20:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:21:02] [Rank 0] PRINT: step:2800/10000 val_loss:4.4850 svd_entropy: attn_qk:H=0.6750,top10E=0.36,eRank=99.2,q75/q25=61.87 attn_vo:H=0.7531,top10E=0.24,eRank=177.6,q75/q25=102.98 mlp_w1:H=0.6727,top10E=0.41,eRank=107.8,q75/q25=9.86 mlp_w2:H=0.7942,top10E=0.19,eRank=201.9,q75/q25=47.66 vo_prod:H=0.6509,top10E=0.36,eRank=79.8,q75/q25=12456.48 train_time:201007ms step_avg:71.79ms +[2025-09-02 17:21:02] [Rank 0] PRINT: step:2800/10000 val_loss:4.4850 svd_entropy: attn_qk:H=0.6750,top10E=0.36,eRank=99.2,q75/q25=61.87 attn_vo:H=0.7531,top10E=0.24,eRank=177.6,q75/q25=102.98 mlp_w1:H=0.6727,top10E=0.41,eRank=107.8,q75/q25=9.86 mlp_w2:H=0.7942,top10E=0.19,eRank=201.9,q75/q25=47.66 vo_prod:H=0.6509,top10E=0.36,eRank=79.8,q75/q25=12456.48 train_time:201007ms step_avg:71.79ms +[2025-09-02 17:21:02] [Rank 0] step:2801/10000 train_time:201018ms step_avg:71.77ms +[2025-09-02 17:21:02] [Rank 0] step:2801/10000 train_time:201018ms step_avg:71.77ms +[2025-09-02 17:21:04] [Rank 0] step:2821/10000 train_time:202382ms step_avg:71.74ms +[2025-09-02 17:21:04] [Rank 0] step:2821/10000 train_time:202382ms step_avg:71.74ms +[2025-09-02 17:21:05] [Rank 0] step:2841/10000 train_time:203856ms step_avg:71.76ms +[2025-09-02 17:21:05] [Rank 0] step:2841/10000 train_time:203856ms step_avg:71.76ms +[2025-09-02 17:21:07] [Rank 0] step:2861/10000 train_time:205331ms step_avg:71.77ms +[2025-09-02 17:21:07] [Rank 0] step:2861/10000 train_time:205331ms step_avg:71.77ms +[2025-09-02 17:21:08] [Rank 0] step:2881/10000 train_time:206806ms step_avg:71.78ms +[2025-09-02 17:21:08] [Rank 0] step:2881/10000 train_time:206806ms step_avg:71.78ms +[2025-09-02 17:21:10] [Rank 0] step:2901/10000 train_time:208282ms step_avg:71.80ms +[2025-09-02 17:21:10] [Rank 0] step:2901/10000 train_time:208282ms step_avg:71.80ms +[2025-09-02 17:21:11] [Rank 0] step:2921/10000 train_time:209759ms step_avg:71.81ms +[2025-09-02 17:21:11] [Rank 0] step:2921/10000 train_time:209759ms step_avg:71.81ms +[2025-09-02 
17:21:13] [Rank 0] step:2941/10000 train_time:211235ms step_avg:71.82ms +[2025-09-02 17:21:13] [Rank 0] step:2941/10000 train_time:211235ms step_avg:71.82ms +[2025-09-02 17:21:14] [Rank 0] step:2961/10000 train_time:212712ms step_avg:71.84ms +[2025-09-02 17:21:14] [Rank 0] step:2961/10000 train_time:212712ms step_avg:71.84ms +[2025-09-02 17:21:16] [Rank 0] step:2981/10000 train_time:214195ms step_avg:71.85ms +[2025-09-02 17:21:16] [Rank 0] step:2981/10000 train_time:214195ms step_avg:71.85ms +[2025-09-02 17:21:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:21:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:21:29] [Rank 0] PRINT: step:3000/10000 val_loss:4.4415 svd_entropy: attn_qk:H=0.6803,top10E=0.36,eRank=102.2,q75/q25=66.94 attn_vo:H=0.7591,top10E=0.23,eRank=183.5,q75/q25=105.18 mlp_w1:H=0.6801,top10E=0.40,eRank=112.3,q75/q25=10.58 mlp_w2:H=0.7985,top10E=0.19,eRank=208.0,q75/q25=51.31 vo_prod:H=0.6575,top10E=0.35,eRank=83.4,q75/q25=13127.01 train_time:215828ms step_avg:71.94ms +[2025-09-02 17:21:29] [Rank 0] PRINT: step:3000/10000 val_loss:4.4415 svd_entropy: attn_qk:H=0.6803,top10E=0.36,eRank=102.2,q75/q25=66.94 attn_vo:H=0.7591,top10E=0.23,eRank=183.5,q75/q25=105.18 mlp_w1:H=0.6801,top10E=0.40,eRank=112.3,q75/q25=10.58 mlp_w2:H=0.7985,top10E=0.19,eRank=208.0,q75/q25=51.31 vo_prod:H=0.6575,top10E=0.35,eRank=83.4,q75/q25=13127.01 train_time:215828ms step_avg:71.94ms +[2025-09-02 17:21:29] [Rank 0] step:3001/10000 train_time:215839ms step_avg:71.92ms +[2025-09-02 17:21:29] [Rank 0] step:3001/10000 train_time:215839ms step_avg:71.92ms +[2025-09-02 17:21:30] [Rank 0] step:3021/10000 train_time:217188ms step_avg:71.89ms +[2025-09-02 17:21:30] [Rank 0] step:3021/10000 train_time:217188ms step_avg:71.89ms +[2025-09-02 17:21:32] [Rank 0] step:3041/10000 train_time:218668ms 
step_avg:71.91ms +[2025-09-02 17:21:32] [Rank 0] step:3041/10000 train_time:218668ms step_avg:71.91ms +[2025-09-02 17:21:33] [Rank 0] step:3061/10000 train_time:220151ms step_avg:71.92ms +[2025-09-02 17:21:33] [Rank 0] step:3061/10000 train_time:220151ms step_avg:71.92ms +[2025-09-02 17:21:35] [Rank 0] step:3081/10000 train_time:221634ms step_avg:71.94ms +[2025-09-02 17:21:35] [Rank 0] step:3081/10000 train_time:221634ms step_avg:71.94ms +[2025-09-02 17:21:36] [Rank 0] step:3101/10000 train_time:223118ms step_avg:71.95ms +[2025-09-02 17:21:36] [Rank 0] step:3101/10000 train_time:223118ms step_avg:71.95ms +[2025-09-02 17:21:38] [Rank 0] step:3121/10000 train_time:224601ms step_avg:71.96ms +[2025-09-02 17:21:38] [Rank 0] step:3121/10000 train_time:224601ms step_avg:71.96ms +[2025-09-02 17:21:39] [Rank 0] step:3141/10000 train_time:226086ms step_avg:71.98ms +[2025-09-02 17:21:39] [Rank 0] step:3141/10000 train_time:226086ms step_avg:71.98ms +[2025-09-02 17:21:41] [Rank 0] step:3161/10000 train_time:227571ms step_avg:71.99ms +[2025-09-02 17:21:41] [Rank 0] step:3161/10000 train_time:227571ms step_avg:71.99ms +[2025-09-02 17:21:42] [Rank 0] step:3181/10000 train_time:229058ms step_avg:72.01ms +[2025-09-02 17:21:42] [Rank 0] step:3181/10000 train_time:229058ms step_avg:72.01ms +[2025-09-02 17:21:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:21:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:21:55] [Rank 0] PRINT: step:3200/10000 val_loss:4.4009 svd_entropy: attn_qk:H=0.6854,top10E=0.35,eRank=105.1,q75/q25=71.35 attn_vo:H=0.7646,top10E=0.22,eRank=189.0,q75/q25=106.55 mlp_w1:H=0.6863,top10E=0.39,eRank=116.3,q75/q25=11.30 mlp_w2:H=0.8025,top10E=0.18,eRank=213.9,q75/q25=54.16 vo_prod:H=0.6636,top10E=0.34,eRank=86.8,q75/q25=13862.19 train_time:230693ms step_avg:72.09ms +[2025-09-02 17:21:55] [Rank 0] PRINT: step:3200/10000 val_loss:4.4009 svd_entropy: attn_qk:H=0.6854,top10E=0.35,eRank=105.1,q75/q25=71.35 attn_vo:H=0.7646,top10E=0.22,eRank=189.0,q75/q25=106.55 mlp_w1:H=0.6863,top10E=0.39,eRank=116.3,q75/q25=11.30 mlp_w2:H=0.8025,top10E=0.18,eRank=213.9,q75/q25=54.16 vo_prod:H=0.6636,top10E=0.34,eRank=86.8,q75/q25=13862.19 train_time:230693ms step_avg:72.09ms +[2025-09-02 17:21:55] [Rank 0] step:3201/10000 train_time:230704ms step_avg:72.07ms +[2025-09-02 17:21:55] [Rank 0] step:3201/10000 train_time:230704ms step_avg:72.07ms +[2025-09-02 17:21:57] [Rank 0] step:3221/10000 train_time:232061ms step_avg:72.05ms +[2025-09-02 17:21:57] [Rank 0] step:3221/10000 train_time:232061ms step_avg:72.05ms +[2025-09-02 17:21:58] [Rank 0] step:3241/10000 train_time:233547ms step_avg:72.06ms +[2025-09-02 17:21:58] [Rank 0] step:3241/10000 train_time:233547ms step_avg:72.06ms +[2025-09-02 17:22:00] [Rank 0] step:3261/10000 train_time:235031ms step_avg:72.07ms +[2025-09-02 17:22:00] [Rank 0] step:3261/10000 train_time:235031ms step_avg:72.07ms +[2025-09-02 17:22:01] [Rank 0] step:3281/10000 train_time:236515ms step_avg:72.09ms +[2025-09-02 17:22:01] [Rank 0] step:3281/10000 train_time:236515ms step_avg:72.09ms +[2025-09-02 17:22:03] [Rank 0] step:3301/10000 train_time:238002ms step_avg:72.10ms +[2025-09-02 17:22:03] [Rank 0] step:3301/10000 train_time:238002ms step_avg:72.10ms +[2025-09-02 17:22:04] [Rank 0] step:3321/10000 train_time:239487ms step_avg:72.11ms +[2025-09-02 17:22:04] [Rank 0] step:3321/10000 train_time:239487ms step_avg:72.11ms +[2025-09-02 
17:22:06] [Rank 0] step:3341/10000 train_time:240974ms step_avg:72.13ms +[2025-09-02 17:22:06] [Rank 0] step:3341/10000 train_time:240974ms step_avg:72.13ms +[2025-09-02 17:22:07] [Rank 0] step:3361/10000 train_time:242458ms step_avg:72.14ms +[2025-09-02 17:22:07] [Rank 0] step:3361/10000 train_time:242458ms step_avg:72.14ms +[2025-09-02 17:22:09] [Rank 0] step:3381/10000 train_time:243942ms step_avg:72.15ms +[2025-09-02 17:22:09] [Rank 0] step:3381/10000 train_time:243942ms step_avg:72.15ms +[2025-09-02 17:22:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:22:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:22:22] [Rank 0] PRINT: step:3400/10000 val_loss:4.3604 svd_entropy: attn_qk:H=0.6902,top10E=0.34,eRank=108.0,q75/q25=76.51 attn_vo:H=0.7698,top10E=0.22,eRank=194.6,q75/q25=107.96 mlp_w1:H=0.6923,top10E=0.39,eRank=120.3,q75/q25=12.05 mlp_w2:H=0.8061,top10E=0.18,eRank=219.4,q75/q25=56.93 vo_prod:H=0.6696,top10E=0.33,eRank=90.4,q75/q25=14497.99 train_time:245579ms step_avg:72.23ms +[2025-09-02 17:22:22] [Rank 0] PRINT: step:3400/10000 val_loss:4.3604 svd_entropy: attn_qk:H=0.6902,top10E=0.34,eRank=108.0,q75/q25=76.51 attn_vo:H=0.7698,top10E=0.22,eRank=194.6,q75/q25=107.96 mlp_w1:H=0.6923,top10E=0.39,eRank=120.3,q75/q25=12.05 mlp_w2:H=0.8061,top10E=0.18,eRank=219.4,q75/q25=56.93 vo_prod:H=0.6696,top10E=0.33,eRank=90.4,q75/q25=14497.99 train_time:245579ms step_avg:72.23ms +[2025-09-02 17:22:22] [Rank 0] step:3401/10000 train_time:245590ms step_avg:72.21ms +[2025-09-02 17:22:22] [Rank 0] step:3401/10000 train_time:245590ms step_avg:72.21ms +[2025-09-02 17:22:23] [Rank 0] step:3421/10000 train_time:246952ms step_avg:72.19ms +[2025-09-02 17:22:23] [Rank 0] step:3421/10000 train_time:246952ms step_avg:72.19ms +[2025-09-02 17:22:25] [Rank 0] step:3441/10000 train_time:248436ms 
step_avg:72.20ms +[2025-09-02 17:22:25] [Rank 0] step:3441/10000 train_time:248436ms step_avg:72.20ms +[2025-09-02 17:22:26] [Rank 0] step:3461/10000 train_time:249922ms step_avg:72.21ms +[2025-09-02 17:22:26] [Rank 0] step:3461/10000 train_time:249922ms step_avg:72.21ms +[2025-09-02 17:22:28] [Rank 0] step:3481/10000 train_time:251407ms step_avg:72.22ms +[2025-09-02 17:22:28] [Rank 0] step:3481/10000 train_time:251407ms step_avg:72.22ms +[2025-09-02 17:22:29] [Rank 0] step:3501/10000 train_time:252896ms step_avg:72.24ms +[2025-09-02 17:22:29] [Rank 0] step:3501/10000 train_time:252896ms step_avg:72.24ms +[2025-09-02 17:22:31] [Rank 0] step:3521/10000 train_time:254386ms step_avg:72.25ms +[2025-09-02 17:22:31] [Rank 0] step:3521/10000 train_time:254386ms step_avg:72.25ms +[2025-09-02 17:22:32] [Rank 0] step:3541/10000 train_time:255884ms step_avg:72.26ms +[2025-09-02 17:22:32] [Rank 0] step:3541/10000 train_time:255884ms step_avg:72.26ms +[2025-09-02 17:22:34] [Rank 0] step:3561/10000 train_time:257375ms step_avg:72.28ms +[2025-09-02 17:22:34] [Rank 0] step:3561/10000 train_time:257375ms step_avg:72.28ms +[2025-09-02 17:22:35] [Rank 0] step:3581/10000 train_time:258863ms step_avg:72.29ms +[2025-09-02 17:22:35] [Rank 0] step:3581/10000 train_time:258863ms step_avg:72.29ms +[2025-09-02 17:22:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:22:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:22:49] [Rank 0] PRINT: step:3600/10000 val_loss:4.3490 svd_entropy: attn_qk:H=0.6947,top10E=0.33,eRank=110.8,q75/q25=80.35 attn_vo:H=0.7745,top10E=0.21,eRank=199.6,q75/q25=108.13 mlp_w1:H=0.6977,top10E=0.38,eRank=124.0,q75/q25=12.72 mlp_w2:H=0.8092,top10E=0.17,eRank=224.4,q75/q25=59.48 vo_prod:H=0.6749,top10E=0.33,eRank=93.6,q75/q25=14303.31 train_time:260503ms step_avg:72.36ms +[2025-09-02 17:22:49] [Rank 0] PRINT: step:3600/10000 val_loss:4.3490 svd_entropy: attn_qk:H=0.6947,top10E=0.33,eRank=110.8,q75/q25=80.35 attn_vo:H=0.7745,top10E=0.21,eRank=199.6,q75/q25=108.13 mlp_w1:H=0.6977,top10E=0.38,eRank=124.0,q75/q25=12.72 mlp_w2:H=0.8092,top10E=0.17,eRank=224.4,q75/q25=59.48 vo_prod:H=0.6749,top10E=0.33,eRank=93.6,q75/q25=14303.31 train_time:260503ms step_avg:72.36ms +[2025-09-02 17:22:49] [Rank 0] step:3601/10000 train_time:260514ms step_avg:72.34ms +[2025-09-02 17:22:49] [Rank 0] step:3601/10000 train_time:260514ms step_avg:72.34ms +[2025-09-02 17:22:50] [Rank 0] step:3621/10000 train_time:261883ms step_avg:72.32ms +[2025-09-02 17:22:50] [Rank 0] step:3621/10000 train_time:261883ms step_avg:72.32ms +[2025-09-02 17:22:52] [Rank 0] step:3641/10000 train_time:263368ms step_avg:72.33ms +[2025-09-02 17:22:52] [Rank 0] step:3641/10000 train_time:263368ms step_avg:72.33ms +[2025-09-02 17:22:53] [Rank 0] step:3661/10000 train_time:264852ms step_avg:72.34ms +[2025-09-02 17:22:53] [Rank 0] step:3661/10000 train_time:264852ms step_avg:72.34ms +[2025-09-02 17:22:55] [Rank 0] step:3681/10000 train_time:266338ms step_avg:72.35ms +[2025-09-02 17:22:55] [Rank 0] step:3681/10000 train_time:266338ms step_avg:72.35ms +[2025-09-02 17:22:56] [Rank 0] step:3701/10000 train_time:267823ms step_avg:72.37ms +[2025-09-02 17:22:56] [Rank 0] step:3701/10000 train_time:267823ms step_avg:72.37ms +[2025-09-02 17:22:58] [Rank 0] step:3721/10000 train_time:269334ms step_avg:72.38ms +[2025-09-02 17:22:58] [Rank 0] step:3721/10000 train_time:269334ms step_avg:72.38ms +[2025-09-02 
17:22:59] [Rank 0] step:3741/10000 train_time:270853ms step_avg:72.40ms +[2025-09-02 17:22:59] [Rank 0] step:3741/10000 train_time:270853ms step_avg:72.40ms +[2025-09-02 17:23:01] [Rank 0] step:3761/10000 train_time:272376ms step_avg:72.42ms +[2025-09-02 17:23:01] [Rank 0] step:3761/10000 train_time:272376ms step_avg:72.42ms +[2025-09-02 17:23:02] [Rank 0] step:3781/10000 train_time:273899ms step_avg:72.44ms +[2025-09-02 17:23:02] [Rank 0] step:3781/10000 train_time:273899ms step_avg:72.44ms +[2025-09-02 17:23:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:23:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:23:15] [Rank 0] PRINT: step:3800/10000 val_loss:4.2909 svd_entropy: attn_qk:H=0.6985,top10E=0.33,eRank=113.2,q75/q25=83.60 attn_vo:H=0.7788,top10E=0.21,eRank=204.4,q75/q25=107.66 mlp_w1:H=0.7027,top10E=0.37,eRank=127.6,q75/q25=13.41 mlp_w2:H=0.8120,top10E=0.17,eRank=228.9,q75/q25=61.99 vo_prod:H=0.6798,top10E=0.32,eRank=96.8,q75/q25=14288.77 train_time:275576ms step_avg:72.52ms +[2025-09-02 17:23:15] [Rank 0] PRINT: step:3800/10000 val_loss:4.2909 svd_entropy: attn_qk:H=0.6985,top10E=0.33,eRank=113.2,q75/q25=83.60 attn_vo:H=0.7788,top10E=0.21,eRank=204.4,q75/q25=107.66 mlp_w1:H=0.7027,top10E=0.37,eRank=127.6,q75/q25=13.41 mlp_w2:H=0.8120,top10E=0.17,eRank=228.9,q75/q25=61.99 vo_prod:H=0.6798,top10E=0.32,eRank=96.8,q75/q25=14288.77 train_time:275576ms step_avg:72.52ms +[2025-09-02 17:23:15] [Rank 0] step:3801/10000 train_time:275588ms step_avg:72.50ms +[2025-09-02 17:23:15] [Rank 0] step:3801/10000 train_time:275588ms step_avg:72.50ms +[2025-09-02 17:23:17] [Rank 0] step:3821/10000 train_time:276977ms step_avg:72.49ms +[2025-09-02 17:23:17] [Rank 0] step:3821/10000 train_time:276977ms step_avg:72.49ms +[2025-09-02 17:23:18] [Rank 0] step:3841/10000 train_time:278500ms 
step_avg:72.51ms +[2025-09-02 17:23:18] [Rank 0] step:3841/10000 train_time:278500ms step_avg:72.51ms +[2025-09-02 17:23:20] [Rank 0] step:3861/10000 train_time:280020ms step_avg:72.53ms +[2025-09-02 17:23:20] [Rank 0] step:3861/10000 train_time:280020ms step_avg:72.53ms +[2025-09-02 17:23:22] [Rank 0] step:3881/10000 train_time:281542ms step_avg:72.54ms +[2025-09-02 17:23:22] [Rank 0] step:3881/10000 train_time:281542ms step_avg:72.54ms +[2025-09-02 17:23:23] [Rank 0] step:3901/10000 train_time:283061ms step_avg:72.56ms +[2025-09-02 17:23:23] [Rank 0] step:3901/10000 train_time:283061ms step_avg:72.56ms +[2025-09-02 17:23:25] [Rank 0] step:3921/10000 train_time:284580ms step_avg:72.58ms +[2025-09-02 17:23:25] [Rank 0] step:3921/10000 train_time:284580ms step_avg:72.58ms +[2025-09-02 17:23:26] [Rank 0] step:3941/10000 train_time:286101ms step_avg:72.60ms +[2025-09-02 17:23:26] [Rank 0] step:3941/10000 train_time:286101ms step_avg:72.60ms +[2025-09-02 17:23:28] [Rank 0] step:3961/10000 train_time:287617ms step_avg:72.61ms +[2025-09-02 17:23:28] [Rank 0] step:3961/10000 train_time:287617ms step_avg:72.61ms +[2025-09-02 17:23:29] [Rank 0] step:3981/10000 train_time:289149ms step_avg:72.63ms +[2025-09-02 17:23:29] [Rank 0] step:3981/10000 train_time:289149ms step_avg:72.63ms +[2025-09-02 17:23:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:23:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:23:42] [Rank 0] PRINT: step:4000/10000 val_loss:4.2641 svd_entropy: attn_qk:H=0.7024,top10E=0.32,eRank=115.7,q75/q25=86.79 attn_vo:H=0.7828,top10E=0.20,eRank=209.0,q75/q25=106.04 mlp_w1:H=0.7075,top10E=0.36,eRank=131.2,q75/q25=14.13 mlp_w2:H=0.8147,top10E=0.17,eRank=233.4,q75/q25=64.48 vo_prod:H=0.6845,top10E=0.31,eRank=99.9,q75/q25=14046.35 train_time:290820ms step_avg:72.71ms +[2025-09-02 17:23:42] [Rank 0] PRINT: step:4000/10000 val_loss:4.2641 svd_entropy: attn_qk:H=0.7024,top10E=0.32,eRank=115.7,q75/q25=86.79 attn_vo:H=0.7828,top10E=0.20,eRank=209.0,q75/q25=106.04 mlp_w1:H=0.7075,top10E=0.36,eRank=131.2,q75/q25=14.13 mlp_w2:H=0.8147,top10E=0.17,eRank=233.4,q75/q25=64.48 vo_prod:H=0.6845,top10E=0.31,eRank=99.9,q75/q25=14046.35 train_time:290820ms step_avg:72.71ms +[2025-09-02 17:23:42] [Rank 0] step:4001/10000 train_time:290831ms step_avg:72.69ms +[2025-09-02 17:23:42] [Rank 0] step:4001/10000 train_time:290831ms step_avg:72.69ms +[2025-09-02 17:23:44] [Rank 0] step:4021/10000 train_time:292212ms step_avg:72.67ms +[2025-09-02 17:23:44] [Rank 0] step:4021/10000 train_time:292212ms step_avg:72.67ms +[2025-09-02 17:23:45] [Rank 0] step:4041/10000 train_time:293731ms step_avg:72.69ms +[2025-09-02 17:23:45] [Rank 0] step:4041/10000 train_time:293731ms step_avg:72.69ms +[2025-09-02 17:23:47] [Rank 0] step:4061/10000 train_time:295250ms step_avg:72.70ms +[2025-09-02 17:23:47] [Rank 0] step:4061/10000 train_time:295250ms step_avg:72.70ms +[2025-09-02 17:23:49] [Rank 0] step:4081/10000 train_time:296876ms step_avg:72.75ms +[2025-09-02 17:23:49] [Rank 0] step:4081/10000 train_time:296876ms step_avg:72.75ms +[2025-09-02 17:23:50] [Rank 0] step:4101/10000 train_time:298396ms step_avg:72.76ms +[2025-09-02 17:23:50] [Rank 0] step:4101/10000 train_time:298396ms step_avg:72.76ms +[2025-09-02 17:23:52] [Rank 0] step:4121/10000 train_time:299917ms step_avg:72.78ms +[2025-09-02 17:23:52] [Rank 0] step:4121/10000 train_time:299917ms step_avg:72.78ms +[2025-09-02 
17:23:53] [Rank 0] step:4141/10000 train_time:301438ms step_avg:72.79ms +[2025-09-02 17:23:53] [Rank 0] step:4141/10000 train_time:301438ms step_avg:72.79ms +[2025-09-02 17:23:55] [Rank 0] step:4161/10000 train_time:302958ms step_avg:72.81ms +[2025-09-02 17:23:55] [Rank 0] step:4161/10000 train_time:302958ms step_avg:72.81ms +[2025-09-02 17:23:56] [Rank 0] step:4181/10000 train_time:304482ms step_avg:72.83ms +[2025-09-02 17:23:56] [Rank 0] step:4181/10000 train_time:304482ms step_avg:72.83ms +[2025-09-02 17:23:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:23:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:24:09] [Rank 0] PRINT: step:4200/10000 val_loss:4.2454 svd_entropy: attn_qk:H=0.7060,top10E=0.32,eRank=118.1,q75/q25=89.91 attn_vo:H=0.7865,top10E=0.20,eRank=213.4,q75/q25=104.18 mlp_w1:H=0.7118,top10E=0.36,eRank=134.4,q75/q25=14.79 mlp_w2:H=0.8172,top10E=0.16,eRank=237.4,q75/q25=66.68 vo_prod:H=0.6890,top10E=0.31,eRank=102.9,q75/q25=13891.95 train_time:306155ms step_avg:72.89ms +[2025-09-02 17:24:09] [Rank 0] PRINT: step:4200/10000 val_loss:4.2454 svd_entropy: attn_qk:H=0.7060,top10E=0.32,eRank=118.1,q75/q25=89.91 attn_vo:H=0.7865,top10E=0.20,eRank=213.4,q75/q25=104.18 mlp_w1:H=0.7118,top10E=0.36,eRank=134.4,q75/q25=14.79 mlp_w2:H=0.8172,top10E=0.16,eRank=237.4,q75/q25=66.68 vo_prod:H=0.6890,top10E=0.31,eRank=102.9,q75/q25=13891.95 train_time:306155ms step_avg:72.89ms +[2025-09-02 17:24:09] [Rank 0] step:4201/10000 train_time:306166ms step_avg:72.88ms +[2025-09-02 17:24:09] [Rank 0] step:4201/10000 train_time:306166ms step_avg:72.88ms +[2025-09-02 17:24:11] [Rank 0] step:4221/10000 train_time:307546ms step_avg:72.86ms +[2025-09-02 17:24:11] [Rank 0] step:4221/10000 train_time:307546ms step_avg:72.86ms +[2025-09-02 17:24:12] [Rank 0] step:4241/10000 
train_time:309067ms step_avg:72.88ms +[2025-09-02 17:24:12] [Rank 0] step:4241/10000 train_time:309067ms step_avg:72.88ms +[2025-09-02 17:24:14] [Rank 0] step:4261/10000 train_time:310588ms step_avg:72.89ms +[2025-09-02 17:24:14] [Rank 0] step:4261/10000 train_time:310588ms step_avg:72.89ms +[2025-09-02 17:24:15] [Rank 0] step:4281/10000 train_time:312108ms step_avg:72.91ms +[2025-09-02 17:24:15] [Rank 0] step:4281/10000 train_time:312108ms step_avg:72.91ms +[2025-09-02 17:24:17] [Rank 0] step:4301/10000 train_time:313633ms step_avg:72.92ms +[2025-09-02 17:24:17] [Rank 0] step:4301/10000 train_time:313633ms step_avg:72.92ms +[2025-09-02 17:24:19] [Rank 0] step:4321/10000 train_time:315157ms step_avg:72.94ms +[2025-09-02 17:24:19] [Rank 0] step:4321/10000 train_time:315157ms step_avg:72.94ms +[2025-09-02 17:24:20] [Rank 0] step:4341/10000 train_time:316678ms step_avg:72.95ms +[2025-09-02 17:24:20] [Rank 0] step:4341/10000 train_time:316678ms step_avg:72.95ms +[2025-09-02 17:24:22] [Rank 0] step:4361/10000 train_time:318202ms step_avg:72.97ms +[2025-09-02 17:24:22] [Rank 0] step:4361/10000 train_time:318202ms step_avg:72.97ms +[2025-09-02 17:24:23] [Rank 0] step:4381/10000 train_time:319723ms step_avg:72.98ms +[2025-09-02 17:24:23] [Rank 0] step:4381/10000 train_time:319723ms step_avg:72.98ms +[2025-09-02 17:24:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:24:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:24:36] [Rank 0] PRINT: step:4400/10000 val_loss:4.2208 svd_entropy: attn_qk:H=0.7093,top10E=0.31,eRank=120.3,q75/q25=93.25 attn_vo:H=0.7899,top10E=0.19,eRank=217.6,q75/q25=102.70 mlp_w1:H=0.7160,top10E=0.35,eRank=137.7,q75/q25=15.51 mlp_w2:H=0.8193,top10E=0.16,eRank=241.2,q75/q25=69.06 vo_prod:H=0.6929,top10E=0.30,eRank=105.7,q75/q25=13526.89 train_time:321400ms step_avg:73.05ms +[2025-09-02 17:24:36] [Rank 0] PRINT: step:4400/10000 val_loss:4.2208 svd_entropy: attn_qk:H=0.7093,top10E=0.31,eRank=120.3,q75/q25=93.25 attn_vo:H=0.7899,top10E=0.19,eRank=217.6,q75/q25=102.70 mlp_w1:H=0.7160,top10E=0.35,eRank=137.7,q75/q25=15.51 mlp_w2:H=0.8193,top10E=0.16,eRank=241.2,q75/q25=69.06 vo_prod:H=0.6929,top10E=0.30,eRank=105.7,q75/q25=13526.89 train_time:321400ms step_avg:73.05ms +[2025-09-02 17:24:36] [Rank 0] step:4401/10000 train_time:321411ms step_avg:73.03ms +[2025-09-02 17:24:36] [Rank 0] step:4401/10000 train_time:321411ms step_avg:73.03ms +[2025-09-02 17:24:38] [Rank 0] step:4421/10000 train_time:322802ms step_avg:73.02ms +[2025-09-02 17:24:38] [Rank 0] step:4421/10000 train_time:322802ms step_avg:73.02ms +[2025-09-02 17:24:39] [Rank 0] step:4441/10000 train_time:324320ms step_avg:73.03ms +[2025-09-02 17:24:39] [Rank 0] step:4441/10000 train_time:324320ms step_avg:73.03ms +[2025-09-02 17:24:41] [Rank 0] step:4461/10000 train_time:325845ms step_avg:73.04ms +[2025-09-02 17:24:41] [Rank 0] step:4461/10000 train_time:325845ms step_avg:73.04ms +[2025-09-02 17:24:42] [Rank 0] step:4481/10000 train_time:327371ms step_avg:73.06ms +[2025-09-02 17:24:42] [Rank 0] step:4481/10000 train_time:327371ms step_avg:73.06ms +[2025-09-02 17:24:44] [Rank 0] step:4501/10000 train_time:328896ms step_avg:73.07ms +[2025-09-02 17:24:44] [Rank 0] step:4501/10000 train_time:328896ms step_avg:73.07ms +[2025-09-02 17:24:46] [Rank 0] step:4521/10000 train_time:330420ms step_avg:73.09ms +[2025-09-02 17:24:46] [Rank 0] step:4521/10000 train_time:330420ms step_avg:73.09ms +[2025-09-02 
17:24:47] [Rank 0] step:4541/10000 train_time:331948ms step_avg:73.10ms +[2025-09-02 17:24:47] [Rank 0] step:4541/10000 train_time:331948ms step_avg:73.10ms +[2025-09-02 17:24:49] [Rank 0] step:4561/10000 train_time:333477ms step_avg:73.11ms +[2025-09-02 17:24:49] [Rank 0] step:4561/10000 train_time:333477ms step_avg:73.11ms +[2025-09-02 17:24:50] [Rank 0] step:4581/10000 train_time:335005ms step_avg:73.13ms +[2025-09-02 17:24:50] [Rank 0] step:4581/10000 train_time:335005ms step_avg:73.13ms +[2025-09-02 17:24:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:24:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:25:03] [Rank 0] PRINT: step:4600/10000 val_loss:4.1894 svd_entropy: attn_qk:H=0.7126,top10E=0.31,eRank=122.6,q75/q25=95.10 attn_vo:H=0.7934,top10E=0.19,eRank=221.8,q75/q25=100.93 mlp_w1:H=0.7201,top10E=0.34,eRank=140.9,q75/q25=16.18 mlp_w2:H=0.8215,top10E=0.16,eRank=245.1,q75/q25=69.59 vo_prod:H=0.6971,top10E=0.30,eRank=108.8,q75/q25=13079.32 train_time:336686ms step_avg:73.19ms +[2025-09-02 17:25:03] [Rank 0] PRINT: step:4600/10000 val_loss:4.1894 svd_entropy: attn_qk:H=0.7126,top10E=0.31,eRank=122.6,q75/q25=95.10 attn_vo:H=0.7934,top10E=0.19,eRank=221.8,q75/q25=100.93 mlp_w1:H=0.7201,top10E=0.34,eRank=140.9,q75/q25=16.18 mlp_w2:H=0.8215,top10E=0.16,eRank=245.1,q75/q25=69.59 vo_prod:H=0.6971,top10E=0.30,eRank=108.8,q75/q25=13079.32 train_time:336686ms step_avg:73.19ms +[2025-09-02 17:25:03] [Rank 0] step:4601/10000 train_time:336698ms step_avg:73.18ms +[2025-09-02 17:25:03] [Rank 0] step:4601/10000 train_time:336698ms step_avg:73.18ms +[2025-09-02 17:25:05] [Rank 0] step:4621/10000 train_time:338073ms step_avg:73.16ms +[2025-09-02 17:25:05] [Rank 0] step:4621/10000 train_time:338073ms step_avg:73.16ms +[2025-09-02 17:25:06] [Rank 0] step:4641/10000 
train_time:339598ms step_avg:73.17ms +[2025-09-02 17:25:06] [Rank 0] step:4641/10000 train_time:339598ms step_avg:73.17ms +[2025-09-02 17:25:08] [Rank 0] step:4661/10000 train_time:341122ms step_avg:73.19ms +[2025-09-02 17:25:08] [Rank 0] step:4661/10000 train_time:341122ms step_avg:73.19ms +[2025-09-02 17:25:09] [Rank 0] step:4681/10000 train_time:342647ms step_avg:73.20ms +[2025-09-02 17:25:09] [Rank 0] step:4681/10000 train_time:342647ms step_avg:73.20ms +[2025-09-02 17:25:11] [Rank 0] step:4701/10000 train_time:344174ms step_avg:73.21ms +[2025-09-02 17:25:11] [Rank 0] step:4701/10000 train_time:344174ms step_avg:73.21ms +[2025-09-02 17:25:12] [Rank 0] step:4721/10000 train_time:345701ms step_avg:73.23ms +[2025-09-02 17:25:12] [Rank 0] step:4721/10000 train_time:345701ms step_avg:73.23ms +[2025-09-02 17:25:14] [Rank 0] step:4741/10000 train_time:347228ms step_avg:73.24ms +[2025-09-02 17:25:14] [Rank 0] step:4741/10000 train_time:347228ms step_avg:73.24ms +[2025-09-02 17:25:16] [Rank 0] step:4761/10000 train_time:348756ms step_avg:73.25ms +[2025-09-02 17:25:16] [Rank 0] step:4761/10000 train_time:348756ms step_avg:73.25ms +[2025-09-02 17:25:17] [Rank 0] step:4781/10000 train_time:350282ms step_avg:73.27ms +[2025-09-02 17:25:17] [Rank 0] step:4781/10000 train_time:350282ms step_avg:73.27ms +[2025-09-02 17:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:25:30] [Rank 0] PRINT: step:4800/10000 val_loss:4.1774 svd_entropy: attn_qk:H=0.7157,top10E=0.30,eRank=124.8,q75/q25=96.94 attn_vo:H=0.7965,top10E=0.19,eRank=225.7,q75/q25=99.48 mlp_w1:H=0.7236,top10E=0.34,eRank=143.9,q75/q25=16.80 mlp_w2:H=0.8235,top10E=0.15,eRank=248.5,q75/q25=71.97 vo_prod:H=0.7010,top10E=0.29,eRank=111.7,q75/q25=12594.19 train_time:351965ms step_avg:73.33ms +[2025-09-02 17:25:30] [Rank 0] PRINT: step:4800/10000 val_loss:4.1774 svd_entropy: attn_qk:H=0.7157,top10E=0.30,eRank=124.8,q75/q25=96.94 attn_vo:H=0.7965,top10E=0.19,eRank=225.7,q75/q25=99.48 mlp_w1:H=0.7236,top10E=0.34,eRank=143.9,q75/q25=16.80 mlp_w2:H=0.8235,top10E=0.15,eRank=248.5,q75/q25=71.97 vo_prod:H=0.7010,top10E=0.29,eRank=111.7,q75/q25=12594.19 train_time:351965ms step_avg:73.33ms +[2025-09-02 17:25:30] [Rank 0] step:4801/10000 train_time:351976ms step_avg:73.31ms +[2025-09-02 17:25:30] [Rank 0] step:4801/10000 train_time:351976ms step_avg:73.31ms +[2025-09-02 17:25:32] [Rank 0] step:4821/10000 train_time:353356ms step_avg:73.30ms +[2025-09-02 17:25:32] [Rank 0] step:4821/10000 train_time:353356ms step_avg:73.30ms +[2025-09-02 17:25:33] [Rank 0] step:4841/10000 train_time:354881ms step_avg:73.31ms +[2025-09-02 17:25:33] [Rank 0] step:4841/10000 train_time:354881ms step_avg:73.31ms +[2025-09-02 17:25:35] [Rank 0] step:4861/10000 train_time:356409ms step_avg:73.32ms +[2025-09-02 17:25:35] [Rank 0] step:4861/10000 train_time:356409ms step_avg:73.32ms +[2025-09-02 17:25:36] [Rank 0] step:4881/10000 train_time:357932ms step_avg:73.33ms +[2025-09-02 17:25:36] [Rank 0] step:4881/10000 train_time:357932ms step_avg:73.33ms +[2025-09-02 17:25:38] [Rank 0] step:4901/10000 train_time:359456ms step_avg:73.34ms +[2025-09-02 17:25:38] [Rank 0] step:4901/10000 train_time:359456ms step_avg:73.34ms +[2025-09-02 17:25:40] [Rank 0] step:4921/10000 train_time:360984ms step_avg:73.36ms +[2025-09-02 17:25:40] [Rank 0] step:4921/10000 train_time:360984ms step_avg:73.36ms +[2025-09-02 
17:25:41] [Rank 0] step:4941/10000 train_time:362514ms step_avg:73.37ms +[2025-09-02 17:25:41] [Rank 0] step:4941/10000 train_time:362514ms step_avg:73.37ms +[2025-09-02 17:25:43] [Rank 0] step:4961/10000 train_time:364042ms step_avg:73.38ms +[2025-09-02 17:25:43] [Rank 0] step:4961/10000 train_time:364042ms step_avg:73.38ms +[2025-09-02 17:25:44] [Rank 0] step:4981/10000 train_time:365609ms step_avg:73.40ms +[2025-09-02 17:25:44] [Rank 0] step:4981/10000 train_time:365609ms step_avg:73.40ms +[2025-09-02 17:25:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:25:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:25:58] [Rank 0] PRINT: step:5000/10000 val_loss:4.1547 svd_entropy: attn_qk:H=0.7186,top10E=0.30,eRank=127.0,q75/q25=99.01 attn_vo:H=0.7994,top10E=0.18,eRank=229.5,q75/q25=97.41 mlp_w1:H=0.7269,top10E=0.33,eRank=146.7,q75/q25=17.47 mlp_w2:H=0.8253,top10E=0.15,eRank=251.8,q75/q25=73.78 vo_prod:H=0.7044,top10E=0.29,eRank=114.3,q75/q25=11905.78 train_time:367291ms step_avg:73.46ms +[2025-09-02 17:25:58] [Rank 0] PRINT: step:5000/10000 val_loss:4.1547 svd_entropy: attn_qk:H=0.7186,top10E=0.30,eRank=127.0,q75/q25=99.01 attn_vo:H=0.7994,top10E=0.18,eRank=229.5,q75/q25=97.41 mlp_w1:H=0.7269,top10E=0.33,eRank=146.7,q75/q25=17.47 mlp_w2:H=0.8253,top10E=0.15,eRank=251.8,q75/q25=73.78 vo_prod:H=0.7044,top10E=0.29,eRank=114.3,q75/q25=11905.78 train_time:367291ms step_avg:73.46ms +[2025-09-02 17:25:58] [Rank 0] step:5001/10000 train_time:367302ms step_avg:73.45ms +[2025-09-02 17:25:58] [Rank 0] step:5001/10000 train_time:367302ms step_avg:73.45ms +[2025-09-02 17:25:59] [Rank 0] step:5021/10000 train_time:368677ms step_avg:73.43ms +[2025-09-02 17:25:59] [Rank 0] step:5021/10000 train_time:368677ms step_avg:73.43ms +[2025-09-02 17:26:01] [Rank 0] step:5041/10000 train_time:370203ms 
step_avg:73.44ms +[2025-09-02 17:26:01] [Rank 0] step:5041/10000 train_time:370203ms step_avg:73.44ms +[2025-09-02 17:26:02] [Rank 0] step:5061/10000 train_time:371729ms step_avg:73.45ms +[2025-09-02 17:26:02] [Rank 0] step:5061/10000 train_time:371729ms step_avg:73.45ms +[2025-09-02 17:26:04] [Rank 0] step:5081/10000 train_time:373258ms step_avg:73.46ms +[2025-09-02 17:26:04] [Rank 0] step:5081/10000 train_time:373258ms step_avg:73.46ms +[2025-09-02 17:26:05] [Rank 0] step:5101/10000 train_time:374786ms step_avg:73.47ms +[2025-09-02 17:26:05] [Rank 0] step:5101/10000 train_time:374786ms step_avg:73.47ms +[2025-09-02 17:26:07] [Rank 0] step:5121/10000 train_time:376313ms step_avg:73.48ms +[2025-09-02 17:26:07] [Rank 0] step:5121/10000 train_time:376313ms step_avg:73.48ms +[2025-09-02 17:26:08] [Rank 0] step:5141/10000 train_time:377841ms step_avg:73.50ms +[2025-09-02 17:26:08] [Rank 0] step:5141/10000 train_time:377841ms step_avg:73.50ms +[2025-09-02 17:26:10] [Rank 0] step:5161/10000 train_time:379367ms step_avg:73.51ms +[2025-09-02 17:26:10] [Rank 0] step:5161/10000 train_time:379367ms step_avg:73.51ms +[2025-09-02 17:26:11] [Rank 0] step:5181/10000 train_time:380896ms step_avg:73.52ms +[2025-09-02 17:26:11] [Rank 0] step:5181/10000 train_time:380896ms step_avg:73.52ms +[2025-09-02 17:26:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:26:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:26:25] [Rank 0] PRINT: step:5200/10000 val_loss:4.1331 svd_entropy: attn_qk:H=0.7214,top10E=0.29,eRank=129.0,q75/q25=100.32 attn_vo:H=0.8022,top10E=0.18,eRank=233.1,q75/q25=95.15 mlp_w1:H=0.7302,top10E=0.33,eRank=149.5,q75/q25=18.20 mlp_w2:H=0.8270,top10E=0.15,eRank=254.9,q75/q25=74.78 vo_prod:H=0.7077,top10E=0.28,eRank=116.8,q75/q25=11185.64 train_time:382604ms step_avg:73.58ms +[2025-09-02 17:26:25] [Rank 0] PRINT: step:5200/10000 val_loss:4.1331 svd_entropy: attn_qk:H=0.7214,top10E=0.29,eRank=129.0,q75/q25=100.32 attn_vo:H=0.8022,top10E=0.18,eRank=233.1,q75/q25=95.15 mlp_w1:H=0.7302,top10E=0.33,eRank=149.5,q75/q25=18.20 mlp_w2:H=0.8270,top10E=0.15,eRank=254.9,q75/q25=74.78 vo_prod:H=0.7077,top10E=0.28,eRank=116.8,q75/q25=11185.64 train_time:382604ms step_avg:73.58ms +[2025-09-02 17:26:25] [Rank 0] step:5201/10000 train_time:382616ms step_avg:73.57ms +[2025-09-02 17:26:25] [Rank 0] step:5201/10000 train_time:382616ms step_avg:73.57ms +[2025-09-02 17:26:26] [Rank 0] step:5221/10000 train_time:384015ms step_avg:73.55ms +[2025-09-02 17:26:26] [Rank 0] step:5221/10000 train_time:384015ms step_avg:73.55ms +[2025-09-02 17:26:28] [Rank 0] step:5241/10000 train_time:385571ms step_avg:73.57ms +[2025-09-02 17:26:28] [Rank 0] step:5241/10000 train_time:385571ms step_avg:73.57ms +[2025-09-02 17:26:30] [Rank 0] step:5261/10000 train_time:387128ms step_avg:73.58ms +[2025-09-02 17:26:30] [Rank 0] step:5261/10000 train_time:387128ms step_avg:73.58ms +[2025-09-02 17:26:31] [Rank 0] step:5281/10000 train_time:388689ms step_avg:73.60ms +[2025-09-02 17:26:31] [Rank 0] step:5281/10000 train_time:388689ms step_avg:73.60ms +[2025-09-02 17:26:33] [Rank 0] step:5301/10000 train_time:390258ms step_avg:73.62ms +[2025-09-02 17:26:33] [Rank 0] step:5301/10000 train_time:390258ms step_avg:73.62ms +[2025-09-02 17:26:34] [Rank 0] step:5321/10000 train_time:391818ms step_avg:73.64ms +[2025-09-02 17:26:34] [Rank 0] step:5321/10000 train_time:391818ms step_avg:73.64ms +[2025-09-02 
17:26:36] [Rank 0] step:5341/10000 train_time:393376ms step_avg:73.65ms +[2025-09-02 17:26:36] [Rank 0] step:5341/10000 train_time:393376ms step_avg:73.65ms +[2025-09-02 17:26:37] [Rank 0] step:5361/10000 train_time:394941ms step_avg:73.67ms +[2025-09-02 17:26:37] [Rank 0] step:5361/10000 train_time:394941ms step_avg:73.67ms +[2025-09-02 17:26:39] [Rank 0] step:5381/10000 train_time:396502ms step_avg:73.69ms +[2025-09-02 17:26:39] [Rank 0] step:5381/10000 train_time:396502ms step_avg:73.69ms +[2025-09-02 17:26:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:26:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:26:52] [Rank 0] PRINT: step:5400/10000 val_loss:4.1159 svd_entropy: attn_qk:H=0.7239,top10E=0.29,eRank=131.0,q75/q25=101.45 attn_vo:H=0.8048,top10E=0.18,eRank=236.6,q75/q25=92.06 mlp_w1:H=0.7335,top10E=0.32,eRank=152.4,q75/q25=18.75 mlp_w2:H=0.8288,top10E=0.15,eRank=258.1,q75/q25=75.35 vo_prod:H=0.7109,top10E=0.28,eRank=119.4,q75/q25=10614.04 train_time:398217ms step_avg:73.74ms +[2025-09-02 17:26:52] [Rank 0] PRINT: step:5400/10000 val_loss:4.1159 svd_entropy: attn_qk:H=0.7239,top10E=0.29,eRank=131.0,q75/q25=101.45 attn_vo:H=0.8048,top10E=0.18,eRank=236.6,q75/q25=92.06 mlp_w1:H=0.7335,top10E=0.32,eRank=152.4,q75/q25=18.75 mlp_w2:H=0.8288,top10E=0.15,eRank=258.1,q75/q25=75.35 vo_prod:H=0.7109,top10E=0.28,eRank=119.4,q75/q25=10614.04 train_time:398217ms step_avg:73.74ms +[2025-09-02 17:26:52] [Rank 0] step:5401/10000 train_time:398229ms step_avg:73.73ms +[2025-09-02 17:26:52] [Rank 0] step:5401/10000 train_time:398229ms step_avg:73.73ms +[2025-09-02 17:26:54] [Rank 0] step:5421/10000 train_time:399644ms step_avg:73.72ms +[2025-09-02 17:26:54] [Rank 0] step:5421/10000 train_time:399644ms step_avg:73.72ms +[2025-09-02 17:26:56] [Rank 0] step:5441/10000 
train_time:401197ms step_avg:73.74ms +[2025-09-02 17:26:56] [Rank 0] step:5441/10000 train_time:401197ms step_avg:73.74ms +[2025-09-02 17:26:57] [Rank 0] step:5461/10000 train_time:402759ms step_avg:73.75ms +[2025-09-02 17:26:57] [Rank 0] step:5461/10000 train_time:402759ms step_avg:73.75ms +[2025-09-02 17:26:59] [Rank 0] step:5481/10000 train_time:404323ms step_avg:73.77ms +[2025-09-02 17:26:59] [Rank 0] step:5481/10000 train_time:404323ms step_avg:73.77ms +[2025-09-02 17:27:00] [Rank 0] step:5501/10000 train_time:405887ms step_avg:73.78ms +[2025-09-02 17:27:00] [Rank 0] step:5501/10000 train_time:405887ms step_avg:73.78ms +[2025-09-02 17:27:02] [Rank 0] step:5521/10000 train_time:407452ms step_avg:73.80ms +[2025-09-02 17:27:02] [Rank 0] step:5521/10000 train_time:407452ms step_avg:73.80ms +[2025-09-02 17:27:03] [Rank 0] step:5541/10000 train_time:409012ms step_avg:73.82ms +[2025-09-02 17:27:03] [Rank 0] step:5541/10000 train_time:409012ms step_avg:73.82ms +[2025-09-02 17:27:05] [Rank 0] step:5561/10000 train_time:410573ms step_avg:73.83ms +[2025-09-02 17:27:05] [Rank 0] step:5561/10000 train_time:410573ms step_avg:73.83ms +[2025-09-02 17:27:06] [Rank 0] step:5581/10000 train_time:412132ms step_avg:73.85ms +[2025-09-02 17:27:06] [Rank 0] step:5581/10000 train_time:412132ms step_avg:73.85ms +[2025-09-02 17:27:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:27:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:27:20] [Rank 0] PRINT: step:5600/10000 val_loss:4.1008 svd_entropy: attn_qk:H=0.7265,top10E=0.29,eRank=133.0,q75/q25=103.06 attn_vo:H=0.8072,top10E=0.17,eRank=239.8,q75/q25=90.63 mlp_w1:H=0.7365,top10E=0.32,eRank=155.1,q75/q25=19.37 mlp_w2:H=0.8303,top10E=0.15,eRank=261.0,q75/q25=75.99 vo_prod:H=0.7138,top10E=0.27,eRank=121.8,q75/q25=10082.95 train_time:413859ms step_avg:73.90ms +[2025-09-02 17:27:20] [Rank 0] PRINT: step:5600/10000 val_loss:4.1008 svd_entropy: attn_qk:H=0.7265,top10E=0.29,eRank=133.0,q75/q25=103.06 attn_vo:H=0.8072,top10E=0.17,eRank=239.8,q75/q25=90.63 mlp_w1:H=0.7365,top10E=0.32,eRank=155.1,q75/q25=19.37 mlp_w2:H=0.8303,top10E=0.15,eRank=261.0,q75/q25=75.99 vo_prod:H=0.7138,top10E=0.27,eRank=121.8,q75/q25=10082.95 train_time:413859ms step_avg:73.90ms +[2025-09-02 17:27:20] [Rank 0] step:5601/10000 train_time:413871ms step_avg:73.89ms +[2025-09-02 17:27:20] [Rank 0] step:5601/10000 train_time:413871ms step_avg:73.89ms +[2025-09-02 17:27:21] [Rank 0] step:5621/10000 train_time:415286ms step_avg:73.88ms +[2025-09-02 17:27:21] [Rank 0] step:5621/10000 train_time:415286ms step_avg:73.88ms +[2025-09-02 17:27:23] [Rank 0] step:5641/10000 train_time:416844ms step_avg:73.90ms +[2025-09-02 17:27:23] [Rank 0] step:5641/10000 train_time:416844ms step_avg:73.90ms +[2025-09-02 17:27:25] [Rank 0] step:5661/10000 train_time:418398ms step_avg:73.91ms +[2025-09-02 17:27:25] [Rank 0] step:5661/10000 train_time:418398ms step_avg:73.91ms +[2025-09-02 17:27:26] [Rank 0] step:5681/10000 train_time:419960ms step_avg:73.92ms +[2025-09-02 17:27:26] [Rank 0] step:5681/10000 train_time:419960ms step_avg:73.92ms +[2025-09-02 17:27:28] [Rank 0] step:5701/10000 train_time:421516ms step_avg:73.94ms +[2025-09-02 17:27:28] [Rank 0] step:5701/10000 train_time:421516ms step_avg:73.94ms +[2025-09-02 17:27:29] [Rank 0] step:5721/10000 train_time:423077ms step_avg:73.95ms +[2025-09-02 17:27:29] [Rank 0] step:5721/10000 train_time:423077ms step_avg:73.95ms +[2025-09-02 
17:27:31] [Rank 0] step:5741/10000 train_time:424636ms step_avg:73.97ms +[2025-09-02 17:27:31] [Rank 0] step:5741/10000 train_time:424636ms step_avg:73.97ms +[2025-09-02 17:27:32] [Rank 0] step:5761/10000 train_time:426198ms step_avg:73.98ms +[2025-09-02 17:27:32] [Rank 0] step:5761/10000 train_time:426198ms step_avg:73.98ms +[2025-09-02 17:27:34] [Rank 0] step:5781/10000 train_time:427760ms step_avg:73.99ms +[2025-09-02 17:27:34] [Rank 0] step:5781/10000 train_time:427760ms step_avg:73.99ms +[2025-09-02 17:27:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:27:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:27:47] [Rank 0] PRINT: step:5800/10000 val_loss:4.0920 svd_entropy: attn_qk:H=0.7288,top10E=0.28,eRank=134.8,q75/q25=104.00 attn_vo:H=0.8095,top10E=0.17,eRank=243.0,q75/q25=87.90 mlp_w1:H=0.7393,top10E=0.32,eRank=157.6,q75/q25=20.10 mlp_w2:H=0.8318,top10E=0.14,eRank=263.8,q75/q25=77.24 vo_prod:H=0.7166,top10E=0.27,eRank=124.1,q75/q25=9528.56 train_time:429480ms step_avg:74.05ms +[2025-09-02 17:27:47] [Rank 0] PRINT: step:5800/10000 val_loss:4.0920 svd_entropy: attn_qk:H=0.7288,top10E=0.28,eRank=134.8,q75/q25=104.00 attn_vo:H=0.8095,top10E=0.17,eRank=243.0,q75/q25=87.90 mlp_w1:H=0.7393,top10E=0.32,eRank=157.6,q75/q25=20.10 mlp_w2:H=0.8318,top10E=0.14,eRank=263.8,q75/q25=77.24 vo_prod:H=0.7166,top10E=0.27,eRank=124.1,q75/q25=9528.56 train_time:429480ms step_avg:74.05ms +[2025-09-02 17:27:47] [Rank 0] step:5801/10000 train_time:429491ms step_avg:74.04ms +[2025-09-02 17:27:47] [Rank 0] step:5801/10000 train_time:429491ms step_avg:74.04ms +[2025-09-02 17:27:49] [Rank 0] step:5821/10000 train_time:430914ms step_avg:74.03ms +[2025-09-02 17:27:49] [Rank 0] step:5821/10000 train_time:430914ms step_avg:74.03ms +[2025-09-02 17:27:50] [Rank 0] step:5841/10000 train_time:432504ms 
step_avg:74.05ms +[2025-09-02 17:27:50] [Rank 0] step:5841/10000 train_time:432504ms step_avg:74.05ms +[2025-09-02 17:27:52] [Rank 0] step:5861/10000 train_time:434069ms step_avg:74.06ms +[2025-09-02 17:27:52] [Rank 0] step:5861/10000 train_time:434069ms step_avg:74.06ms +[2025-09-02 17:27:54] [Rank 0] step:5881/10000 train_time:435630ms step_avg:74.07ms +[2025-09-02 17:27:54] [Rank 0] step:5881/10000 train_time:435630ms step_avg:74.07ms +[2025-09-02 17:27:55] [Rank 0] step:5901/10000 train_time:437189ms step_avg:74.09ms +[2025-09-02 17:27:55] [Rank 0] step:5901/10000 train_time:437189ms step_avg:74.09ms +[2025-09-02 17:27:57] [Rank 0] step:5921/10000 train_time:438750ms step_avg:74.10ms +[2025-09-02 17:27:57] [Rank 0] step:5921/10000 train_time:438750ms step_avg:74.10ms +[2025-09-02 17:27:58] [Rank 0] step:5941/10000 train_time:440313ms step_avg:74.11ms +[2025-09-02 17:27:58] [Rank 0] step:5941/10000 train_time:440313ms step_avg:74.11ms +[2025-09-02 17:28:00] [Rank 0] step:5961/10000 train_time:441878ms step_avg:74.13ms +[2025-09-02 17:28:00] [Rank 0] step:5961/10000 train_time:441878ms step_avg:74.13ms +[2025-09-02 17:28:01] [Rank 0] step:5981/10000 train_time:443442ms step_avg:74.14ms +[2025-09-02 17:28:01] [Rank 0] step:5981/10000 train_time:443442ms step_avg:74.14ms +[2025-09-02 17:28:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:28:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:28:14] [Rank 0] PRINT: step:6000/10000 val_loss:4.0654 svd_entropy: attn_qk:H=0.7312,top10E=0.28,eRank=136.7,q75/q25=105.29 attn_vo:H=0.8117,top10E=0.17,eRank=246.1,q75/q25=85.88 mlp_w1:H=0.7420,top10E=0.31,eRank=160.2,q75/q25=20.68 mlp_w2:H=0.8333,top10E=0.14,eRank=266.5,q75/q25=76.24 vo_prod:H=0.7195,top10E=0.27,eRank=126.5,q75/q25=8968.27 train_time:445158ms step_avg:74.19ms +[2025-09-02 17:28:14] [Rank 0] PRINT: step:6000/10000 val_loss:4.0654 svd_entropy: attn_qk:H=0.7312,top10E=0.28,eRank=136.7,q75/q25=105.29 attn_vo:H=0.8117,top10E=0.17,eRank=246.1,q75/q25=85.88 mlp_w1:H=0.7420,top10E=0.31,eRank=160.2,q75/q25=20.68 mlp_w2:H=0.8333,top10E=0.14,eRank=266.5,q75/q25=76.24 vo_prod:H=0.7195,top10E=0.27,eRank=126.5,q75/q25=8968.27 train_time:445158ms step_avg:74.19ms +[2025-09-02 17:28:15] [Rank 0] step:6001/10000 train_time:445170ms step_avg:74.18ms +[2025-09-02 17:28:15] [Rank 0] step:6001/10000 train_time:445170ms step_avg:74.18ms +[2025-09-02 17:28:16] [Rank 0] step:6021/10000 train_time:446585ms step_avg:74.17ms +[2025-09-02 17:28:16] [Rank 0] step:6021/10000 train_time:446585ms step_avg:74.17ms +[2025-09-02 17:28:18] [Rank 0] step:6041/10000 train_time:448145ms step_avg:74.18ms +[2025-09-02 17:28:18] [Rank 0] step:6041/10000 train_time:448145ms step_avg:74.18ms +[2025-09-02 17:28:19] [Rank 0] step:6061/10000 train_time:449715ms step_avg:74.20ms +[2025-09-02 17:28:19] [Rank 0] step:6061/10000 train_time:449715ms step_avg:74.20ms +[2025-09-02 17:28:21] [Rank 0] step:6081/10000 train_time:451281ms step_avg:74.21ms +[2025-09-02 17:28:21] [Rank 0] step:6081/10000 train_time:451281ms step_avg:74.21ms +[2025-09-02 17:28:22] [Rank 0] step:6101/10000 train_time:452847ms step_avg:74.23ms +[2025-09-02 17:28:22] [Rank 0] step:6101/10000 train_time:452847ms step_avg:74.23ms +[2025-09-02 17:28:24] [Rank 0] step:6121/10000 train_time:454686ms step_avg:74.28ms +[2025-09-02 17:28:24] [Rank 0] step:6121/10000 train_time:454686ms step_avg:74.28ms +[2025-09-02 
17:28:26] [Rank 0] step:6141/10000 train_time:456260ms step_avg:74.30ms +[2025-09-02 17:28:26] [Rank 0] step:6141/10000 train_time:456260ms step_avg:74.30ms +[2025-09-02 17:28:27] [Rank 0] step:6161/10000 train_time:457825ms step_avg:74.31ms +[2025-09-02 17:28:27] [Rank 0] step:6161/10000 train_time:457825ms step_avg:74.31ms +[2025-09-02 17:28:29] [Rank 0] step:6181/10000 train_time:459390ms step_avg:74.32ms +[2025-09-02 17:28:29] [Rank 0] step:6181/10000 train_time:459390ms step_avg:74.32ms +[2025-09-02 17:28:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:28:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:28:42] [Rank 0] PRINT: step:6200/10000 val_loss:4.0508 svd_entropy: attn_qk:H=0.7335,top10E=0.28,eRank=138.6,q75/q25=106.46 attn_vo:H=0.8138,top10E=0.17,eRank=249.1,q75/q25=83.75 mlp_w1:H=0.7445,top10E=0.31,eRank=162.5,q75/q25=21.31 mlp_w2:H=0.8347,top10E=0.14,eRank=269.2,q75/q25=75.32 vo_prod:H=0.7221,top10E=0.26,eRank=128.7,q75/q25=8381.61 train_time:461114ms step_avg:74.37ms +[2025-09-02 17:28:42] [Rank 0] PRINT: step:6200/10000 val_loss:4.0508 svd_entropy: attn_qk:H=0.7335,top10E=0.28,eRank=138.6,q75/q25=106.46 attn_vo:H=0.8138,top10E=0.17,eRank=249.1,q75/q25=83.75 mlp_w1:H=0.7445,top10E=0.31,eRank=162.5,q75/q25=21.31 mlp_w2:H=0.8347,top10E=0.14,eRank=269.2,q75/q25=75.32 vo_prod:H=0.7221,top10E=0.26,eRank=128.7,q75/q25=8381.61 train_time:461114ms step_avg:74.37ms +[2025-09-02 17:28:42] [Rank 0] step:6201/10000 train_time:461126ms step_avg:74.36ms +[2025-09-02 17:28:42] [Rank 0] step:6201/10000 train_time:461126ms step_avg:74.36ms +[2025-09-02 17:28:44] [Rank 0] step:6221/10000 train_time:462543ms step_avg:74.35ms +[2025-09-02 17:28:44] [Rank 0] step:6221/10000 train_time:462543ms step_avg:74.35ms +[2025-09-02 17:28:45] [Rank 0] step:6241/10000 train_time:464104ms 
step_avg:74.36ms +[2025-09-02 17:28:45] [Rank 0] step:6241/10000 train_time:464104ms step_avg:74.36ms +[2025-09-02 17:28:47] [Rank 0] step:6261/10000 train_time:465667ms step_avg:74.38ms +[2025-09-02 17:28:47] [Rank 0] step:6261/10000 train_time:465667ms step_avg:74.38ms +[2025-09-02 17:28:48] [Rank 0] step:6281/10000 train_time:467237ms step_avg:74.39ms +[2025-09-02 17:28:48] [Rank 0] step:6281/10000 train_time:467237ms step_avg:74.39ms +[2025-09-02 17:28:50] [Rank 0] step:6301/10000 train_time:468804ms step_avg:74.40ms +[2025-09-02 17:28:50] [Rank 0] step:6301/10000 train_time:468804ms step_avg:74.40ms +[2025-09-02 17:28:52] [Rank 0] step:6321/10000 train_time:470391ms step_avg:74.42ms +[2025-09-02 17:28:52] [Rank 0] step:6321/10000 train_time:470391ms step_avg:74.42ms +[2025-09-02 17:28:53] [Rank 0] step:6341/10000 train_time:471959ms step_avg:74.43ms +[2025-09-02 17:28:53] [Rank 0] step:6341/10000 train_time:471959ms step_avg:74.43ms +[2025-09-02 17:28:55] [Rank 0] step:6361/10000 train_time:473532ms step_avg:74.44ms +[2025-09-02 17:28:55] [Rank 0] step:6361/10000 train_time:473532ms step_avg:74.44ms +[2025-09-02 17:28:56] [Rank 0] step:6381/10000 train_time:475103ms step_avg:74.46ms +[2025-09-02 17:28:56] [Rank 0] step:6381/10000 train_time:475103ms step_avg:74.46ms +[2025-09-02 17:28:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:28:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:29:10] [Rank 0] PRINT: step:6400/10000 val_loss:4.0336 svd_entropy: attn_qk:H=0.7354,top10E=0.28,eRank=140.1,q75/q25=107.44 attn_vo:H=0.8157,top10E=0.17,eRank=251.8,q75/q25=81.69 mlp_w1:H=0.7468,top10E=0.30,eRank=164.7,q75/q25=21.82 mlp_w2:H=0.8358,top10E=0.14,eRank=271.3,q75/q25=75.98 vo_prod:H=0.7246,top10E=0.26,eRank=130.8,q75/q25=7721.86 train_time:476827ms step_avg:74.50ms +[2025-09-02 17:29:10] [Rank 0] PRINT: step:6400/10000 val_loss:4.0336 svd_entropy: attn_qk:H=0.7354,top10E=0.28,eRank=140.1,q75/q25=107.44 attn_vo:H=0.8157,top10E=0.17,eRank=251.8,q75/q25=81.69 mlp_w1:H=0.7468,top10E=0.30,eRank=164.7,q75/q25=21.82 mlp_w2:H=0.8358,top10E=0.14,eRank=271.3,q75/q25=75.98 vo_prod:H=0.7246,top10E=0.26,eRank=130.8,q75/q25=7721.86 train_time:476827ms step_avg:74.50ms +[2025-09-02 17:29:10] [Rank 0] step:6401/10000 train_time:476838ms step_avg:74.49ms +[2025-09-02 17:29:10] [Rank 0] step:6401/10000 train_time:476838ms step_avg:74.49ms +[2025-09-02 17:29:11] [Rank 0] step:6421/10000 train_time:478254ms step_avg:74.48ms +[2025-09-02 17:29:11] [Rank 0] step:6421/10000 train_time:478254ms step_avg:74.48ms +[2025-09-02 17:29:13] [Rank 0] step:6441/10000 train_time:479817ms step_avg:74.49ms +[2025-09-02 17:29:13] [Rank 0] step:6441/10000 train_time:479817ms step_avg:74.49ms +[2025-09-02 17:29:14] [Rank 0] step:6461/10000 train_time:481384ms step_avg:74.51ms +[2025-09-02 17:29:14] [Rank 0] step:6461/10000 train_time:481384ms step_avg:74.51ms +[2025-09-02 17:29:16] [Rank 0] step:6481/10000 train_time:482959ms step_avg:74.52ms +[2025-09-02 17:29:16] [Rank 0] step:6481/10000 train_time:482959ms step_avg:74.52ms +[2025-09-02 17:29:17] [Rank 0] step:6501/10000 train_time:484521ms step_avg:74.53ms +[2025-09-02 17:29:17] [Rank 0] step:6501/10000 train_time:484521ms step_avg:74.53ms +[2025-09-02 17:29:19] [Rank 0] step:6521/10000 train_time:486085ms step_avg:74.54ms +[2025-09-02 17:29:19] [Rank 0] step:6521/10000 train_time:486085ms step_avg:74.54ms +[2025-09-02 
17:29:21] [Rank 0] step:6541/10000 train_time:487651ms step_avg:74.55ms +[2025-09-02 17:29:21] [Rank 0] step:6541/10000 train_time:487651ms step_avg:74.55ms +[2025-09-02 17:29:22] [Rank 0] step:6561/10000 train_time:489221ms step_avg:74.56ms +[2025-09-02 17:29:22] [Rank 0] step:6561/10000 train_time:489221ms step_avg:74.56ms +[2025-09-02 17:29:24] [Rank 0] step:6581/10000 train_time:490784ms step_avg:74.58ms +[2025-09-02 17:29:24] [Rank 0] step:6581/10000 train_time:490784ms step_avg:74.58ms +[2025-09-02 17:29:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:29:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:29:37] [Rank 0] PRINT: step:6600/10000 val_loss:4.0216 svd_entropy: attn_qk:H=0.7373,top10E=0.27,eRank=141.8,q75/q25=107.93 attn_vo:H=0.8175,top10E=0.16,eRank=254.3,q75/q25=79.89 mlp_w1:H=0.7490,top10E=0.30,eRank=166.9,q75/q25=22.42 mlp_w2:H=0.8369,top10E=0.14,eRank=273.5,q75/q25=76.21 vo_prod:H=0.7268,top10E=0.26,eRank=132.8,q75/q25=7369.73 train_time:492508ms step_avg:74.62ms +[2025-09-02 17:29:37] [Rank 0] PRINT: step:6600/10000 val_loss:4.0216 svd_entropy: attn_qk:H=0.7373,top10E=0.27,eRank=141.8,q75/q25=107.93 attn_vo:H=0.8175,top10E=0.16,eRank=254.3,q75/q25=79.89 mlp_w1:H=0.7490,top10E=0.30,eRank=166.9,q75/q25=22.42 mlp_w2:H=0.8369,top10E=0.14,eRank=273.5,q75/q25=76.21 vo_prod:H=0.7268,top10E=0.26,eRank=132.8,q75/q25=7369.73 train_time:492508ms step_avg:74.62ms +[2025-09-02 17:29:37] [Rank 0] step:6601/10000 train_time:492520ms step_avg:74.61ms +[2025-09-02 17:29:37] [Rank 0] step:6601/10000 train_time:492520ms step_avg:74.61ms +[2025-09-02 17:29:39] [Rank 0] step:6621/10000 train_time:493936ms step_avg:74.60ms +[2025-09-02 17:29:39] [Rank 0] step:6621/10000 train_time:493936ms step_avg:74.60ms +[2025-09-02 17:29:40] [Rank 0] step:6641/10000 train_time:495504ms 
step_avg:74.61ms +[2025-09-02 17:29:40] [Rank 0] step:6641/10000 train_time:495504ms step_avg:74.61ms +[2025-09-02 17:29:42] [Rank 0] step:6661/10000 train_time:497070ms step_avg:74.62ms +[2025-09-02 17:29:42] [Rank 0] step:6661/10000 train_time:497070ms step_avg:74.62ms +[2025-09-02 17:29:43] [Rank 0] step:6681/10000 train_time:498652ms step_avg:74.64ms +[2025-09-02 17:29:43] [Rank 0] step:6681/10000 train_time:498652ms step_avg:74.64ms +[2025-09-02 17:29:45] [Rank 0] step:6701/10000 train_time:500252ms step_avg:74.65ms +[2025-09-02 17:29:45] [Rank 0] step:6701/10000 train_time:500252ms step_avg:74.65ms +[2025-09-02 17:29:47] [Rank 0] step:6721/10000 train_time:501846ms step_avg:74.67ms +[2025-09-02 17:29:47] [Rank 0] step:6721/10000 train_time:501846ms step_avg:74.67ms +[2025-09-02 17:29:48] [Rank 0] step:6741/10000 train_time:503437ms step_avg:74.68ms +[2025-09-02 17:29:48] [Rank 0] step:6741/10000 train_time:503437ms step_avg:74.68ms +[2025-09-02 17:29:50] [Rank 0] step:6761/10000 train_time:505029ms step_avg:74.70ms +[2025-09-02 17:29:50] [Rank 0] step:6761/10000 train_time:505029ms step_avg:74.70ms +[2025-09-02 17:29:51] [Rank 0] step:6781/10000 train_time:506627ms step_avg:74.71ms +[2025-09-02 17:29:51] [Rank 0] step:6781/10000 train_time:506627ms step_avg:74.71ms +[2025-09-02 17:29:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:29:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:30:04] [Rank 0] PRINT: step:6800/10000 val_loss:4.0034 svd_entropy: attn_qk:H=0.7389,top10E=0.27,eRank=143.2,q75/q25=108.47 attn_vo:H=0.8191,top10E=0.16,eRank=256.6,q75/q25=78.15 mlp_w1:H=0.7510,top10E=0.30,eRank=168.9,q75/q25=22.89 mlp_w2:H=0.8379,top10E=0.14,eRank=275.5,q75/q25=75.87 vo_prod:H=0.7288,top10E=0.26,eRank=134.6,q75/q25=7224.33 train_time:508484ms step_avg:74.78ms +[2025-09-02 17:30:04] [Rank 0] PRINT: step:6800/10000 val_loss:4.0034 svd_entropy: attn_qk:H=0.7389,top10E=0.27,eRank=143.2,q75/q25=108.47 attn_vo:H=0.8191,top10E=0.16,eRank=256.6,q75/q25=78.15 mlp_w1:H=0.7510,top10E=0.30,eRank=168.9,q75/q25=22.89 mlp_w2:H=0.8379,top10E=0.14,eRank=275.5,q75/q25=75.87 vo_prod:H=0.7288,top10E=0.26,eRank=134.6,q75/q25=7224.33 train_time:508484ms step_avg:74.78ms +[2025-09-02 17:30:04] [Rank 0] step:6801/10000 train_time:508496ms step_avg:74.77ms +[2025-09-02 17:30:04] [Rank 0] step:6801/10000 train_time:508496ms step_avg:74.77ms +[2025-09-02 17:30:06] [Rank 0] step:6821/10000 train_time:509947ms step_avg:74.76ms +[2025-09-02 17:30:06] [Rank 0] step:6821/10000 train_time:509947ms step_avg:74.76ms +[2025-09-02 17:30:08] [Rank 0] step:6841/10000 train_time:511536ms step_avg:74.78ms +[2025-09-02 17:30:08] [Rank 0] step:6841/10000 train_time:511536ms step_avg:74.78ms +[2025-09-02 17:30:09] [Rank 0] step:6861/10000 train_time:513130ms step_avg:74.79ms +[2025-09-02 17:30:09] [Rank 0] step:6861/10000 train_time:513130ms step_avg:74.79ms +[2025-09-02 17:30:11] [Rank 0] step:6881/10000 train_time:514721ms step_avg:74.80ms +[2025-09-02 17:30:11] [Rank 0] step:6881/10000 train_time:514721ms step_avg:74.80ms +[2025-09-02 17:30:12] [Rank 0] step:6901/10000 train_time:516312ms step_avg:74.82ms +[2025-09-02 17:30:12] [Rank 0] step:6901/10000 train_time:516312ms step_avg:74.82ms +[2025-09-02 17:30:14] [Rank 0] step:6921/10000 train_time:517902ms step_avg:74.83ms +[2025-09-02 17:30:14] [Rank 0] step:6921/10000 train_time:517902ms step_avg:74.83ms +[2025-09-02 
17:30:16] [Rank 0] step:6941/10000 train_time:519501ms step_avg:74.85ms +[2025-09-02 17:30:16] [Rank 0] step:6941/10000 train_time:519501ms step_avg:74.85ms +[2025-09-02 17:30:17] [Rank 0] step:6961/10000 train_time:521108ms step_avg:74.86ms +[2025-09-02 17:30:17] [Rank 0] step:6961/10000 train_time:521108ms step_avg:74.86ms +[2025-09-02 17:30:19] [Rank 0] step:6981/10000 train_time:522707ms step_avg:74.88ms +[2025-09-02 17:30:19] [Rank 0] step:6981/10000 train_time:522707ms step_avg:74.88ms +[2025-09-02 17:30:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:30:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:30:32] [Rank 0] PRINT: step:7000/10000 val_loss:3.9876 svd_entropy: attn_qk:H=0.7405,top10E=0.27,eRank=144.5,q75/q25=108.45 attn_vo:H=0.8205,top10E=0.16,eRank=258.7,q75/q25=76.32 mlp_w1:H=0.7527,top10E=0.30,eRank=170.7,q75/q25=23.50 mlp_w2:H=0.8390,top10E=0.14,eRank=277.4,q75/q25=75.42 vo_prod:H=0.7307,top10E=0.25,eRank=136.3,q75/q25=6694.12 train_time:524467ms step_avg:74.92ms +[2025-09-02 17:30:32] [Rank 0] PRINT: step:7000/10000 val_loss:3.9876 svd_entropy: attn_qk:H=0.7405,top10E=0.27,eRank=144.5,q75/q25=108.45 attn_vo:H=0.8205,top10E=0.16,eRank=258.7,q75/q25=76.32 mlp_w1:H=0.7527,top10E=0.30,eRank=170.7,q75/q25=23.50 mlp_w2:H=0.8390,top10E=0.14,eRank=277.4,q75/q25=75.42 vo_prod:H=0.7307,top10E=0.25,eRank=136.3,q75/q25=6694.12 train_time:524467ms step_avg:74.92ms +[2025-09-02 17:30:32] [Rank 0] step:7001/10000 train_time:524479ms step_avg:74.91ms +[2025-09-02 17:30:32] [Rank 0] step:7001/10000 train_time:524479ms step_avg:74.91ms +[2025-09-02 17:30:34] [Rank 0] step:7021/10000 train_time:525914ms step_avg:74.91ms +[2025-09-02 17:30:34] [Rank 0] step:7021/10000 train_time:525914ms step_avg:74.91ms +[2025-09-02 17:30:35] [Rank 0] step:7041/10000 train_time:527510ms 
step_avg:74.92ms +[2025-09-02 17:30:35] [Rank 0] step:7041/10000 train_time:527510ms step_avg:74.92ms +[2025-09-02 17:30:37] [Rank 0] step:7061/10000 train_time:529103ms step_avg:74.93ms +[2025-09-02 17:30:37] [Rank 0] step:7061/10000 train_time:529103ms step_avg:74.93ms +[2025-09-02 17:30:39] [Rank 0] step:7081/10000 train_time:530697ms step_avg:74.95ms +[2025-09-02 17:30:39] [Rank 0] step:7081/10000 train_time:530697ms step_avg:74.95ms +[2025-09-02 17:30:40] [Rank 0] step:7101/10000 train_time:532291ms step_avg:74.96ms +[2025-09-02 17:30:40] [Rank 0] step:7101/10000 train_time:532291ms step_avg:74.96ms +[2025-09-02 17:30:42] [Rank 0] step:7121/10000 train_time:533888ms step_avg:74.97ms +[2025-09-02 17:30:42] [Rank 0] step:7121/10000 train_time:533888ms step_avg:74.97ms +[2025-09-02 17:30:43] [Rank 0] step:7141/10000 train_time:535484ms step_avg:74.99ms +[2025-09-02 17:30:43] [Rank 0] step:7141/10000 train_time:535484ms step_avg:74.99ms +[2025-09-02 17:30:45] [Rank 0] step:7161/10000 train_time:537082ms step_avg:75.00ms +[2025-09-02 17:30:45] [Rank 0] step:7161/10000 train_time:537082ms step_avg:75.00ms +[2025-09-02 17:30:47] [Rank 0] step:7181/10000 train_time:538678ms step_avg:75.01ms +[2025-09-02 17:30:47] [Rank 0] step:7181/10000 train_time:538678ms step_avg:75.01ms +[2025-09-02 17:30:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:30:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:31:00] [Rank 0] PRINT: step:7200/10000 val_loss:3.9786 svd_entropy: attn_qk:H=0.7420,top10E=0.27,eRank=145.8,q75/q25=108.95 attn_vo:H=0.8219,top10E=0.16,eRank=260.7,q75/q25=74.97 mlp_w1:H=0.7545,top10E=0.29,eRank=172.5,q75/q25=24.12 mlp_w2:H=0.8400,top10E=0.13,eRank=279.3,q75/q25=74.80 vo_prod:H=0.7325,top10E=0.25,eRank=137.9,q75/q25=6362.46 train_time:540440ms step_avg:75.06ms +[2025-09-02 17:31:00] [Rank 0] PRINT: step:7200/10000 val_loss:3.9786 svd_entropy: attn_qk:H=0.7420,top10E=0.27,eRank=145.8,q75/q25=108.95 attn_vo:H=0.8219,top10E=0.16,eRank=260.7,q75/q25=74.97 mlp_w1:H=0.7545,top10E=0.29,eRank=172.5,q75/q25=24.12 mlp_w2:H=0.8400,top10E=0.13,eRank=279.3,q75/q25=74.80 vo_prod:H=0.7325,top10E=0.25,eRank=137.9,q75/q25=6362.46 train_time:540440ms step_avg:75.06ms +[2025-09-02 17:31:00] [Rank 0] step:7201/10000 train_time:540452ms step_avg:75.05ms +[2025-09-02 17:31:00] [Rank 0] step:7201/10000 train_time:540452ms step_avg:75.05ms +[2025-09-02 17:31:02] [Rank 0] step:7221/10000 train_time:541920ms step_avg:75.05ms +[2025-09-02 17:31:02] [Rank 0] step:7221/10000 train_time:541920ms step_avg:75.05ms +[2025-09-02 17:31:03] [Rank 0] step:7241/10000 train_time:543511ms step_avg:75.06ms +[2025-09-02 17:31:03] [Rank 0] step:7241/10000 train_time:543511ms step_avg:75.06ms +[2025-09-02 17:31:05] [Rank 0] step:7261/10000 train_time:545105ms step_avg:75.07ms +[2025-09-02 17:31:05] [Rank 0] step:7261/10000 train_time:545105ms step_avg:75.07ms +[2025-09-02 17:31:06] [Rank 0] step:7281/10000 train_time:546708ms step_avg:75.09ms +[2025-09-02 17:31:06] [Rank 0] step:7281/10000 train_time:546708ms step_avg:75.09ms +[2025-09-02 17:31:08] [Rank 0] step:7301/10000 train_time:548307ms step_avg:75.10ms +[2025-09-02 17:31:08] [Rank 0] step:7301/10000 train_time:548307ms step_avg:75.10ms +[2025-09-02 17:31:10] [Rank 0] step:7321/10000 train_time:549910ms step_avg:75.11ms +[2025-09-02 17:31:10] [Rank 0] step:7321/10000 train_time:549910ms step_avg:75.11ms +[2025-09-02 
17:31:11] [Rank 0] step:7341/10000 train_time:551511ms step_avg:75.13ms +[2025-09-02 17:31:11] [Rank 0] step:7341/10000 train_time:551511ms step_avg:75.13ms +[2025-09-02 17:31:13] [Rank 0] step:7361/10000 train_time:553119ms step_avg:75.14ms +[2025-09-02 17:31:13] [Rank 0] step:7361/10000 train_time:553119ms step_avg:75.14ms +[2025-09-02 17:31:14] [Rank 0] step:7381/10000 train_time:554727ms step_avg:75.16ms +[2025-09-02 17:31:14] [Rank 0] step:7381/10000 train_time:554727ms step_avg:75.16ms +[2025-09-02 17:31:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:31:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:31:28] [Rank 0] PRINT: step:7400/10000 val_loss:3.9585 svd_entropy: attn_qk:H=0.7433,top10E=0.27,eRank=146.9,q75/q25=109.28 attn_vo:H=0.8230,top10E=0.16,eRank=262.4,q75/q25=73.62 mlp_w1:H=0.7559,top10E=0.29,eRank=174.1,q75/q25=24.38 mlp_w2:H=0.8408,top10E=0.13,eRank=280.9,q75/q25=74.12 vo_prod:H=0.7340,top10E=0.25,eRank=139.4,q75/q25=6031.30 train_time:556473ms step_avg:75.20ms +[2025-09-02 17:31:28] [Rank 0] PRINT: step:7400/10000 val_loss:3.9585 svd_entropy: attn_qk:H=0.7433,top10E=0.27,eRank=146.9,q75/q25=109.28 attn_vo:H=0.8230,top10E=0.16,eRank=262.4,q75/q25=73.62 mlp_w1:H=0.7559,top10E=0.29,eRank=174.1,q75/q25=24.38 mlp_w2:H=0.8408,top10E=0.13,eRank=280.9,q75/q25=74.12 vo_prod:H=0.7340,top10E=0.25,eRank=139.4,q75/q25=6031.30 train_time:556473ms step_avg:75.20ms +[2025-09-02 17:31:28] [Rank 0] step:7401/10000 train_time:556485ms step_avg:75.19ms +[2025-09-02 17:31:28] [Rank 0] step:7401/10000 train_time:556485ms step_avg:75.19ms +[2025-09-02 17:31:29] [Rank 0] step:7421/10000 train_time:557936ms step_avg:75.18ms +[2025-09-02 17:31:29] [Rank 0] step:7421/10000 train_time:557936ms step_avg:75.18ms +[2025-09-02 17:31:31] [Rank 0] step:7441/10000 train_time:559530ms 
step_avg:75.20ms +[2025-09-02 17:31:31] [Rank 0] step:7441/10000 train_time:559530ms step_avg:75.20ms +[2025-09-02 17:31:32] [Rank 0] step:7461/10000 train_time:561127ms step_avg:75.21ms +[2025-09-02 17:31:32] [Rank 0] step:7461/10000 train_time:561127ms step_avg:75.21ms +[2025-09-02 17:31:34] [Rank 0] step:7481/10000 train_time:562730ms step_avg:75.22ms +[2025-09-02 17:31:34] [Rank 0] step:7481/10000 train_time:562730ms step_avg:75.22ms +[2025-09-02 17:31:36] [Rank 0] step:7501/10000 train_time:564332ms step_avg:75.23ms +[2025-09-02 17:31:36] [Rank 0] step:7501/10000 train_time:564332ms step_avg:75.23ms +[2025-09-02 17:31:37] [Rank 0] step:7521/10000 train_time:565934ms step_avg:75.25ms +[2025-09-02 17:31:37] [Rank 0] step:7521/10000 train_time:565934ms step_avg:75.25ms +[2025-09-02 17:31:39] [Rank 0] step:7541/10000 train_time:567548ms step_avg:75.26ms +[2025-09-02 17:31:39] [Rank 0] step:7541/10000 train_time:567548ms step_avg:75.26ms +[2025-09-02 17:31:40] [Rank 0] step:7561/10000 train_time:569138ms step_avg:75.27ms +[2025-09-02 17:31:40] [Rank 0] step:7561/10000 train_time:569138ms step_avg:75.27ms +[2025-09-02 17:31:42] [Rank 0] step:7581/10000 train_time:570745ms step_avg:75.29ms +[2025-09-02 17:31:42] [Rank 0] step:7581/10000 train_time:570745ms step_avg:75.29ms +[2025-09-02 17:31:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:31:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:31:55] [Rank 0] PRINT: step:7600/10000 val_loss:3.9553 svd_entropy: attn_qk:H=0.7445,top10E=0.26,eRank=148.0,q75/q25=110.08 attn_vo:H=0.8241,top10E=0.16,eRank=264.0,q75/q25=72.55 mlp_w1:H=0.7572,top10E=0.29,eRank=175.5,q75/q25=24.76 mlp_w2:H=0.8415,top10E=0.13,eRank=282.3,q75/q25=74.27 vo_prod:H=0.7356,top10E=0.25,eRank=140.8,q75/q25=5784.79 train_time:572515ms step_avg:75.33ms +[2025-09-02 17:31:55] [Rank 0] PRINT: step:7600/10000 val_loss:3.9553 svd_entropy: attn_qk:H=0.7445,top10E=0.26,eRank=148.0,q75/q25=110.08 attn_vo:H=0.8241,top10E=0.16,eRank=264.0,q75/q25=72.55 mlp_w1:H=0.7572,top10E=0.29,eRank=175.5,q75/q25=24.76 mlp_w2:H=0.8415,top10E=0.13,eRank=282.3,q75/q25=74.27 vo_prod:H=0.7356,top10E=0.25,eRank=140.8,q75/q25=5784.79 train_time:572515ms step_avg:75.33ms +[2025-09-02 17:31:55] [Rank 0] step:7601/10000 train_time:572526ms step_avg:75.32ms +[2025-09-02 17:31:55] [Rank 0] step:7601/10000 train_time:572526ms step_avg:75.32ms +[2025-09-02 17:31:57] [Rank 0] step:7621/10000 train_time:573983ms step_avg:75.32ms +[2025-09-02 17:31:57] [Rank 0] step:7621/10000 train_time:573983ms step_avg:75.32ms +[2025-09-02 17:31:59] [Rank 0] step:7641/10000 train_time:575578ms step_avg:75.33ms +[2025-09-02 17:31:59] [Rank 0] step:7641/10000 train_time:575578ms step_avg:75.33ms +[2025-09-02 17:32:00] [Rank 0] step:7661/10000 train_time:577177ms step_avg:75.34ms +[2025-09-02 17:32:00] [Rank 0] step:7661/10000 train_time:577177ms step_avg:75.34ms +[2025-09-02 17:32:02] [Rank 0] step:7681/10000 train_time:578771ms step_avg:75.35ms +[2025-09-02 17:32:02] [Rank 0] step:7681/10000 train_time:578771ms step_avg:75.35ms +[2025-09-02 17:32:03] [Rank 0] step:7701/10000 train_time:580421ms step_avg:75.37ms +[2025-09-02 17:32:03] [Rank 0] step:7701/10000 train_time:580421ms step_avg:75.37ms +[2025-09-02 17:32:05] [Rank 0] step:7721/10000 train_time:582031ms step_avg:75.38ms +[2025-09-02 17:32:05] [Rank 0] step:7721/10000 train_time:582031ms step_avg:75.38ms +[2025-09-02 
17:32:07] [Rank 0] step:7741/10000 train_time:583630ms step_avg:75.39ms +[2025-09-02 17:32:07] [Rank 0] step:7741/10000 train_time:583630ms step_avg:75.39ms +[2025-09-02 17:32:08] [Rank 0] step:7761/10000 train_time:585231ms step_avg:75.41ms +[2025-09-02 17:32:08] [Rank 0] step:7761/10000 train_time:585231ms step_avg:75.41ms +[2025-09-02 17:32:10] [Rank 0] step:7781/10000 train_time:586837ms step_avg:75.42ms +[2025-09-02 17:32:10] [Rank 0] step:7781/10000 train_time:586837ms step_avg:75.42ms +[2025-09-02 17:32:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:32:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:32:23] [Rank 0] PRINT: step:7800/10000 val_loss:3.9386 svd_entropy: attn_qk:H=0.7456,top10E=0.26,eRank=149.0,q75/q25=109.93 attn_vo:H=0.8251,top10E=0.16,eRank=265.5,q75/q25=71.12 mlp_w1:H=0.7585,top10E=0.29,eRank=176.9,q75/q25=25.09 mlp_w2:H=0.8421,top10E=0.13,eRank=283.6,q75/q25=74.75 vo_prod:H=0.7369,top10E=0.25,eRank=142.1,q75/q25=5528.10 train_time:588611ms step_avg:75.46ms +[2025-09-02 17:32:23] [Rank 0] PRINT: step:7800/10000 val_loss:3.9386 svd_entropy: attn_qk:H=0.7456,top10E=0.26,eRank=149.0,q75/q25=109.93 attn_vo:H=0.8251,top10E=0.16,eRank=265.5,q75/q25=71.12 mlp_w1:H=0.7585,top10E=0.29,eRank=176.9,q75/q25=25.09 mlp_w2:H=0.8421,top10E=0.13,eRank=283.6,q75/q25=74.75 vo_prod:H=0.7369,top10E=0.25,eRank=142.1,q75/q25=5528.10 train_time:588611ms step_avg:75.46ms +[2025-09-02 17:32:23] [Rank 0] step:7801/10000 train_time:588623ms step_avg:75.45ms +[2025-09-02 17:32:23] [Rank 0] step:7801/10000 train_time:588623ms step_avg:75.45ms +[2025-09-02 17:32:25] [Rank 0] step:7821/10000 train_time:590068ms step_avg:75.45ms +[2025-09-02 17:32:25] [Rank 0] step:7821/10000 train_time:590068ms step_avg:75.45ms +[2025-09-02 17:32:26] [Rank 0] step:7841/10000 train_time:591662ms 
step_avg:75.46ms +[2025-09-02 17:32:26] [Rank 0] step:7841/10000 train_time:591662ms step_avg:75.46ms +[2025-09-02 17:32:28] [Rank 0] step:7861/10000 train_time:593266ms step_avg:75.47ms +[2025-09-02 17:32:28] [Rank 0] step:7861/10000 train_time:593266ms step_avg:75.47ms +[2025-09-02 17:32:30] [Rank 0] step:7881/10000 train_time:594871ms step_avg:75.48ms +[2025-09-02 17:32:30] [Rank 0] step:7881/10000 train_time:594871ms step_avg:75.48ms +[2025-09-02 17:32:31] [Rank 0] step:7901/10000 train_time:596469ms step_avg:75.49ms +[2025-09-02 17:32:31] [Rank 0] step:7901/10000 train_time:596469ms step_avg:75.49ms +[2025-09-02 17:32:33] [Rank 0] step:7921/10000 train_time:598072ms step_avg:75.50ms +[2025-09-02 17:32:33] [Rank 0] step:7921/10000 train_time:598072ms step_avg:75.50ms +[2025-09-02 17:32:34] [Rank 0] step:7941/10000 train_time:599676ms step_avg:75.52ms +[2025-09-02 17:32:34] [Rank 0] step:7941/10000 train_time:599676ms step_avg:75.52ms +[2025-09-02 17:32:36] [Rank 0] step:7961/10000 train_time:601289ms step_avg:75.53ms +[2025-09-02 17:32:36] [Rank 0] step:7961/10000 train_time:601289ms step_avg:75.53ms +[2025-09-02 17:32:38] [Rank 0] step:7981/10000 train_time:602884ms step_avg:75.54ms +[2025-09-02 17:32:38] [Rank 0] step:7981/10000 train_time:602884ms step_avg:75.54ms +[2025-09-02 17:32:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:32:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:32:51] [Rank 0] PRINT: step:8000/10000 val_loss:3.9235 svd_entropy: attn_qk:H=0.7466,top10E=0.26,eRank=149.9,q75/q25=109.58 attn_vo:H=0.8260,top10E=0.16,eRank=266.8,q75/q25=70.01 mlp_w1:H=0.7597,top10E=0.29,eRank=178.1,q75/q25=25.36 mlp_w2:H=0.8427,top10E=0.13,eRank=284.8,q75/q25=75.46 vo_prod:H=0.7383,top10E=0.24,eRank=143.4,q75/q25=5245.21 train_time:604651ms step_avg:75.58ms +[2025-09-02 17:32:51] [Rank 0] PRINT: step:8000/10000 val_loss:3.9235 svd_entropy: attn_qk:H=0.7466,top10E=0.26,eRank=149.9,q75/q25=109.58 attn_vo:H=0.8260,top10E=0.16,eRank=266.8,q75/q25=70.01 mlp_w1:H=0.7597,top10E=0.29,eRank=178.1,q75/q25=25.36 mlp_w2:H=0.8427,top10E=0.13,eRank=284.8,q75/q25=75.46 vo_prod:H=0.7383,top10E=0.24,eRank=143.4,q75/q25=5245.21 train_time:604651ms step_avg:75.58ms +[2025-09-02 17:32:51] [Rank 0] step:8001/10000 train_time:604662ms step_avg:75.57ms +[2025-09-02 17:32:51] [Rank 0] step:8001/10000 train_time:604662ms step_avg:75.57ms +[2025-09-02 17:32:53] [Rank 0] step:8021/10000 train_time:606115ms step_avg:75.57ms +[2025-09-02 17:32:53] [Rank 0] step:8021/10000 train_time:606115ms step_avg:75.57ms +[2025-09-02 17:32:54] [Rank 0] step:8041/10000 train_time:607724ms step_avg:75.58ms +[2025-09-02 17:32:54] [Rank 0] step:8041/10000 train_time:607724ms step_avg:75.58ms +[2025-09-02 17:32:56] [Rank 0] step:8061/10000 train_time:609321ms step_avg:75.59ms +[2025-09-02 17:32:56] [Rank 0] step:8061/10000 train_time:609321ms step_avg:75.59ms +[2025-09-02 17:32:58] [Rank 0] step:8081/10000 train_time:610912ms step_avg:75.60ms +[2025-09-02 17:32:58] [Rank 0] step:8081/10000 train_time:610912ms step_avg:75.60ms +[2025-09-02 17:32:59] [Rank 0] step:8101/10000 train_time:612521ms step_avg:75.61ms +[2025-09-02 17:32:59] [Rank 0] step:8101/10000 train_time:612521ms step_avg:75.61ms +[2025-09-02 17:33:01] [Rank 0] step:8121/10000 train_time:614119ms step_avg:75.62ms +[2025-09-02 17:33:01] [Rank 0] step:8121/10000 train_time:614119ms step_avg:75.62ms +[2025-09-02 
17:33:02] [Rank 0] step:8141/10000 train_time:615828ms step_avg:75.65ms +[2025-09-02 17:33:02] [Rank 0] step:8141/10000 train_time:615828ms step_avg:75.65ms +[2025-09-02 17:33:04] [Rank 0] step:8161/10000 train_time:617476ms step_avg:75.66ms +[2025-09-02 17:33:04] [Rank 0] step:8161/10000 train_time:617476ms step_avg:75.66ms +[2025-09-02 17:33:06] [Rank 0] step:8181/10000 train_time:619108ms step_avg:75.68ms +[2025-09-02 17:33:06] [Rank 0] step:8181/10000 train_time:619108ms step_avg:75.68ms +[2025-09-02 17:33:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:33:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:33:19] [Rank 0] PRINT: step:8200/10000 val_loss:3.9147 svd_entropy: attn_qk:H=0.7475,top10E=0.26,eRank=150.7,q75/q25=109.39 attn_vo:H=0.8268,top10E=0.15,eRank=268.0,q75/q25=69.10 mlp_w1:H=0.7606,top10E=0.29,eRank=179.2,q75/q25=25.59 mlp_w2:H=0.8433,top10E=0.13,eRank=285.9,q75/q25=75.12 vo_prod:H=0.7394,top10E=0.24,eRank=144.4,q75/q25=5173.67 train_time:620925ms step_avg:75.72ms +[2025-09-02 17:33:19] [Rank 0] PRINT: step:8200/10000 val_loss:3.9147 svd_entropy: attn_qk:H=0.7475,top10E=0.26,eRank=150.7,q75/q25=109.39 attn_vo:H=0.8268,top10E=0.15,eRank=268.0,q75/q25=69.10 mlp_w1:H=0.7606,top10E=0.29,eRank=179.2,q75/q25=25.59 mlp_w2:H=0.8433,top10E=0.13,eRank=285.9,q75/q25=75.12 vo_prod:H=0.7394,top10E=0.24,eRank=144.4,q75/q25=5173.67 train_time:620925ms step_avg:75.72ms +[2025-09-02 17:33:19] [Rank 0] step:8201/10000 train_time:620936ms step_avg:75.71ms +[2025-09-02 17:33:19] [Rank 0] step:8201/10000 train_time:620936ms step_avg:75.71ms +[2025-09-02 17:33:21] [Rank 0] step:8221/10000 train_time:622432ms step_avg:75.71ms +[2025-09-02 17:33:21] [Rank 0] step:8221/10000 train_time:622432ms step_avg:75.71ms +[2025-09-02 17:33:22] [Rank 0] step:8241/10000 train_time:624070ms 
step_avg:75.73ms +[2025-09-02 17:33:22] [Rank 0] step:8241/10000 train_time:624070ms step_avg:75.73ms +[2025-09-02 17:33:24] [Rank 0] step:8261/10000 train_time:625700ms step_avg:75.74ms +[2025-09-02 17:33:24] [Rank 0] step:8261/10000 train_time:625700ms step_avg:75.74ms +[2025-09-02 17:33:26] [Rank 0] step:8281/10000 train_time:627334ms step_avg:75.76ms +[2025-09-02 17:33:26] [Rank 0] step:8281/10000 train_time:627334ms step_avg:75.76ms +[2025-09-02 17:33:27] [Rank 0] step:8301/10000 train_time:628965ms step_avg:75.77ms +[2025-09-02 17:33:27] [Rank 0] step:8301/10000 train_time:628965ms step_avg:75.77ms +[2025-09-02 17:33:29] [Rank 0] step:8321/10000 train_time:630588ms step_avg:75.78ms +[2025-09-02 17:33:29] [Rank 0] step:8321/10000 train_time:630588ms step_avg:75.78ms +[2025-09-02 17:33:31] [Rank 0] step:8341/10000 train_time:632224ms step_avg:75.80ms +[2025-09-02 17:33:31] [Rank 0] step:8341/10000 train_time:632224ms step_avg:75.80ms +[2025-09-02 17:33:32] [Rank 0] step:8361/10000 train_time:633859ms step_avg:75.81ms +[2025-09-02 17:33:32] [Rank 0] step:8361/10000 train_time:633859ms step_avg:75.81ms +[2025-09-02 17:33:34] [Rank 0] step:8381/10000 train_time:635492ms step_avg:75.83ms +[2025-09-02 17:33:34] [Rank 0] step:8381/10000 train_time:635492ms step_avg:75.83ms +[2025-09-02 17:33:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:33:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:33:47] [Rank 0] PRINT: step:8400/10000 val_loss:3.9035 svd_entropy: attn_qk:H=0.7483,top10E=0.26,eRank=151.4,q75/q25=109.82 attn_vo:H=0.8275,top10E=0.15,eRank=269.1,q75/q25=68.30 mlp_w1:H=0.7615,top10E=0.28,eRank=180.2,q75/q25=25.81 mlp_w2:H=0.8437,top10E=0.13,eRank=286.9,q75/q25=75.42 vo_prod:H=0.7405,top10E=0.24,eRank=145.5,q75/q25=4993.38 train_time:637287ms step_avg:75.87ms +[2025-09-02 17:33:47] [Rank 0] PRINT: step:8400/10000 val_loss:3.9035 svd_entropy: attn_qk:H=0.7483,top10E=0.26,eRank=151.4,q75/q25=109.82 attn_vo:H=0.8275,top10E=0.15,eRank=269.1,q75/q25=68.30 mlp_w1:H=0.7615,top10E=0.28,eRank=180.2,q75/q25=25.81 mlp_w2:H=0.8437,top10E=0.13,eRank=286.9,q75/q25=75.42 vo_prod:H=0.7405,top10E=0.24,eRank=145.5,q75/q25=4993.38 train_time:637287ms step_avg:75.87ms +[2025-09-02 17:33:47] [Rank 0] step:8401/10000 train_time:637298ms step_avg:75.86ms +[2025-09-02 17:33:47] [Rank 0] step:8401/10000 train_time:637298ms step_avg:75.86ms +[2025-09-02 17:33:49] [Rank 0] step:8421/10000 train_time:638773ms step_avg:75.85ms +[2025-09-02 17:33:49] [Rank 0] step:8421/10000 train_time:638773ms step_avg:75.85ms +[2025-09-02 17:33:50] [Rank 0] step:8441/10000 train_time:640407ms step_avg:75.87ms +[2025-09-02 17:33:50] [Rank 0] step:8441/10000 train_time:640407ms step_avg:75.87ms +[2025-09-02 17:33:52] [Rank 0] step:8461/10000 train_time:642031ms step_avg:75.88ms +[2025-09-02 17:33:52] [Rank 0] step:8461/10000 train_time:642031ms step_avg:75.88ms +[2025-09-02 17:33:54] [Rank 0] step:8481/10000 train_time:643666ms step_avg:75.90ms +[2025-09-02 17:33:54] [Rank 0] step:8481/10000 train_time:643666ms step_avg:75.90ms +[2025-09-02 17:33:55] [Rank 0] step:8501/10000 train_time:645319ms step_avg:75.91ms +[2025-09-02 17:33:55] [Rank 0] step:8501/10000 train_time:645319ms step_avg:75.91ms +[2025-09-02 17:33:57] [Rank 0] step:8521/10000 train_time:646959ms step_avg:75.93ms +[2025-09-02 17:33:57] [Rank 0] step:8521/10000 train_time:646959ms step_avg:75.93ms +[2025-09-02 
17:33:59] [Rank 0] step:8541/10000 train_time:648605ms step_avg:75.94ms +[2025-09-02 17:33:59] [Rank 0] step:8541/10000 train_time:648605ms step_avg:75.94ms +[2025-09-02 17:34:00] [Rank 0] step:8561/10000 train_time:650240ms step_avg:75.95ms +[2025-09-02 17:34:00] [Rank 0] step:8561/10000 train_time:650240ms step_avg:75.95ms +[2025-09-02 17:34:02] [Rank 0] step:8581/10000 train_time:651875ms step_avg:75.97ms +[2025-09-02 17:34:02] [Rank 0] step:8581/10000 train_time:651875ms step_avg:75.97ms +[2025-09-02 17:34:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:34:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:34:15] [Rank 0] PRINT: step:8600/10000 val_loss:3.8947 svd_entropy: attn_qk:H=0.7490,top10E=0.26,eRank=152.1,q75/q25=109.49 attn_vo:H=0.8281,top10E=0.15,eRank=270.0,q75/q25=67.28 mlp_w1:H=0.7624,top10E=0.28,eRank=181.2,q75/q25=26.01 mlp_w2:H=0.8442,top10E=0.13,eRank=287.7,q75/q25=75.46 vo_prod:H=0.7413,top10E=0.24,eRank=146.3,q75/q25=4806.87 train_time:653659ms step_avg:76.01ms +[2025-09-02 17:34:15] [Rank 0] PRINT: step:8600/10000 val_loss:3.8947 svd_entropy: attn_qk:H=0.7490,top10E=0.26,eRank=152.1,q75/q25=109.49 attn_vo:H=0.8281,top10E=0.15,eRank=270.0,q75/q25=67.28 mlp_w1:H=0.7624,top10E=0.28,eRank=181.2,q75/q25=26.01 mlp_w2:H=0.8442,top10E=0.13,eRank=287.7,q75/q25=75.46 vo_prod:H=0.7413,top10E=0.24,eRank=146.3,q75/q25=4806.87 train_time:653659ms step_avg:76.01ms +[2025-09-02 17:34:15] [Rank 0] step:8601/10000 train_time:653670ms step_avg:76.00ms +[2025-09-02 17:34:15] [Rank 0] step:8601/10000 train_time:653670ms step_avg:76.00ms +[2025-09-02 17:34:17] [Rank 0] step:8621/10000 train_time:655145ms step_avg:75.99ms +[2025-09-02 17:34:17] [Rank 0] step:8621/10000 train_time:655145ms step_avg:75.99ms +[2025-09-02 17:34:19] [Rank 0] step:8641/10000 train_time:656765ms 
step_avg:76.01ms +[2025-09-02 17:34:19] [Rank 0] step:8641/10000 train_time:656765ms step_avg:76.01ms +[2025-09-02 17:34:20] [Rank 0] step:8661/10000 train_time:658391ms step_avg:76.02ms +[2025-09-02 17:34:20] [Rank 0] step:8661/10000 train_time:658391ms step_avg:76.02ms +[2025-09-02 17:34:22] [Rank 0] step:8681/10000 train_time:660018ms step_avg:76.03ms +[2025-09-02 17:34:22] [Rank 0] step:8681/10000 train_time:660018ms step_avg:76.03ms +[2025-09-02 17:34:23] [Rank 0] step:8701/10000 train_time:661635ms step_avg:76.04ms +[2025-09-02 17:34:23] [Rank 0] step:8701/10000 train_time:661635ms step_avg:76.04ms +[2025-09-02 17:34:25] [Rank 0] step:8721/10000 train_time:663268ms step_avg:76.05ms +[2025-09-02 17:34:25] [Rank 0] step:8721/10000 train_time:663268ms step_avg:76.05ms +[2025-09-02 17:34:27] [Rank 0] step:8741/10000 train_time:664888ms step_avg:76.07ms +[2025-09-02 17:34:27] [Rank 0] step:8741/10000 train_time:664888ms step_avg:76.07ms +[2025-09-02 17:34:28] [Rank 0] step:8761/10000 train_time:666513ms step_avg:76.08ms +[2025-09-02 17:34:28] [Rank 0] step:8761/10000 train_time:666513ms step_avg:76.08ms +[2025-09-02 17:34:30] [Rank 0] step:8781/10000 train_time:668147ms step_avg:76.09ms +[2025-09-02 17:34:30] [Rank 0] step:8781/10000 train_time:668147ms step_avg:76.09ms +[2025-09-02 17:34:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:34:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:34:43] [Rank 0] PRINT: step:8800/10000 val_loss:3.8846 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=152.7,q75/q25=109.48 attn_vo:H=0.8287,top10E=0.15,eRank=270.9,q75/q25=66.36 mlp_w1:H=0.7632,top10E=0.28,eRank=182.1,q75/q25=26.12 mlp_w2:H=0.8446,top10E=0.13,eRank=288.6,q75/q25=75.59 vo_prod:H=0.7422,top10E=0.24,eRank=147.2,q75/q25=4615.37 train_time:669940ms step_avg:76.13ms +[2025-09-02 17:34:43] [Rank 0] PRINT: step:8800/10000 val_loss:3.8846 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=152.7,q75/q25=109.48 attn_vo:H=0.8287,top10E=0.15,eRank=270.9,q75/q25=66.36 mlp_w1:H=0.7632,top10E=0.28,eRank=182.1,q75/q25=26.12 mlp_w2:H=0.8446,top10E=0.13,eRank=288.6,q75/q25=75.59 vo_prod:H=0.7422,top10E=0.24,eRank=147.2,q75/q25=4615.37 train_time:669940ms step_avg:76.13ms +[2025-09-02 17:34:43] [Rank 0] step:8801/10000 train_time:669952ms step_avg:76.12ms +[2025-09-02 17:34:43] [Rank 0] step:8801/10000 train_time:669952ms step_avg:76.12ms +[2025-09-02 17:34:45] [Rank 0] step:8821/10000 train_time:671420ms step_avg:76.12ms +[2025-09-02 17:34:45] [Rank 0] step:8821/10000 train_time:671420ms step_avg:76.12ms +[2025-09-02 17:34:47] [Rank 0] step:8841/10000 train_time:673065ms step_avg:76.13ms +[2025-09-02 17:34:47] [Rank 0] step:8841/10000 train_time:673065ms step_avg:76.13ms +[2025-09-02 17:34:48] [Rank 0] step:8861/10000 train_time:674689ms step_avg:76.14ms +[2025-09-02 17:34:48] [Rank 0] step:8861/10000 train_time:674689ms step_avg:76.14ms +[2025-09-02 17:34:50] [Rank 0] step:8881/10000 train_time:676316ms step_avg:76.15ms +[2025-09-02 17:34:50] [Rank 0] step:8881/10000 train_time:676316ms step_avg:76.15ms +[2025-09-02 17:34:51] [Rank 0] step:8901/10000 train_time:677948ms step_avg:76.17ms +[2025-09-02 17:34:51] [Rank 0] step:8901/10000 train_time:677948ms step_avg:76.17ms +[2025-09-02 17:34:53] [Rank 0] step:8921/10000 train_time:679585ms step_avg:76.18ms +[2025-09-02 17:34:53] [Rank 0] step:8921/10000 train_time:679585ms step_avg:76.18ms +[2025-09-02 
17:34:55] [Rank 0] step:8941/10000 train_time:681227ms step_avg:76.19ms +[2025-09-02 17:34:55] [Rank 0] step:8941/10000 train_time:681227ms step_avg:76.19ms +[2025-09-02 17:34:56] [Rank 0] step:8961/10000 train_time:682855ms step_avg:76.20ms +[2025-09-02 17:34:56] [Rank 0] step:8961/10000 train_time:682855ms step_avg:76.20ms +[2025-09-02 17:34:58] [Rank 0] step:8981/10000 train_time:684481ms step_avg:76.21ms +[2025-09-02 17:34:58] [Rank 0] step:8981/10000 train_time:684481ms step_avg:76.21ms +[2025-09-02 17:35:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:35:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:35:11] [Rank 0] PRINT: step:9000/10000 val_loss:3.8758 svd_entropy: attn_qk:H=0.7503,top10E=0.26,eRank=153.3,q75/q25=109.59 attn_vo:H=0.8292,top10E=0.15,eRank=271.7,q75/q25=65.64 mlp_w1:H=0.7637,top10E=0.28,eRank=182.8,q75/q25=26.29 mlp_w2:H=0.8449,top10E=0.13,eRank=289.3,q75/q25=75.98 vo_prod:H=0.7430,top10E=0.24,eRank=147.9,q75/q25=4572.51 train_time:686272ms step_avg:76.25ms +[2025-09-02 17:35:11] [Rank 0] PRINT: step:9000/10000 val_loss:3.8758 svd_entropy: attn_qk:H=0.7503,top10E=0.26,eRank=153.3,q75/q25=109.59 attn_vo:H=0.8292,top10E=0.15,eRank=271.7,q75/q25=65.64 mlp_w1:H=0.7637,top10E=0.28,eRank=182.8,q75/q25=26.29 mlp_w2:H=0.8449,top10E=0.13,eRank=289.3,q75/q25=75.98 vo_prod:H=0.7430,top10E=0.24,eRank=147.9,q75/q25=4572.51 train_time:686272ms step_avg:76.25ms +[2025-09-02 17:35:11] [Rank 0] step:9001/10000 train_time:686284ms step_avg:76.25ms +[2025-09-02 17:35:11] [Rank 0] step:9001/10000 train_time:686284ms step_avg:76.25ms +[2025-09-02 17:35:13] [Rank 0] step:9021/10000 train_time:687757ms step_avg:76.24ms +[2025-09-02 17:35:13] [Rank 0] step:9021/10000 train_time:687757ms step_avg:76.24ms +[2025-09-02 17:35:15] [Rank 0] step:9041/10000 train_time:689377ms 
step_avg:76.25ms +[2025-09-02 17:35:15] [Rank 0] step:9041/10000 train_time:689377ms step_avg:76.25ms +[2025-09-02 17:35:16] [Rank 0] step:9061/10000 train_time:691016ms step_avg:76.26ms +[2025-09-02 17:35:16] [Rank 0] step:9061/10000 train_time:691016ms step_avg:76.26ms +[2025-09-02 17:35:18] [Rank 0] step:9081/10000 train_time:692652ms step_avg:76.27ms +[2025-09-02 17:35:18] [Rank 0] step:9081/10000 train_time:692652ms step_avg:76.27ms +[2025-09-02 17:35:20] [Rank 0] step:9101/10000 train_time:694301ms step_avg:76.29ms +[2025-09-02 17:35:20] [Rank 0] step:9101/10000 train_time:694301ms step_avg:76.29ms +[2025-09-02 17:35:21] [Rank 0] step:9121/10000 train_time:695933ms step_avg:76.30ms +[2025-09-02 17:35:21] [Rank 0] step:9121/10000 train_time:695933ms step_avg:76.30ms +[2025-09-02 17:35:23] [Rank 0] step:9141/10000 train_time:697555ms step_avg:76.31ms +[2025-09-02 17:35:23] [Rank 0] step:9141/10000 train_time:697555ms step_avg:76.31ms +[2025-09-02 17:35:24] [Rank 0] step:9161/10000 train_time:699177ms step_avg:76.32ms +[2025-09-02 17:35:24] [Rank 0] step:9161/10000 train_time:699177ms step_avg:76.32ms +[2025-09-02 17:35:26] [Rank 0] step:9181/10000 train_time:700839ms step_avg:76.34ms +[2025-09-02 17:35:26] [Rank 0] step:9181/10000 train_time:700839ms step_avg:76.34ms +[2025-09-02 17:35:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:35:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:35:39] [Rank 0] PRINT: step:9200/10000 val_loss:3.8681 svd_entropy: attn_qk:H=0.7507,top10E=0.26,eRank=153.7,q75/q25=109.71 attn_vo:H=0.8296,top10E=0.15,eRank=272.3,q75/q25=65.19 mlp_w1:H=0.7643,top10E=0.28,eRank=183.5,q75/q25=26.45 mlp_w2:H=0.8453,top10E=0.13,eRank=290.0,q75/q25=75.73 vo_prod:H=0.7436,top10E=0.24,eRank=148.5,q75/q25=4418.35 train_time:702630ms step_avg:76.37ms +[2025-09-02 17:35:39] [Rank 0] PRINT: step:9200/10000 val_loss:3.8681 svd_entropy: attn_qk:H=0.7507,top10E=0.26,eRank=153.7,q75/q25=109.71 attn_vo:H=0.8296,top10E=0.15,eRank=272.3,q75/q25=65.19 mlp_w1:H=0.7643,top10E=0.28,eRank=183.5,q75/q25=26.45 mlp_w2:H=0.8453,top10E=0.13,eRank=290.0,q75/q25=75.73 vo_prod:H=0.7436,top10E=0.24,eRank=148.5,q75/q25=4418.35 train_time:702630ms step_avg:76.37ms +[2025-09-02 17:35:40] [Rank 0] step:9201/10000 train_time:702642ms step_avg:76.37ms +[2025-09-02 17:35:40] [Rank 0] step:9201/10000 train_time:702642ms step_avg:76.37ms +[2025-09-02 17:35:41] [Rank 0] step:9221/10000 train_time:704143ms step_avg:76.36ms +[2025-09-02 17:35:41] [Rank 0] step:9221/10000 train_time:704143ms step_avg:76.36ms +[2025-09-02 17:35:43] [Rank 0] step:9241/10000 train_time:705785ms step_avg:76.38ms +[2025-09-02 17:35:43] [Rank 0] step:9241/10000 train_time:705785ms step_avg:76.38ms +[2025-09-02 17:35:44] [Rank 0] step:9261/10000 train_time:707425ms step_avg:76.39ms +[2025-09-02 17:35:44] [Rank 0] step:9261/10000 train_time:707425ms step_avg:76.39ms +[2025-09-02 17:35:46] [Rank 0] step:9281/10000 train_time:709044ms step_avg:76.40ms +[2025-09-02 17:35:46] [Rank 0] step:9281/10000 train_time:709044ms step_avg:76.40ms +[2025-09-02 17:35:48] [Rank 0] step:9301/10000 train_time:710677ms step_avg:76.41ms +[2025-09-02 17:35:48] [Rank 0] step:9301/10000 train_time:710677ms step_avg:76.41ms +[2025-09-02 17:35:49] [Rank 0] step:9321/10000 train_time:712312ms step_avg:76.42ms +[2025-09-02 17:35:49] [Rank 0] step:9321/10000 train_time:712312ms step_avg:76.42ms +[2025-09-02 
17:35:51] [Rank 0] step:9341/10000 train_time:713947ms step_avg:76.43ms +[2025-09-02 17:35:51] [Rank 0] step:9341/10000 train_time:713947ms step_avg:76.43ms +[2025-09-02 17:35:53] [Rank 0] step:9361/10000 train_time:715585ms step_avg:76.44ms +[2025-09-02 17:35:53] [Rank 0] step:9361/10000 train_time:715585ms step_avg:76.44ms +[2025-09-02 17:35:54] [Rank 0] step:9381/10000 train_time:717235ms step_avg:76.46ms +[2025-09-02 17:35:54] [Rank 0] step:9381/10000 train_time:717235ms step_avg:76.46ms +[2025-09-02 17:35:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:35:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:36:08] [Rank 0] PRINT: step:9400/10000 val_loss:3.8607 svd_entropy: attn_qk:H=0.7512,top10E=0.26,eRank=154.1,q75/q25=109.70 attn_vo:H=0.8300,top10E=0.15,eRank=272.9,q75/q25=64.83 mlp_w1:H=0.7648,top10E=0.28,eRank=184.0,q75/q25=26.50 mlp_w2:H=0.8455,top10E=0.13,eRank=290.5,q75/q25=75.71 vo_prod:H=0.7442,top10E=0.24,eRank=149.1,q75/q25=4390.29 train_time:719035ms step_avg:76.49ms +[2025-09-02 17:36:08] [Rank 0] PRINT: step:9400/10000 val_loss:3.8607 svd_entropy: attn_qk:H=0.7512,top10E=0.26,eRank=154.1,q75/q25=109.70 attn_vo:H=0.8300,top10E=0.15,eRank=272.9,q75/q25=64.83 mlp_w1:H=0.7648,top10E=0.28,eRank=184.0,q75/q25=26.50 mlp_w2:H=0.8455,top10E=0.13,eRank=290.5,q75/q25=75.71 vo_prod:H=0.7442,top10E=0.24,eRank=149.1,q75/q25=4390.29 train_time:719035ms step_avg:76.49ms +[2025-09-02 17:36:08] [Rank 0] step:9401/10000 train_time:719047ms step_avg:76.49ms +[2025-09-02 17:36:08] [Rank 0] step:9401/10000 train_time:719047ms step_avg:76.49ms +[2025-09-02 17:36:09] [Rank 0] step:9421/10000 train_time:720520ms step_avg:76.48ms +[2025-09-02 17:36:09] [Rank 0] step:9421/10000 train_time:720520ms step_avg:76.48ms +[2025-09-02 17:36:11] [Rank 0] step:9441/10000 train_time:722155ms 
step_avg:76.49ms +[2025-09-02 17:36:11] [Rank 0] step:9441/10000 train_time:722155ms step_avg:76.49ms +[2025-09-02 17:36:13] [Rank 0] step:9461/10000 train_time:723918ms step_avg:76.52ms +[2025-09-02 17:36:13] [Rank 0] step:9461/10000 train_time:723918ms step_avg:76.52ms +[2025-09-02 17:36:14] [Rank 0] step:9481/10000 train_time:725432ms step_avg:76.51ms +[2025-09-02 17:36:14] [Rank 0] step:9481/10000 train_time:725432ms step_avg:76.51ms +[2025-09-02 17:36:16] [Rank 0] step:9501/10000 train_time:727081ms step_avg:76.53ms +[2025-09-02 17:36:16] [Rank 0] step:9501/10000 train_time:727081ms step_avg:76.53ms +[2025-09-02 17:36:18] [Rank 0] step:9521/10000 train_time:728710ms step_avg:76.54ms +[2025-09-02 17:36:18] [Rank 0] step:9521/10000 train_time:728710ms step_avg:76.54ms +[2025-09-02 17:36:19] [Rank 0] step:9541/10000 train_time:730345ms step_avg:76.55ms +[2025-09-02 17:36:19] [Rank 0] step:9541/10000 train_time:730345ms step_avg:76.55ms +[2025-09-02 17:36:21] [Rank 0] step:9561/10000 train_time:731976ms step_avg:76.56ms +[2025-09-02 17:36:21] [Rank 0] step:9561/10000 train_time:731976ms step_avg:76.56ms +[2025-09-02 17:36:22] [Rank 0] step:9581/10000 train_time:733613ms step_avg:76.57ms +[2025-09-02 17:36:22] [Rank 0] step:9581/10000 train_time:733613ms step_avg:76.57ms +[2025-09-02 17:36:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:36:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:36:36] [Rank 0] PRINT: step:9600/10000 val_loss:3.8547 svd_entropy: attn_qk:H=0.7515,top10E=0.25,eRank=154.4,q75/q25=109.89 attn_vo:H=0.8303,top10E=0.15,eRank=273.3,q75/q25=64.54 mlp_w1:H=0.7652,top10E=0.28,eRank=184.4,q75/q25=26.50 mlp_w2:H=0.8457,top10E=0.13,eRank=291.0,q75/q25=75.80 vo_prod:H=0.7447,top10E=0.24,eRank=149.6,q75/q25=4320.75 train_time:735424ms step_avg:76.61ms +[2025-09-02 17:36:36] [Rank 0] PRINT: step:9600/10000 val_loss:3.8547 svd_entropy: attn_qk:H=0.7515,top10E=0.25,eRank=154.4,q75/q25=109.89 attn_vo:H=0.8303,top10E=0.15,eRank=273.3,q75/q25=64.54 mlp_w1:H=0.7652,top10E=0.28,eRank=184.4,q75/q25=26.50 mlp_w2:H=0.8457,top10E=0.13,eRank=291.0,q75/q25=75.80 vo_prod:H=0.7447,top10E=0.24,eRank=149.6,q75/q25=4320.75 train_time:735424ms step_avg:76.61ms +[2025-09-02 17:36:36] [Rank 0] step:9601/10000 train_time:735436ms step_avg:76.60ms +[2025-09-02 17:36:36] [Rank 0] step:9601/10000 train_time:735436ms step_avg:76.60ms +[2025-09-02 17:36:37] [Rank 0] step:9621/10000 train_time:736913ms step_avg:76.59ms +[2025-09-02 17:36:37] [Rank 0] step:9621/10000 train_time:736913ms step_avg:76.59ms +[2025-09-02 17:36:39] [Rank 0] step:9641/10000 train_time:738551ms step_avg:76.61ms +[2025-09-02 17:36:39] [Rank 0] step:9641/10000 train_time:738551ms step_avg:76.61ms +[2025-09-02 17:36:41] [Rank 0] step:9661/10000 train_time:740213ms step_avg:76.62ms +[2025-09-02 17:36:41] [Rank 0] step:9661/10000 train_time:740213ms step_avg:76.62ms +[2025-09-02 17:36:42] [Rank 0] step:9681/10000 train_time:741870ms step_avg:76.63ms +[2025-09-02 17:36:42] [Rank 0] step:9681/10000 train_time:741870ms step_avg:76.63ms +[2025-09-02 17:36:44] [Rank 0] step:9701/10000 train_time:743544ms step_avg:76.65ms +[2025-09-02 17:36:44] [Rank 0] step:9701/10000 train_time:743544ms step_avg:76.65ms +[2025-09-02 17:36:46] [Rank 0] step:9721/10000 train_time:745194ms step_avg:76.66ms +[2025-09-02 17:36:46] [Rank 0] step:9721/10000 train_time:745194ms step_avg:76.66ms +[2025-09-02 
17:36:47] [Rank 0] step:9741/10000 train_time:746867ms step_avg:76.67ms +[2025-09-02 17:36:47] [Rank 0] step:9741/10000 train_time:746867ms step_avg:76.67ms +[2025-09-02 17:36:49] [Rank 0] step:9761/10000 train_time:748525ms step_avg:76.69ms +[2025-09-02 17:36:49] [Rank 0] step:9761/10000 train_time:748525ms step_avg:76.69ms +[2025-09-02 17:36:51] [Rank 0] step:9781/10000 train_time:750199ms step_avg:76.70ms +[2025-09-02 17:36:51] [Rank 0] step:9781/10000 train_time:750199ms step_avg:76.70ms +[2025-09-02 17:36:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:36:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:37:04] [Rank 0] PRINT: step:9800/10000 val_loss:3.8483 svd_entropy: attn_qk:H=0.7518,top10E=0.25,eRank=154.7,q75/q25=109.93 attn_vo:H=0.8305,top10E=0.15,eRank=273.7,q75/q25=64.32 mlp_w1:H=0.7655,top10E=0.28,eRank=184.8,q75/q25=26.56 mlp_w2:H=0.8459,top10E=0.13,eRank=291.3,q75/q25=75.68 vo_prod:H=0.7451,top10E=0.24,eRank=149.9,q75/q25=4270.09 train_time:752043ms step_avg:76.74ms +[2025-09-02 17:37:04] [Rank 0] PRINT: step:9800/10000 val_loss:3.8483 svd_entropy: attn_qk:H=0.7518,top10E=0.25,eRank=154.7,q75/q25=109.93 attn_vo:H=0.8305,top10E=0.15,eRank=273.7,q75/q25=64.32 mlp_w1:H=0.7655,top10E=0.28,eRank=184.8,q75/q25=26.56 mlp_w2:H=0.8459,top10E=0.13,eRank=291.3,q75/q25=75.68 vo_prod:H=0.7451,top10E=0.24,eRank=149.9,q75/q25=4270.09 train_time:752043ms step_avg:76.74ms +[2025-09-02 17:37:04] [Rank 0] step:9801/10000 train_time:752055ms step_avg:76.73ms +[2025-09-02 17:37:04] [Rank 0] step:9801/10000 train_time:752055ms step_avg:76.73ms +[2025-09-02 17:37:06] [Rank 0] step:9821/10000 train_time:753552ms step_avg:76.73ms +[2025-09-02 17:37:06] [Rank 0] step:9821/10000 train_time:753552ms step_avg:76.73ms +[2025-09-02 17:37:07] [Rank 0] step:9841/10000 train_time:755224ms 
step_avg:76.74ms +[2025-09-02 17:37:07] [Rank 0] step:9841/10000 train_time:755224ms step_avg:76.74ms +[2025-09-02 17:37:09] [Rank 0] step:9861/10000 train_time:756871ms step_avg:76.75ms +[2025-09-02 17:37:09] [Rank 0] step:9861/10000 train_time:756871ms step_avg:76.75ms +[2025-09-02 17:37:11] [Rank 0] step:9881/10000 train_time:758518ms step_avg:76.77ms +[2025-09-02 17:37:11] [Rank 0] step:9881/10000 train_time:758518ms step_avg:76.77ms +[2025-09-02 17:37:12] [Rank 0] step:9901/10000 train_time:760181ms step_avg:76.78ms +[2025-09-02 17:37:12] [Rank 0] step:9901/10000 train_time:760181ms step_avg:76.78ms +[2025-09-02 17:37:14] [Rank 0] step:9921/10000 train_time:761835ms step_avg:76.79ms +[2025-09-02 17:37:14] [Rank 0] step:9921/10000 train_time:761835ms step_avg:76.79ms +[2025-09-02 17:37:16] [Rank 0] step:9941/10000 train_time:763496ms step_avg:76.80ms +[2025-09-02 17:37:16] [Rank 0] step:9941/10000 train_time:763496ms step_avg:76.80ms +[2025-09-02 17:37:17] [Rank 0] step:9961/10000 train_time:765153ms step_avg:76.81ms +[2025-09-02 17:37:17] [Rank 0] step:9961/10000 train_time:765153ms step_avg:76.81ms +[2025-09-02 17:37:19] [Rank 0] step:9981/10000 train_time:766807ms step_avg:76.83ms +[2025-09-02 17:37:19] [Rank 0] step:9981/10000 train_time:766807ms step_avg:76.83ms +[2025-09-02 17:37:21] [Rank 0] step:10000/10000 train_time:768390ms step_avg:76.84ms +[2025-09-02 17:37:21] [Rank 0] step:10000/10000 train_time:768390ms step_avg:76.84ms +[2025-09-02 17:37:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 17:37:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 17:37:32] [Rank 0] PRINT: step:10000/10000 val_loss:3.8424 svd_entropy: attn_qk:H=0.7519,top10E=0.25,eRank=154.8,q75/q25=109.96 attn_vo:H=0.8307,top10E=0.15,eRank=273.9,q75/q25=64.22 mlp_w1:H=0.7657,top10E=0.28,eRank=185.1,q75/q25=26.62 mlp_w2:H=0.8460,top10E=0.13,eRank=291.6,q75/q25=75.73 vo_prod:H=0.7453,top10E=0.24,eRank=150.2,q75/q25=4239.02 train_time:768646ms step_avg:76.86ms +[2025-09-02 17:37:32] [Rank 0] PRINT: step:10000/10000 val_loss:3.8424 svd_entropy: attn_qk:H=0.7519,top10E=0.25,eRank=154.8,q75/q25=109.96 attn_vo:H=0.8307,top10E=0.15,eRank=273.9,q75/q25=64.22 mlp_w1:H=0.7657,top10E=0.28,eRank=185.1,q75/q25=26.62 mlp_w2:H=0.8460,top10E=0.13,eRank=291.6,q75/q25=75.73 vo_prod:H=0.7453,top10E=0.24,eRank=150.2,q75/q25=4239.02 train_time:768646ms step_avg:76.86ms +[2025-09-02 17:37:32] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 17:37:32 2025 --- +[2025-09-02 17:37:32] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 17:37:32 2025 --- +[2025-09-02 17:37:32] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB +[2025-09-02 17:37:32] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_49/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4419964dabe845e6eeaf78232542509b70de81e9 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_49/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "96e23939-980c-4aa3-9e0d-3079efe90b11", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_49/training_log_96e23939-980c-4aa3-9e0d-3079efe90b11.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_49/training_log_96e23939-980c-4aa3-9e0d-3079efe90b11.txt new file mode 100644 index 0000000000000000000000000000000000000000..03ac3a6fe3a917667e5baadb4ca002623eb4827e --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_49/training_log_96e23939-980c-4aa3-9e0d-3079efe90b11.txt @@ -0,0 +1,2984 @@ +[2025-09-02 18:26:21] [Rank 0] PRINT: --- Script Start: Tue Sep 2 18:26:21 2025 --- +[2025-09-02 18:26:21] [Rank 0] PRINT: --- Script Start: Tue Sep 2 18:26:21 2025 --- +[2025-09-02 18:26:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 18:26:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-02 18:26:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 18:26:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-02 18:26:21] [Rank 0] PRINT: Using fixed seed: 49 +[2025-09-02 18:26:21] [Rank 0] PRINT: Using fixed seed: 49 +[2025-09-02 18:26:21] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_49 +[2025-09-02 18:26:21] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_49 +[2025-09-02 18:26:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 18:26:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-02 18:26:21] [Rank 0] PRINT: Constructing model... +[2025-09-02 18:26:21] [Rank 0] PRINT: Constructing model... +[2025-09-02 18:26:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 18:26:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-02 18:26:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 18:26:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-02 18:26:23] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 18:26:23] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-02 18:26:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 18:26:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-02 18:26:23] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 18:26:23] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-02 18:26:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 18:26:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-02 18:26:23] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 18:26:23] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-02 18:26:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 18:26:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-02 18:26:23] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 18:26:23] [Rank 0] PRINT: Model compilation complete. +[2025-09-02 18:26:23] [Rank 0] PRINT: Starting warmup... +[2025-09-02 18:26:23] [Rank 0] PRINT: Starting warmup... +[2025-09-02 18:27:04] [Rank 0] PRINT: Warmup complete. +[2025-09-02 18:27:04] [Rank 0] PRINT: Warmup complete. +[2025-09-02 18:27:04] [Rank 0] PRINT: Starting training... +[2025-09-02 18:27:04] [Rank 0] PRINT: Starting training... 
+[2025-09-02 18:27:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:27:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:27:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.7,q75/q25=10.30 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 18:27:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9247,top10E=0.05,eRank=465.7,q75/q25=10.30 attn_vo:H=0.4624,top10E=0.02,eRank=232.9,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.8,q75/q25=2.36 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-02 18:27:21] [Rank 0] step:21/10000 train_time:1289ms step_avg:61.36ms +[2025-09-02 18:27:21] [Rank 0] step:21/10000 train_time:1289ms step_avg:61.36ms +[2025-09-02 18:27:23] [Rank 0] step:41/10000 train_time:2683ms step_avg:65.43ms +[2025-09-02 18:27:23] [Rank 0] step:41/10000 train_time:2683ms step_avg:65.43ms +[2025-09-02 18:27:24] [Rank 0] step:61/10000 train_time:4082ms step_avg:66.91ms +[2025-09-02 18:27:24] [Rank 0] step:61/10000 train_time:4082ms step_avg:66.91ms +[2025-09-02 18:27:26] [Rank 0] step:81/10000 train_time:5483ms step_avg:67.69ms +[2025-09-02 18:27:26] [Rank 0] step:81/10000 train_time:5483ms step_avg:67.69ms +[2025-09-02 18:27:27] [Rank 0] step:101/10000 train_time:6885ms step_avg:68.17ms +[2025-09-02 18:27:27] [Rank 0] step:101/10000 train_time:6885ms step_avg:68.17ms +[2025-09-02 18:27:28] [Rank 0] step:121/10000 train_time:8288ms step_avg:68.50ms +[2025-09-02 18:27:28] [Rank 0] step:121/10000 
train_time:8288ms step_avg:68.50ms +[2025-09-02 18:27:30] [Rank 0] step:141/10000 train_time:9693ms step_avg:68.74ms +[2025-09-02 18:27:30] [Rank 0] step:141/10000 train_time:9693ms step_avg:68.74ms +[2025-09-02 18:27:31] [Rank 0] step:161/10000 train_time:11096ms step_avg:68.92ms +[2025-09-02 18:27:31] [Rank 0] step:161/10000 train_time:11096ms step_avg:68.92ms +[2025-09-02 18:27:33] [Rank 0] step:181/10000 train_time:12501ms step_avg:69.07ms +[2025-09-02 18:27:33] [Rank 0] step:181/10000 train_time:12501ms step_avg:69.07ms +[2025-09-02 18:27:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:27:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:27:46] [Rank 0] PRINT: step:200/10000 val_loss:6.4945 svd_entropy: attn_qk:H=0.4616,top10E=0.79,eRank=39.3,q75/q25=12.12 attn_vo:H=0.5383,top10E=0.65,eRank=108.2,q75/q25=111.26 mlp_w1:H=0.3971,top10E=0.78,eRank=16.6,q75/q25=2.78 mlp_w2:H=0.2080,top10E=0.94,eRank=4.9,q75/q25=102.75 vo_prod:H=0.2386,top10E=0.97,eRank=6.2,q75/q25=718.67 train_time:14048ms step_avg:70.24ms +[2025-09-02 18:27:46] [Rank 0] PRINT: step:200/10000 val_loss:6.4945 svd_entropy: attn_qk:H=0.4616,top10E=0.79,eRank=39.3,q75/q25=12.12 attn_vo:H=0.5383,top10E=0.65,eRank=108.2,q75/q25=111.26 mlp_w1:H=0.3971,top10E=0.78,eRank=16.6,q75/q25=2.78 mlp_w2:H=0.2080,top10E=0.94,eRank=4.9,q75/q25=102.75 vo_prod:H=0.2386,top10E=0.97,eRank=6.2,q75/q25=718.67 train_time:14048ms step_avg:70.24ms +[2025-09-02 18:27:46] [Rank 0] step:201/10000 train_time:14061ms step_avg:69.95ms +[2025-09-02 18:27:46] [Rank 0] step:201/10000 train_time:14061ms step_avg:69.95ms +[2025-09-02 18:27:48] [Rank 0] step:221/10000 train_time:15351ms step_avg:69.46ms +[2025-09-02 18:27:48] [Rank 0] step:221/10000 train_time:15351ms step_avg:69.46ms +[2025-09-02 18:27:49] [Rank 0] step:241/10000 
train_time:16754ms step_avg:69.52ms +[2025-09-02 18:27:49] [Rank 0] step:241/10000 train_time:16754ms step_avg:69.52ms +[2025-09-02 18:27:50] [Rank 0] step:261/10000 train_time:18357ms step_avg:70.33ms +[2025-09-02 18:27:50] [Rank 0] step:261/10000 train_time:18357ms step_avg:70.33ms +[2025-09-02 18:27:52] [Rank 0] step:281/10000 train_time:19600ms step_avg:69.75ms +[2025-09-02 18:27:52] [Rank 0] step:281/10000 train_time:19600ms step_avg:69.75ms +[2025-09-02 18:27:53] [Rank 0] step:301/10000 train_time:21005ms step_avg:69.78ms +[2025-09-02 18:27:53] [Rank 0] step:301/10000 train_time:21005ms step_avg:69.78ms +[2025-09-02 18:27:55] [Rank 0] step:321/10000 train_time:22409ms step_avg:69.81ms +[2025-09-02 18:27:55] [Rank 0] step:321/10000 train_time:22409ms step_avg:69.81ms +[2025-09-02 18:27:56] [Rank 0] step:341/10000 train_time:23813ms step_avg:69.83ms +[2025-09-02 18:27:56] [Rank 0] step:341/10000 train_time:23813ms step_avg:69.83ms +[2025-09-02 18:27:57] [Rank 0] step:361/10000 train_time:25219ms step_avg:69.86ms +[2025-09-02 18:27:57] [Rank 0] step:361/10000 train_time:25219ms step_avg:69.86ms +[2025-09-02 18:27:59] [Rank 0] step:381/10000 train_time:26624ms step_avg:69.88ms +[2025-09-02 18:27:59] [Rank 0] step:381/10000 train_time:26624ms step_avg:69.88ms +[2025-09-02 18:28:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:28:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:28:12] [Rank 0] PRINT: step:400/10000 val_loss:5.9920 svd_entropy: attn_qk:H=0.5099,top10E=0.69,eRank=46.5,q75/q25=13.49 attn_vo:H=0.5711,top10E=0.55,eRank=85.7,q75/q25=47.88 mlp_w1:H=0.4365,top10E=0.71,eRank=30.4,q75/q25=3.25 mlp_w2:H=0.5321,top10E=0.62,eRank=35.7,q75/q25=14.58 vo_prod:H=0.3943,top10E=0.84,eRank=15.2,q75/q25=355.31 train_time:28172ms step_avg:70.43ms +[2025-09-02 18:28:12] [Rank 0] PRINT: step:400/10000 val_loss:5.9920 svd_entropy: attn_qk:H=0.5099,top10E=0.69,eRank=46.5,q75/q25=13.49 attn_vo:H=0.5711,top10E=0.55,eRank=85.7,q75/q25=47.88 mlp_w1:H=0.4365,top10E=0.71,eRank=30.4,q75/q25=3.25 mlp_w2:H=0.5321,top10E=0.62,eRank=35.7,q75/q25=14.58 vo_prod:H=0.3943,top10E=0.84,eRank=15.2,q75/q25=355.31 train_time:28172ms step_avg:70.43ms +[2025-09-02 18:28:12] [Rank 0] step:401/10000 train_time:28185ms step_avg:70.29ms +[2025-09-02 18:28:12] [Rank 0] step:401/10000 train_time:28185ms step_avg:70.29ms +[2025-09-02 18:28:14] [Rank 0] step:421/10000 train_time:29471ms step_avg:70.00ms +[2025-09-02 18:28:14] [Rank 0] step:421/10000 train_time:29471ms step_avg:70.00ms +[2025-09-02 18:28:15] [Rank 0] step:441/10000 train_time:30874ms step_avg:70.01ms +[2025-09-02 18:28:15] [Rank 0] step:441/10000 train_time:30874ms step_avg:70.01ms +[2025-09-02 18:28:16] [Rank 0] step:461/10000 train_time:32277ms step_avg:70.01ms +[2025-09-02 18:28:16] [Rank 0] step:461/10000 train_time:32277ms step_avg:70.01ms +[2025-09-02 18:28:18] [Rank 0] step:481/10000 train_time:33682ms step_avg:70.03ms +[2025-09-02 18:28:18] [Rank 0] step:481/10000 train_time:33682ms step_avg:70.03ms +[2025-09-02 18:28:19] [Rank 0] step:501/10000 train_time:35086ms step_avg:70.03ms +[2025-09-02 18:28:19] [Rank 0] step:501/10000 train_time:35086ms step_avg:70.03ms +[2025-09-02 18:28:21] [Rank 0] step:521/10000 train_time:36491ms step_avg:70.04ms +[2025-09-02 18:28:21] [Rank 0] step:521/10000 train_time:36491ms step_avg:70.04ms +[2025-09-02 18:28:22] [Rank 0] step:541/10000 
train_time:37898ms step_avg:70.05ms +[2025-09-02 18:28:22] [Rank 0] step:541/10000 train_time:37898ms step_avg:70.05ms +[2025-09-02 18:28:23] [Rank 0] step:561/10000 train_time:39304ms step_avg:70.06ms +[2025-09-02 18:28:23] [Rank 0] step:561/10000 train_time:39304ms step_avg:70.06ms +[2025-09-02 18:28:25] [Rank 0] step:581/10000 train_time:40710ms step_avg:70.07ms +[2025-09-02 18:28:25] [Rank 0] step:581/10000 train_time:40710ms step_avg:70.07ms +[2025-09-02 18:28:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:28:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:28:38] [Rank 0] PRINT: step:600/10000 val_loss:5.6858 svd_entropy: attn_qk:H=0.5459,top10E=0.61,eRank=53.5,q75/q25=15.09 attn_vo:H=0.6065,top10E=0.47,eRank=93.4,q75/q25=32.92 mlp_w1:H=0.4789,top10E=0.66,eRank=42.2,q75/q25=3.59 mlp_w2:H=0.6212,top10E=0.47,eRank=64.7,q75/q25=11.33 vo_prod:H=0.4679,top10E=0.71,eRank=24.1,q75/q25=283.88 train_time:42256ms step_avg:70.43ms +[2025-09-02 18:28:38] [Rank 0] PRINT: step:600/10000 val_loss:5.6858 svd_entropy: attn_qk:H=0.5459,top10E=0.61,eRank=53.5,q75/q25=15.09 attn_vo:H=0.6065,top10E=0.47,eRank=93.4,q75/q25=32.92 mlp_w1:H=0.4789,top10E=0.66,eRank=42.2,q75/q25=3.59 mlp_w2:H=0.6212,top10E=0.47,eRank=64.7,q75/q25=11.33 vo_prod:H=0.4679,top10E=0.71,eRank=24.1,q75/q25=283.88 train_time:42256ms step_avg:70.43ms +[2025-09-02 18:28:38] [Rank 0] step:601/10000 train_time:42269ms step_avg:70.33ms +[2025-09-02 18:28:38] [Rank 0] step:601/10000 train_time:42269ms step_avg:70.33ms +[2025-09-02 18:28:40] [Rank 0] step:621/10000 train_time:43561ms step_avg:70.15ms +[2025-09-02 18:28:40] [Rank 0] step:621/10000 train_time:43561ms step_avg:70.15ms +[2025-09-02 18:28:41] [Rank 0] step:641/10000 train_time:44965ms step_avg:70.15ms +[2025-09-02 18:28:41] [Rank 0] step:641/10000 
train_time:44965ms step_avg:70.15ms +[2025-09-02 18:28:42] [Rank 0] step:661/10000 train_time:46368ms step_avg:70.15ms +[2025-09-02 18:28:42] [Rank 0] step:661/10000 train_time:46368ms step_avg:70.15ms +[2025-09-02 18:28:44] [Rank 0] step:681/10000 train_time:47771ms step_avg:70.15ms +[2025-09-02 18:28:44] [Rank 0] step:681/10000 train_time:47771ms step_avg:70.15ms +[2025-09-02 18:28:45] [Rank 0] step:701/10000 train_time:49175ms step_avg:70.15ms +[2025-09-02 18:28:45] [Rank 0] step:701/10000 train_time:49175ms step_avg:70.15ms +[2025-09-02 18:28:47] [Rank 0] step:721/10000 train_time:50580ms step_avg:70.15ms +[2025-09-02 18:28:47] [Rank 0] step:721/10000 train_time:50580ms step_avg:70.15ms +[2025-09-02 18:28:48] [Rank 0] step:741/10000 train_time:51985ms step_avg:70.16ms +[2025-09-02 18:28:48] [Rank 0] step:741/10000 train_time:51985ms step_avg:70.16ms +[2025-09-02 18:28:49] [Rank 0] step:761/10000 train_time:53401ms step_avg:70.17ms +[2025-09-02 18:28:49] [Rank 0] step:761/10000 train_time:53401ms step_avg:70.17ms +[2025-09-02 18:28:51] [Rank 0] step:781/10000 train_time:54820ms step_avg:70.19ms +[2025-09-02 18:28:51] [Rank 0] step:781/10000 train_time:54820ms step_avg:70.19ms +[2025-09-02 18:28:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:28:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:29:04] [Rank 0] PRINT: step:800/10000 val_loss:5.4590 svd_entropy: attn_qk:H=0.5724,top10E=0.55,eRank=59.6,q75/q25=17.17 attn_vo:H=0.6353,top10E=0.41,eRank=103.1,q75/q25=31.04 mlp_w1:H=0.5125,top10E=0.62,eRank=51.6,q75/q25=3.94 mlp_w2:H=0.6675,top10E=0.39,eRank=87.6,q75/q25=11.92 vo_prod:H=0.5130,top10E=0.62,eRank=32.0,q75/q25=378.21 train_time:56383ms step_avg:70.48ms +[2025-09-02 18:29:04] [Rank 0] PRINT: step:800/10000 val_loss:5.4590 svd_entropy: attn_qk:H=0.5724,top10E=0.55,eRank=59.6,q75/q25=17.17 attn_vo:H=0.6353,top10E=0.41,eRank=103.1,q75/q25=31.04 mlp_w1:H=0.5125,top10E=0.62,eRank=51.6,q75/q25=3.94 mlp_w2:H=0.6675,top10E=0.39,eRank=87.6,q75/q25=11.92 vo_prod:H=0.5130,top10E=0.62,eRank=32.0,q75/q25=378.21 train_time:56383ms step_avg:70.48ms +[2025-09-02 18:29:04] [Rank 0] step:801/10000 train_time:56395ms step_avg:70.41ms +[2025-09-02 18:29:04] [Rank 0] step:801/10000 train_time:56395ms step_avg:70.41ms +[2025-09-02 18:29:06] [Rank 0] step:821/10000 train_time:57689ms step_avg:70.27ms +[2025-09-02 18:29:06] [Rank 0] step:821/10000 train_time:57689ms step_avg:70.27ms +[2025-09-02 18:29:07] [Rank 0] step:841/10000 train_time:59106ms step_avg:70.28ms +[2025-09-02 18:29:07] [Rank 0] step:841/10000 train_time:59106ms step_avg:70.28ms +[2025-09-02 18:29:09] [Rank 0] step:861/10000 train_time:60524ms step_avg:70.30ms +[2025-09-02 18:29:09] [Rank 0] step:861/10000 train_time:60524ms step_avg:70.30ms +[2025-09-02 18:29:10] [Rank 0] step:881/10000 train_time:61953ms step_avg:70.32ms +[2025-09-02 18:29:10] [Rank 0] step:881/10000 train_time:61953ms step_avg:70.32ms +[2025-09-02 18:29:11] [Rank 0] step:901/10000 train_time:63373ms step_avg:70.34ms +[2025-09-02 18:29:11] [Rank 0] step:901/10000 train_time:63373ms step_avg:70.34ms +[2025-09-02 18:29:13] [Rank 0] step:921/10000 train_time:64797ms step_avg:70.35ms +[2025-09-02 18:29:13] [Rank 0] step:921/10000 train_time:64797ms step_avg:70.35ms +[2025-09-02 18:29:14] [Rank 0] step:941/10000 
train_time:66217ms step_avg:70.37ms +[2025-09-02 18:29:14] [Rank 0] step:941/10000 train_time:66217ms step_avg:70.37ms +[2025-09-02 18:29:16] [Rank 0] step:961/10000 train_time:67639ms step_avg:70.38ms +[2025-09-02 18:29:16] [Rank 0] step:961/10000 train_time:67639ms step_avg:70.38ms +[2025-09-02 18:29:17] [Rank 0] step:981/10000 train_time:69060ms step_avg:70.40ms +[2025-09-02 18:29:17] [Rank 0] step:981/10000 train_time:69060ms step_avg:70.40ms +[2025-09-02 18:29:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:29:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:29:30] [Rank 0] PRINT: step:1000/10000 val_loss:5.2900 svd_entropy: attn_qk:H=0.5936,top10E=0.51,eRank=65.5,q75/q25=19.62 attn_vo:H=0.6588,top10E=0.37,eRank=113.2,q75/q25=36.44 mlp_w1:H=0.5422,top10E=0.58,eRank=59.9,q75/q25=4.36 mlp_w2:H=0.6986,top10E=0.34,eRank=107.0,q75/q25=13.28 vo_prod:H=0.5454,top10E=0.55,eRank=39.4,q75/q25=754.19 train_time:70623ms step_avg:70.62ms +[2025-09-02 18:29:30] [Rank 0] PRINT: step:1000/10000 val_loss:5.2900 svd_entropy: attn_qk:H=0.5936,top10E=0.51,eRank=65.5,q75/q25=19.62 attn_vo:H=0.6588,top10E=0.37,eRank=113.2,q75/q25=36.44 mlp_w1:H=0.5422,top10E=0.58,eRank=59.9,q75/q25=4.36 mlp_w2:H=0.6986,top10E=0.34,eRank=107.0,q75/q25=13.28 vo_prod:H=0.5454,top10E=0.55,eRank=39.4,q75/q25=754.19 train_time:70623ms step_avg:70.62ms +[2025-09-02 18:29:30] [Rank 0] step:1001/10000 train_time:70635ms step_avg:70.56ms +[2025-09-02 18:29:30] [Rank 0] step:1001/10000 train_time:70635ms step_avg:70.56ms +[2025-09-02 18:29:32] [Rank 0] step:1021/10000 train_time:71919ms step_avg:70.44ms +[2025-09-02 18:29:32] [Rank 0] step:1021/10000 train_time:71919ms step_avg:70.44ms +[2025-09-02 18:29:33] [Rank 0] step:1041/10000 train_time:73338ms step_avg:70.45ms +[2025-09-02 18:29:33] [Rank 0] step:1041/10000 
train_time:73338ms step_avg:70.45ms +[2025-09-02 18:29:35] [Rank 0] step:1061/10000 train_time:74753ms step_avg:70.46ms +[2025-09-02 18:29:35] [Rank 0] step:1061/10000 train_time:74753ms step_avg:70.46ms +[2025-09-02 18:29:36] [Rank 0] step:1081/10000 train_time:76171ms step_avg:70.46ms +[2025-09-02 18:29:36] [Rank 0] step:1081/10000 train_time:76171ms step_avg:70.46ms +[2025-09-02 18:29:38] [Rank 0] step:1101/10000 train_time:77588ms step_avg:70.47ms +[2025-09-02 18:29:38] [Rank 0] step:1101/10000 train_time:77588ms step_avg:70.47ms +[2025-09-02 18:29:39] [Rank 0] step:1121/10000 train_time:79008ms step_avg:70.48ms +[2025-09-02 18:29:39] [Rank 0] step:1121/10000 train_time:79008ms step_avg:70.48ms +[2025-09-02 18:29:40] [Rank 0] step:1141/10000 train_time:80427ms step_avg:70.49ms +[2025-09-02 18:29:40] [Rank 0] step:1141/10000 train_time:80427ms step_avg:70.49ms +[2025-09-02 18:29:42] [Rank 0] step:1161/10000 train_time:81845ms step_avg:70.50ms +[2025-09-02 18:29:42] [Rank 0] step:1161/10000 train_time:81845ms step_avg:70.50ms +[2025-09-02 18:29:43] [Rank 0] step:1181/10000 train_time:83264ms step_avg:70.50ms +[2025-09-02 18:29:43] [Rank 0] step:1181/10000 train_time:83264ms step_avg:70.50ms +[2025-09-02 18:29:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:29:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:29:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.1347 svd_entropy: attn_qk:H=0.6110,top10E=0.48,eRank=71.3,q75/q25=23.06 attn_vo:H=0.6793,top10E=0.34,eRank=123.6,q75/q25=47.29 mlp_w1:H=0.5661,top10E=0.55,eRank=67.5,q75/q25=4.85 mlp_w2:H=0.7225,top10E=0.31,eRank=125.1,q75/q25=15.88 vo_prod:H=0.5711,top10E=0.50,eRank=46.6,q75/q25=1586.49 train_time:84825ms step_avg:70.69ms +[2025-09-02 18:29:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.1347 svd_entropy: attn_qk:H=0.6110,top10E=0.48,eRank=71.3,q75/q25=23.06 attn_vo:H=0.6793,top10E=0.34,eRank=123.6,q75/q25=47.29 mlp_w1:H=0.5661,top10E=0.55,eRank=67.5,q75/q25=4.85 mlp_w2:H=0.7225,top10E=0.31,eRank=125.1,q75/q25=15.88 vo_prod:H=0.5711,top10E=0.50,eRank=46.6,q75/q25=1586.49 train_time:84825ms step_avg:70.69ms +[2025-09-02 18:29:57] [Rank 0] step:1201/10000 train_time:84837ms step_avg:70.64ms +[2025-09-02 18:29:57] [Rank 0] step:1201/10000 train_time:84837ms step_avg:70.64ms +[2025-09-02 18:29:58] [Rank 0] step:1221/10000 train_time:86130ms step_avg:70.54ms +[2025-09-02 18:29:58] [Rank 0] step:1221/10000 train_time:86130ms step_avg:70.54ms +[2025-09-02 18:29:59] [Rank 0] step:1241/10000 train_time:87547ms step_avg:70.55ms +[2025-09-02 18:29:59] [Rank 0] step:1241/10000 train_time:87547ms step_avg:70.55ms +[2025-09-02 18:30:01] [Rank 0] step:1261/10000 train_time:88966ms step_avg:70.55ms +[2025-09-02 18:30:01] [Rank 0] step:1261/10000 train_time:88966ms step_avg:70.55ms +[2025-09-02 18:30:02] [Rank 0] step:1281/10000 train_time:90385ms step_avg:70.56ms +[2025-09-02 18:30:02] [Rank 0] step:1281/10000 train_time:90385ms step_avg:70.56ms +[2025-09-02 18:30:04] [Rank 0] step:1301/10000 train_time:91804ms step_avg:70.56ms +[2025-09-02 18:30:04] [Rank 0] step:1301/10000 train_time:91804ms step_avg:70.56ms +[2025-09-02 18:30:05] [Rank 0] step:1321/10000 train_time:93224ms step_avg:70.57ms +[2025-09-02 18:30:05] [Rank 0] step:1321/10000 train_time:93224ms step_avg:70.57ms +[2025-09-02 18:30:06] [Rank 0] 
step:1341/10000 train_time:94643ms step_avg:70.58ms +[2025-09-02 18:30:06] [Rank 0] step:1341/10000 train_time:94643ms step_avg:70.58ms +[2025-09-02 18:30:08] [Rank 0] step:1361/10000 train_time:96063ms step_avg:70.58ms +[2025-09-02 18:30:08] [Rank 0] step:1361/10000 train_time:96063ms step_avg:70.58ms +[2025-09-02 18:30:09] [Rank 0] step:1381/10000 train_time:97481ms step_avg:70.59ms +[2025-09-02 18:30:09] [Rank 0] step:1381/10000 train_time:97481ms step_avg:70.59ms +[2025-09-02 18:30:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:30:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:30:22] [Rank 0] PRINT: step:1400/10000 val_loss:5.0094 svd_entropy: attn_qk:H=0.6259,top10E=0.45,eRank=76.8,q75/q25=27.25 attn_vo:H=0.6969,top10E=0.31,eRank=134.1,q75/q25=59.42 mlp_w1:H=0.5861,top10E=0.53,eRank=74.8,q75/q25=5.41 mlp_w2:H=0.7410,top10E=0.28,eRank=141.3,q75/q25=18.78 vo_prod:H=0.5907,top10E=0.46,eRank=53.1,q75/q25=2889.10 train_time:99044ms step_avg:70.75ms +[2025-09-02 18:30:22] [Rank 0] PRINT: step:1400/10000 val_loss:5.0094 svd_entropy: attn_qk:H=0.6259,top10E=0.45,eRank=76.8,q75/q25=27.25 attn_vo:H=0.6969,top10E=0.31,eRank=134.1,q75/q25=59.42 mlp_w1:H=0.5861,top10E=0.53,eRank=74.8,q75/q25=5.41 mlp_w2:H=0.7410,top10E=0.28,eRank=141.3,q75/q25=18.78 vo_prod:H=0.5907,top10E=0.46,eRank=53.1,q75/q25=2889.10 train_time:99044ms step_avg:70.75ms +[2025-09-02 18:30:22] [Rank 0] step:1401/10000 train_time:99056ms step_avg:70.70ms +[2025-09-02 18:30:22] [Rank 0] step:1401/10000 train_time:99056ms step_avg:70.70ms +[2025-09-02 18:30:24] [Rank 0] step:1421/10000 train_time:100352ms step_avg:70.62ms +[2025-09-02 18:30:24] [Rank 0] step:1421/10000 train_time:100352ms step_avg:70.62ms +[2025-09-02 18:30:25] [Rank 0] step:1441/10000 train_time:101770ms step_avg:70.62ms +[2025-09-02 18:30:25] 
[Rank 0] step:1441/10000 train_time:101770ms step_avg:70.62ms +[2025-09-02 18:30:27] [Rank 0] step:1461/10000 train_time:103189ms step_avg:70.63ms +[2025-09-02 18:30:27] [Rank 0] step:1461/10000 train_time:103189ms step_avg:70.63ms +[2025-09-02 18:30:28] [Rank 0] step:1481/10000 train_time:104609ms step_avg:70.63ms +[2025-09-02 18:30:28] [Rank 0] step:1481/10000 train_time:104609ms step_avg:70.63ms +[2025-09-02 18:30:29] [Rank 0] step:1501/10000 train_time:106037ms step_avg:70.64ms +[2025-09-02 18:30:29] [Rank 0] step:1501/10000 train_time:106037ms step_avg:70.64ms +[2025-09-02 18:30:31] [Rank 0] step:1521/10000 train_time:107466ms step_avg:70.66ms +[2025-09-02 18:30:31] [Rank 0] step:1521/10000 train_time:107466ms step_avg:70.66ms +[2025-09-02 18:30:32] [Rank 0] step:1541/10000 train_time:108896ms step_avg:70.67ms +[2025-09-02 18:30:32] [Rank 0] step:1541/10000 train_time:108896ms step_avg:70.67ms +[2025-09-02 18:30:34] [Rank 0] step:1561/10000 train_time:110326ms step_avg:70.68ms +[2025-09-02 18:30:34] [Rank 0] step:1561/10000 train_time:110326ms step_avg:70.68ms +[2025-09-02 18:30:35] [Rank 0] step:1581/10000 train_time:111757ms step_avg:70.69ms +[2025-09-02 18:30:35] [Rank 0] step:1581/10000 train_time:111757ms step_avg:70.69ms +[2025-09-02 18:30:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:30:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:30:48] [Rank 0] PRINT: step:1600/10000 val_loss:4.8790 svd_entropy: attn_qk:H=0.6382,top10E=0.43,eRank=81.6,q75/q25=32.14 attn_vo:H=0.7118,top10E=0.29,eRank=144.1,q75/q25=71.54 mlp_w1:H=0.6036,top10E=0.51,eRank=81.8,q75/q25=5.99 mlp_w2:H=0.7559,top10E=0.26,eRank=156.1,q75/q25=21.71 vo_prod:H=0.6074,top10E=0.43,eRank=59.3,q75/q25=4514.17 train_time:113331ms step_avg:70.83ms +[2025-09-02 18:30:48] [Rank 0] PRINT: step:1600/10000 val_loss:4.8790 svd_entropy: attn_qk:H=0.6382,top10E=0.43,eRank=81.6,q75/q25=32.14 attn_vo:H=0.7118,top10E=0.29,eRank=144.1,q75/q25=71.54 mlp_w1:H=0.6036,top10E=0.51,eRank=81.8,q75/q25=5.99 mlp_w2:H=0.7559,top10E=0.26,eRank=156.1,q75/q25=21.71 vo_prod:H=0.6074,top10E=0.43,eRank=59.3,q75/q25=4514.17 train_time:113331ms step_avg:70.83ms +[2025-09-02 18:30:48] [Rank 0] step:1601/10000 train_time:113342ms step_avg:70.79ms +[2025-09-02 18:30:48] [Rank 0] step:1601/10000 train_time:113342ms step_avg:70.79ms +[2025-09-02 18:30:50] [Rank 0] step:1621/10000 train_time:114631ms step_avg:70.72ms +[2025-09-02 18:30:50] [Rank 0] step:1621/10000 train_time:114631ms step_avg:70.72ms +[2025-09-02 18:30:51] [Rank 0] step:1641/10000 train_time:116060ms step_avg:70.73ms +[2025-09-02 18:30:51] [Rank 0] step:1641/10000 train_time:116060ms step_avg:70.73ms +[2025-09-02 18:30:53] [Rank 0] step:1661/10000 train_time:117491ms step_avg:70.74ms +[2025-09-02 18:30:53] [Rank 0] step:1661/10000 train_time:117491ms step_avg:70.74ms +[2025-09-02 18:30:54] [Rank 0] step:1681/10000 train_time:118920ms step_avg:70.74ms +[2025-09-02 18:30:54] [Rank 0] step:1681/10000 train_time:118920ms step_avg:70.74ms +[2025-09-02 18:30:55] [Rank 0] step:1701/10000 train_time:120350ms step_avg:70.75ms +[2025-09-02 18:30:55] [Rank 0] step:1701/10000 train_time:120350ms step_avg:70.75ms +[2025-09-02 18:30:57] [Rank 0] step:1721/10000 train_time:121782ms step_avg:70.76ms +[2025-09-02 18:30:57] [Rank 0] step:1721/10000 train_time:121782ms step_avg:70.76ms +[2025-09-02 18:30:58] 
[Rank 0] step:1741/10000 train_time:123213ms step_avg:70.77ms +[2025-09-02 18:30:58] [Rank 0] step:1741/10000 train_time:123213ms step_avg:70.77ms +[2025-09-02 18:31:00] [Rank 0] step:1761/10000 train_time:124644ms step_avg:70.78ms +[2025-09-02 18:31:00] [Rank 0] step:1761/10000 train_time:124644ms step_avg:70.78ms +[2025-09-02 18:31:01] [Rank 0] step:1781/10000 train_time:126075ms step_avg:70.79ms +[2025-09-02 18:31:01] [Rank 0] step:1781/10000 train_time:126075ms step_avg:70.79ms +[2025-09-02 18:31:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:31:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:31:14] [Rank 0] PRINT: step:1800/10000 val_loss:4.7720 svd_entropy: attn_qk:H=0.6489,top10E=0.41,eRank=86.2,q75/q25=37.77 attn_vo:H=0.7246,top10E=0.28,eRank=153.6,q75/q25=81.83 mlp_w1:H=0.6184,top10E=0.49,eRank=88.1,q75/q25=6.70 mlp_w2:H=0.7677,top10E=0.24,eRank=168.9,q75/q25=24.82 vo_prod:H=0.6214,top10E=0.41,eRank=65.0,q75/q25=6271.08 train_time:127652ms step_avg:70.92ms +[2025-09-02 18:31:14] [Rank 0] PRINT: step:1800/10000 val_loss:4.7720 svd_entropy: attn_qk:H=0.6489,top10E=0.41,eRank=86.2,q75/q25=37.77 attn_vo:H=0.7246,top10E=0.28,eRank=153.6,q75/q25=81.83 mlp_w1:H=0.6184,top10E=0.49,eRank=88.1,q75/q25=6.70 mlp_w2:H=0.7677,top10E=0.24,eRank=168.9,q75/q25=24.82 vo_prod:H=0.6214,top10E=0.41,eRank=65.0,q75/q25=6271.08 train_time:127652ms step_avg:70.92ms +[2025-09-02 18:31:14] [Rank 0] step:1801/10000 train_time:127664ms step_avg:70.89ms +[2025-09-02 18:31:14] [Rank 0] step:1801/10000 train_time:127664ms step_avg:70.89ms +[2025-09-02 18:31:16] [Rank 0] step:1821/10000 train_time:128965ms step_avg:70.82ms +[2025-09-02 18:31:16] [Rank 0] step:1821/10000 train_time:128965ms step_avg:70.82ms +[2025-09-02 18:31:17] [Rank 0] step:1841/10000 train_time:130395ms step_avg:70.83ms 
+[2025-09-02 18:31:17] [Rank 0] step:1841/10000 train_time:130395ms step_avg:70.83ms +[2025-09-02 18:31:19] [Rank 0] step:1861/10000 train_time:131825ms step_avg:70.84ms +[2025-09-02 18:31:19] [Rank 0] step:1861/10000 train_time:131825ms step_avg:70.84ms +[2025-09-02 18:31:20] [Rank 0] step:1881/10000 train_time:133255ms step_avg:70.84ms +[2025-09-02 18:31:20] [Rank 0] step:1881/10000 train_time:133255ms step_avg:70.84ms +[2025-09-02 18:31:21] [Rank 0] step:1901/10000 train_time:134687ms step_avg:70.85ms +[2025-09-02 18:31:21] [Rank 0] step:1901/10000 train_time:134687ms step_avg:70.85ms +[2025-09-02 18:31:23] [Rank 0] step:1921/10000 train_time:136119ms step_avg:70.86ms +[2025-09-02 18:31:23] [Rank 0] step:1921/10000 train_time:136119ms step_avg:70.86ms +[2025-09-02 18:31:24] [Rank 0] step:1941/10000 train_time:137550ms step_avg:70.87ms +[2025-09-02 18:31:24] [Rank 0] step:1941/10000 train_time:137550ms step_avg:70.87ms +[2025-09-02 18:31:26] [Rank 0] step:1961/10000 train_time:138982ms step_avg:70.87ms +[2025-09-02 18:31:26] [Rank 0] step:1961/10000 train_time:138982ms step_avg:70.87ms +[2025-09-02 18:31:27] [Rank 0] step:1981/10000 train_time:140415ms step_avg:70.88ms +[2025-09-02 18:31:27] [Rank 0] step:1981/10000 train_time:140415ms step_avg:70.88ms +[2025-09-02 18:31:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:31:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:31:40] [Rank 0] PRINT: step:2000/10000 val_loss:4.7059 svd_entropy: attn_qk:H=0.6581,top10E=0.39,eRank=90.4,q75/q25=44.05 attn_vo:H=0.7355,top10E=0.26,eRank=162.4,q75/q25=89.74 mlp_w1:H=0.6311,top10E=0.47,eRank=94.0,q75/q25=7.41 mlp_w2:H=0.7770,top10E=0.23,eRank=179.9,q75/q25=27.53 vo_prod:H=0.6333,top10E=0.39,eRank=70.3,q75/q25=8108.42 train_time:141990ms step_avg:70.99ms +[2025-09-02 18:31:40] [Rank 0] PRINT: step:2000/10000 val_loss:4.7059 svd_entropy: attn_qk:H=0.6581,top10E=0.39,eRank=90.4,q75/q25=44.05 attn_vo:H=0.7355,top10E=0.26,eRank=162.4,q75/q25=89.74 mlp_w1:H=0.6311,top10E=0.47,eRank=94.0,q75/q25=7.41 mlp_w2:H=0.7770,top10E=0.23,eRank=179.9,q75/q25=27.53 vo_prod:H=0.6333,top10E=0.39,eRank=70.3,q75/q25=8108.42 train_time:141990ms step_avg:70.99ms +[2025-09-02 18:31:40] [Rank 0] step:2001/10000 train_time:142002ms step_avg:70.97ms +[2025-09-02 18:31:40] [Rank 0] step:2001/10000 train_time:142002ms step_avg:70.97ms +[2025-09-02 18:31:42] [Rank 0] step:2021/10000 train_time:143304ms step_avg:70.91ms +[2025-09-02 18:31:42] [Rank 0] step:2021/10000 train_time:143304ms step_avg:70.91ms +[2025-09-02 18:31:43] [Rank 0] step:2041/10000 train_time:144862ms step_avg:70.98ms +[2025-09-02 18:31:43] [Rank 0] step:2041/10000 train_time:144862ms step_avg:70.98ms +[2025-09-02 18:31:45] [Rank 0] step:2061/10000 train_time:146293ms step_avg:70.98ms +[2025-09-02 18:31:45] [Rank 0] step:2061/10000 train_time:146293ms step_avg:70.98ms +[2025-09-02 18:31:46] [Rank 0] step:2081/10000 train_time:147723ms step_avg:70.99ms +[2025-09-02 18:31:46] [Rank 0] step:2081/10000 train_time:147723ms step_avg:70.99ms +[2025-09-02 18:31:48] [Rank 0] step:2101/10000 train_time:149156ms step_avg:70.99ms +[2025-09-02 18:31:48] [Rank 0] step:2101/10000 train_time:149156ms step_avg:70.99ms +[2025-09-02 18:31:49] [Rank 0] step:2121/10000 train_time:150587ms step_avg:71.00ms +[2025-09-02 18:31:49] [Rank 0] step:2121/10000 train_time:150587ms step_avg:71.00ms +[2025-09-02 18:31:51] 
[Rank 0] step:2141/10000 train_time:152019ms step_avg:71.00ms +[2025-09-02 18:31:51] [Rank 0] step:2141/10000 train_time:152019ms step_avg:71.00ms +[2025-09-02 18:31:52] [Rank 0] step:2161/10000 train_time:153450ms step_avg:71.01ms +[2025-09-02 18:31:52] [Rank 0] step:2161/10000 train_time:153450ms step_avg:71.01ms +[2025-09-02 18:31:53] [Rank 0] step:2181/10000 train_time:154882ms step_avg:71.01ms +[2025-09-02 18:31:53] [Rank 0] step:2181/10000 train_time:154882ms step_avg:71.01ms +[2025-09-02 18:31:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:31:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:32:07] [Rank 0] PRINT: step:2200/10000 val_loss:4.6293 svd_entropy: attn_qk:H=0.6657,top10E=0.38,eRank=94.1,q75/q25=49.84 attn_vo:H=0.7448,top10E=0.25,eRank=170.4,q75/q25=95.01 mlp_w1:H=0.6420,top10E=0.46,eRank=99.4,q75/q25=8.16 mlp_w2:H=0.7848,top10E=0.21,eRank=189.7,q75/q25=29.73 vo_prod:H=0.6433,top10E=0.37,eRank=75.2,q75/q25=9724.72 train_time:156456ms step_avg:71.12ms +[2025-09-02 18:32:07] [Rank 0] PRINT: step:2200/10000 val_loss:4.6293 svd_entropy: attn_qk:H=0.6657,top10E=0.38,eRank=94.1,q75/q25=49.84 attn_vo:H=0.7448,top10E=0.25,eRank=170.4,q75/q25=95.01 mlp_w1:H=0.6420,top10E=0.46,eRank=99.4,q75/q25=8.16 mlp_w2:H=0.7848,top10E=0.21,eRank=189.7,q75/q25=29.73 vo_prod:H=0.6433,top10E=0.37,eRank=75.2,q75/q25=9724.72 train_time:156456ms step_avg:71.12ms +[2025-09-02 18:32:07] [Rank 0] step:2201/10000 train_time:156468ms step_avg:71.09ms +[2025-09-02 18:32:07] [Rank 0] step:2201/10000 train_time:156468ms step_avg:71.09ms +[2025-09-02 18:32:08] [Rank 0] step:2221/10000 train_time:157786ms step_avg:71.04ms +[2025-09-02 18:32:08] [Rank 0] step:2221/10000 train_time:157786ms step_avg:71.04ms +[2025-09-02 18:32:10] [Rank 0] step:2241/10000 train_time:159248ms step_avg:71.06ms 
+[2025-09-02 18:32:10] [Rank 0] step:2241/10000 train_time:159248ms step_avg:71.06ms +[2025-09-02 18:32:11] [Rank 0] step:2261/10000 train_time:160719ms step_avg:71.08ms +[2025-09-02 18:32:11] [Rank 0] step:2261/10000 train_time:160719ms step_avg:71.08ms +[2025-09-02 18:32:13] [Rank 0] step:2281/10000 train_time:162192ms step_avg:71.11ms +[2025-09-02 18:32:13] [Rank 0] step:2281/10000 train_time:162192ms step_avg:71.11ms +[2025-09-02 18:32:14] [Rank 0] step:2301/10000 train_time:163665ms step_avg:71.13ms +[2025-09-02 18:32:14] [Rank 0] step:2301/10000 train_time:163665ms step_avg:71.13ms +[2025-09-02 18:32:16] [Rank 0] step:2321/10000 train_time:165139ms step_avg:71.15ms +[2025-09-02 18:32:16] [Rank 0] step:2321/10000 train_time:165139ms step_avg:71.15ms +[2025-09-02 18:32:17] [Rank 0] step:2341/10000 train_time:166613ms step_avg:71.17ms +[2025-09-02 18:32:17] [Rank 0] step:2341/10000 train_time:166613ms step_avg:71.17ms +[2025-09-02 18:32:19] [Rank 0] step:2361/10000 train_time:168087ms step_avg:71.19ms +[2025-09-02 18:32:19] [Rank 0] step:2361/10000 train_time:168087ms step_avg:71.19ms +[2025-09-02 18:32:20] [Rank 0] step:2381/10000 train_time:169562ms step_avg:71.21ms +[2025-09-02 18:32:20] [Rank 0] step:2381/10000 train_time:169562ms step_avg:71.21ms +[2025-09-02 18:32:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:32:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:32:33] [Rank 0] PRINT: step:2400/10000 val_loss:4.5544 svd_entropy: attn_qk:H=0.6720,top10E=0.37,eRank=97.4,q75/q25=55.68 attn_vo:H=0.7531,top10E=0.24,eRank=178.1,q75/q25=98.96 mlp_w1:H=0.6518,top10E=0.44,eRank=104.6,q75/q25=8.94 mlp_w2:H=0.7915,top10E=0.20,eRank=198.6,q75/q25=32.34 vo_prod:H=0.6525,top10E=0.36,eRank=79.9,q75/q25=11192.67 train_time:171184ms step_avg:71.33ms +[2025-09-02 18:32:33] [Rank 0] PRINT: step:2400/10000 val_loss:4.5544 svd_entropy: attn_qk:H=0.6720,top10E=0.37,eRank=97.4,q75/q25=55.68 attn_vo:H=0.7531,top10E=0.24,eRank=178.1,q75/q25=98.96 mlp_w1:H=0.6518,top10E=0.44,eRank=104.6,q75/q25=8.94 mlp_w2:H=0.7915,top10E=0.20,eRank=198.6,q75/q25=32.34 vo_prod:H=0.6525,top10E=0.36,eRank=79.9,q75/q25=11192.67 train_time:171184ms step_avg:71.33ms +[2025-09-02 18:32:33] [Rank 0] step:2401/10000 train_time:171196ms step_avg:71.30ms +[2025-09-02 18:32:33] [Rank 0] step:2401/10000 train_time:171196ms step_avg:71.30ms +[2025-09-02 18:32:35] [Rank 0] step:2421/10000 train_time:172555ms step_avg:71.27ms +[2025-09-02 18:32:35] [Rank 0] step:2421/10000 train_time:172555ms step_avg:71.27ms +[2025-09-02 18:32:36] [Rank 0] step:2441/10000 train_time:174028ms step_avg:71.29ms +[2025-09-02 18:32:36] [Rank 0] step:2441/10000 train_time:174028ms step_avg:71.29ms +[2025-09-02 18:32:38] [Rank 0] step:2461/10000 train_time:175502ms step_avg:71.31ms +[2025-09-02 18:32:38] [Rank 0] step:2461/10000 train_time:175502ms step_avg:71.31ms +[2025-09-02 18:32:39] [Rank 0] step:2481/10000 train_time:176975ms step_avg:71.33ms +[2025-09-02 18:32:39] [Rank 0] step:2481/10000 train_time:176975ms step_avg:71.33ms +[2025-09-02 18:32:41] [Rank 0] step:2501/10000 train_time:178450ms step_avg:71.35ms +[2025-09-02 18:32:41] [Rank 0] step:2501/10000 train_time:178450ms step_avg:71.35ms +[2025-09-02 18:32:42] [Rank 0] step:2521/10000 train_time:179927ms step_avg:71.37ms +[2025-09-02 18:32:42] [Rank 0] step:2521/10000 train_time:179927ms step_avg:71.37ms +[2025-09-02 
18:32:44] [Rank 0] step:2541/10000 train_time:181402ms step_avg:71.39ms +[2025-09-02 18:32:44] [Rank 0] step:2541/10000 train_time:181402ms step_avg:71.39ms +[2025-09-02 18:32:45] [Rank 0] step:2561/10000 train_time:182877ms step_avg:71.41ms +[2025-09-02 18:32:45] [Rank 0] step:2561/10000 train_time:182877ms step_avg:71.41ms +[2025-09-02 18:32:47] [Rank 0] step:2581/10000 train_time:184353ms step_avg:71.43ms +[2025-09-02 18:32:47] [Rank 0] step:2581/10000 train_time:184353ms step_avg:71.43ms +[2025-09-02 18:32:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:32:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:33:00] [Rank 0] PRINT: step:2600/10000 val_loss:4.4963 svd_entropy: attn_qk:H=0.6783,top10E=0.36,eRank=100.8,q75/q25=61.42 attn_vo:H=0.7605,top10E=0.23,eRank=185.3,q75/q25=101.41 mlp_w1:H=0.6605,top10E=0.43,eRank=109.4,q75/q25=9.77 mlp_w2:H=0.7973,top10E=0.20,eRank=206.6,q75/q25=34.47 vo_prod:H=0.6608,top10E=0.34,eRank=84.5,q75/q25=12164.30 train_time:185978ms step_avg:71.53ms +[2025-09-02 18:33:00] [Rank 0] PRINT: step:2600/10000 val_loss:4.4963 svd_entropy: attn_qk:H=0.6783,top10E=0.36,eRank=100.8,q75/q25=61.42 attn_vo:H=0.7605,top10E=0.23,eRank=185.3,q75/q25=101.41 mlp_w1:H=0.6605,top10E=0.43,eRank=109.4,q75/q25=9.77 mlp_w2:H=0.7973,top10E=0.20,eRank=206.6,q75/q25=34.47 vo_prod:H=0.6608,top10E=0.34,eRank=84.5,q75/q25=12164.30 train_time:185978ms step_avg:71.53ms +[2025-09-02 18:33:00] [Rank 0] step:2601/10000 train_time:185990ms step_avg:71.51ms +[2025-09-02 18:33:00] [Rank 0] step:2601/10000 train_time:185990ms step_avg:71.51ms +[2025-09-02 18:33:02] [Rank 0] step:2621/10000 train_time:187337ms step_avg:71.48ms +[2025-09-02 18:33:02] [Rank 0] step:2621/10000 train_time:187337ms step_avg:71.48ms +[2025-09-02 18:33:03] [Rank 0] step:2641/10000 train_time:188812ms 
step_avg:71.49ms +[2025-09-02 18:33:03] [Rank 0] step:2641/10000 train_time:188812ms step_avg:71.49ms +[2025-09-02 18:33:05] [Rank 0] step:2661/10000 train_time:190286ms step_avg:71.51ms +[2025-09-02 18:33:05] [Rank 0] step:2661/10000 train_time:190286ms step_avg:71.51ms +[2025-09-02 18:33:06] [Rank 0] step:2681/10000 train_time:191761ms step_avg:71.53ms +[2025-09-02 18:33:06] [Rank 0] step:2681/10000 train_time:191761ms step_avg:71.53ms +[2025-09-02 18:33:08] [Rank 0] step:2701/10000 train_time:193287ms step_avg:71.56ms +[2025-09-02 18:33:08] [Rank 0] step:2701/10000 train_time:193287ms step_avg:71.56ms +[2025-09-02 18:33:09] [Rank 0] step:2721/10000 train_time:194735ms step_avg:71.57ms +[2025-09-02 18:33:09] [Rank 0] step:2721/10000 train_time:194735ms step_avg:71.57ms +[2025-09-02 18:33:10] [Rank 0] step:2741/10000 train_time:196210ms step_avg:71.58ms +[2025-09-02 18:33:10] [Rank 0] step:2741/10000 train_time:196210ms step_avg:71.58ms +[2025-09-02 18:33:12] [Rank 0] step:2761/10000 train_time:197686ms step_avg:71.60ms +[2025-09-02 18:33:12] [Rank 0] step:2761/10000 train_time:197686ms step_avg:71.60ms +[2025-09-02 18:33:13] [Rank 0] step:2781/10000 train_time:199162ms step_avg:71.62ms +[2025-09-02 18:33:13] [Rank 0] step:2781/10000 train_time:199162ms step_avg:71.62ms +[2025-09-02 18:33:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:33:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:33:27] [Rank 0] PRINT: step:2800/10000 val_loss:4.4578 svd_entropy: attn_qk:H=0.6842,top10E=0.35,eRank=104.0,q75/q25=67.05 attn_vo:H=0.7672,top10E=0.22,eRank=192.2,q75/q25=102.85 mlp_w1:H=0.6687,top10E=0.42,eRank=114.1,q75/q25=10.60 mlp_w2:H=0.8025,top10E=0.19,eRank=214.1,q75/q25=36.20 vo_prod:H=0.6684,top10E=0.33,eRank=88.9,q75/q25=13190.70 train_time:200787ms step_avg:71.71ms +[2025-09-02 18:33:27] [Rank 0] PRINT: step:2800/10000 val_loss:4.4578 svd_entropy: attn_qk:H=0.6842,top10E=0.35,eRank=104.0,q75/q25=67.05 attn_vo:H=0.7672,top10E=0.22,eRank=192.2,q75/q25=102.85 mlp_w1:H=0.6687,top10E=0.42,eRank=114.1,q75/q25=10.60 mlp_w2:H=0.8025,top10E=0.19,eRank=214.1,q75/q25=36.20 vo_prod:H=0.6684,top10E=0.33,eRank=88.9,q75/q25=13190.70 train_time:200787ms step_avg:71.71ms +[2025-09-02 18:33:27] [Rank 0] step:2801/10000 train_time:200799ms step_avg:71.69ms +[2025-09-02 18:33:27] [Rank 0] step:2801/10000 train_time:200799ms step_avg:71.69ms +[2025-09-02 18:33:28] [Rank 0] step:2821/10000 train_time:202128ms step_avg:71.65ms +[2025-09-02 18:33:28] [Rank 0] step:2821/10000 train_time:202128ms step_avg:71.65ms +[2025-09-02 18:33:30] [Rank 0] step:2841/10000 train_time:203602ms step_avg:71.67ms +[2025-09-02 18:33:30] [Rank 0] step:2841/10000 train_time:203602ms step_avg:71.67ms +[2025-09-02 18:33:31] [Rank 0] step:2861/10000 train_time:205076ms step_avg:71.68ms +[2025-09-02 18:33:31] [Rank 0] step:2861/10000 train_time:205076ms step_avg:71.68ms +[2025-09-02 18:33:33] [Rank 0] step:2881/10000 train_time:206549ms step_avg:71.69ms +[2025-09-02 18:33:33] [Rank 0] step:2881/10000 train_time:206549ms step_avg:71.69ms +[2025-09-02 18:33:34] [Rank 0] step:2901/10000 train_time:208024ms step_avg:71.71ms +[2025-09-02 18:33:34] [Rank 0] step:2901/10000 train_time:208024ms step_avg:71.71ms +[2025-09-02 18:33:36] [Rank 0] step:2921/10000 train_time:209501ms step_avg:71.72ms +[2025-09-02 18:33:36] [Rank 0] step:2921/10000 train_time:209501ms step_avg:71.72ms +[2025-09-02 
18:33:37] [Rank 0] step:2941/10000 train_time:210975ms step_avg:71.74ms +[2025-09-02 18:33:37] [Rank 0] step:2941/10000 train_time:210975ms step_avg:71.74ms +[2025-09-02 18:33:39] [Rank 0] step:2961/10000 train_time:212450ms step_avg:71.75ms +[2025-09-02 18:33:39] [Rank 0] step:2961/10000 train_time:212450ms step_avg:71.75ms +[2025-09-02 18:33:40] [Rank 0] step:2981/10000 train_time:213929ms step_avg:71.76ms +[2025-09-02 18:33:40] [Rank 0] step:2981/10000 train_time:213929ms step_avg:71.76ms +[2025-09-02 18:33:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:33:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:33:53] [Rank 0] PRINT: step:3000/10000 val_loss:4.4135 svd_entropy: attn_qk:H=0.6893,top10E=0.34,eRank=107.0,q75/q25=71.91 attn_vo:H=0.7732,top10E=0.21,eRank=198.5,q75/q25=103.14 mlp_w1:H=0.6759,top10E=0.41,eRank=118.4,q75/q25=11.39 mlp_w2:H=0.8069,top10E=0.18,eRank=220.6,q75/q25=38.06 vo_prod:H=0.6751,top10E=0.32,eRank=92.9,q75/q25=13223.33 train_time:215560ms step_avg:71.85ms +[2025-09-02 18:33:53] [Rank 0] PRINT: step:3000/10000 val_loss:4.4135 svd_entropy: attn_qk:H=0.6893,top10E=0.34,eRank=107.0,q75/q25=71.91 attn_vo:H=0.7732,top10E=0.21,eRank=198.5,q75/q25=103.14 mlp_w1:H=0.6759,top10E=0.41,eRank=118.4,q75/q25=11.39 mlp_w2:H=0.8069,top10E=0.18,eRank=220.6,q75/q25=38.06 vo_prod:H=0.6751,top10E=0.32,eRank=92.9,q75/q25=13223.33 train_time:215560ms step_avg:71.85ms +[2025-09-02 18:33:53] [Rank 0] step:3001/10000 train_time:215572ms step_avg:71.83ms +[2025-09-02 18:33:53] [Rank 0] step:3001/10000 train_time:215572ms step_avg:71.83ms +[2025-09-02 18:33:55] [Rank 0] step:3021/10000 train_time:216933ms step_avg:71.81ms +[2025-09-02 18:33:55] [Rank 0] step:3021/10000 train_time:216933ms step_avg:71.81ms +[2025-09-02 18:33:56] [Rank 0] step:3041/10000 train_time:218412ms 
step_avg:71.82ms +[2025-09-02 18:33:56] [Rank 0] step:3041/10000 train_time:218412ms step_avg:71.82ms +[2025-09-02 18:33:58] [Rank 0] step:3061/10000 train_time:219893ms step_avg:71.84ms +[2025-09-02 18:33:58] [Rank 0] step:3061/10000 train_time:219893ms step_avg:71.84ms +[2025-09-02 18:33:59] [Rank 0] step:3081/10000 train_time:221373ms step_avg:71.85ms +[2025-09-02 18:33:59] [Rank 0] step:3081/10000 train_time:221373ms step_avg:71.85ms +[2025-09-02 18:34:01] [Rank 0] step:3101/10000 train_time:222855ms step_avg:71.87ms +[2025-09-02 18:34:01] [Rank 0] step:3101/10000 train_time:222855ms step_avg:71.87ms +[2025-09-02 18:34:02] [Rank 0] step:3121/10000 train_time:224339ms step_avg:71.88ms +[2025-09-02 18:34:02] [Rank 0] step:3121/10000 train_time:224339ms step_avg:71.88ms +[2025-09-02 18:34:04] [Rank 0] step:3141/10000 train_time:225822ms step_avg:71.89ms +[2025-09-02 18:34:04] [Rank 0] step:3141/10000 train_time:225822ms step_avg:71.89ms +[2025-09-02 18:34:05] [Rank 0] step:3161/10000 train_time:227305ms step_avg:71.91ms +[2025-09-02 18:34:05] [Rank 0] step:3161/10000 train_time:227305ms step_avg:71.91ms +[2025-09-02 18:34:07] [Rank 0] step:3181/10000 train_time:228789ms step_avg:71.92ms +[2025-09-02 18:34:07] [Rank 0] step:3181/10000 train_time:228789ms step_avg:71.92ms +[2025-09-02 18:34:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:34:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:34:20] [Rank 0] PRINT: step:3200/10000 val_loss:4.3798 svd_entropy: attn_qk:H=0.6941,top10E=0.34,eRank=109.9,q75/q25=76.60 attn_vo:H=0.7786,top10E=0.21,eRank=204.5,q75/q25=102.49 mlp_w1:H=0.6827,top10E=0.40,eRank=122.6,q75/q25=12.27 mlp_w2:H=0.8110,top10E=0.18,eRank=226.8,q75/q25=39.60 vo_prod:H=0.6811,top10E=0.32,eRank=96.7,q75/q25=13668.01 train_time:230420ms step_avg:72.01ms +[2025-09-02 18:34:20] [Rank 0] PRINT: step:3200/10000 val_loss:4.3798 svd_entropy: attn_qk:H=0.6941,top10E=0.34,eRank=109.9,q75/q25=76.60 attn_vo:H=0.7786,top10E=0.21,eRank=204.5,q75/q25=102.49 mlp_w1:H=0.6827,top10E=0.40,eRank=122.6,q75/q25=12.27 mlp_w2:H=0.8110,top10E=0.18,eRank=226.8,q75/q25=39.60 vo_prod:H=0.6811,top10E=0.32,eRank=96.7,q75/q25=13668.01 train_time:230420ms step_avg:72.01ms +[2025-09-02 18:34:20] [Rank 0] step:3201/10000 train_time:230433ms step_avg:71.99ms +[2025-09-02 18:34:20] [Rank 0] step:3201/10000 train_time:230433ms step_avg:71.99ms +[2025-09-02 18:34:22] [Rank 0] step:3221/10000 train_time:231791ms step_avg:71.96ms +[2025-09-02 18:34:22] [Rank 0] step:3221/10000 train_time:231791ms step_avg:71.96ms +[2025-09-02 18:34:23] [Rank 0] step:3241/10000 train_time:233272ms step_avg:71.98ms +[2025-09-02 18:34:23] [Rank 0] step:3241/10000 train_time:233272ms step_avg:71.98ms +[2025-09-02 18:34:25] [Rank 0] step:3261/10000 train_time:234753ms step_avg:71.99ms +[2025-09-02 18:34:25] [Rank 0] step:3261/10000 train_time:234753ms step_avg:71.99ms +[2025-09-02 18:34:26] [Rank 0] step:3281/10000 train_time:236236ms step_avg:72.00ms +[2025-09-02 18:34:26] [Rank 0] step:3281/10000 train_time:236236ms step_avg:72.00ms +[2025-09-02 18:34:27] [Rank 0] step:3301/10000 train_time:237719ms step_avg:72.01ms +[2025-09-02 18:34:27] [Rank 0] step:3301/10000 train_time:237719ms step_avg:72.01ms +[2025-09-02 18:34:29] [Rank 0] step:3321/10000 train_time:239201ms step_avg:72.03ms +[2025-09-02 18:34:29] [Rank 0] step:3321/10000 train_time:239201ms step_avg:72.03ms +[2025-09-02 
18:34:30] [Rank 0] step:3341/10000 train_time:240685ms step_avg:72.04ms +[2025-09-02 18:34:30] [Rank 0] step:3341/10000 train_time:240685ms step_avg:72.04ms +[2025-09-02 18:34:32] [Rank 0] step:3361/10000 train_time:242167ms step_avg:72.05ms +[2025-09-02 18:34:32] [Rank 0] step:3361/10000 train_time:242167ms step_avg:72.05ms +[2025-09-02 18:34:33] [Rank 0] step:3381/10000 train_time:243651ms step_avg:72.06ms +[2025-09-02 18:34:33] [Rank 0] step:3381/10000 train_time:243651ms step_avg:72.06ms +[2025-09-02 18:34:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:34:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:34:47] [Rank 0] PRINT: step:3400/10000 val_loss:4.3370 svd_entropy: attn_qk:H=0.6987,top10E=0.33,eRank=112.8,q75/q25=80.97 attn_vo:H=0.7837,top10E=0.20,eRank=210.3,q75/q25=101.20 mlp_w1:H=0.6889,top10E=0.39,eRank=126.7,q75/q25=13.07 mlp_w2:H=0.8147,top10E=0.17,eRank=232.6,q75/q25=41.10 vo_prod:H=0.6869,top10E=0.31,eRank=100.6,q75/q25=13590.99 train_time:245284ms step_avg:72.14ms +[2025-09-02 18:34:47] [Rank 0] PRINT: step:3400/10000 val_loss:4.3370 svd_entropy: attn_qk:H=0.6987,top10E=0.33,eRank=112.8,q75/q25=80.97 attn_vo:H=0.7837,top10E=0.20,eRank=210.3,q75/q25=101.20 mlp_w1:H=0.6889,top10E=0.39,eRank=126.7,q75/q25=13.07 mlp_w2:H=0.8147,top10E=0.17,eRank=232.6,q75/q25=41.10 vo_prod:H=0.6869,top10E=0.31,eRank=100.6,q75/q25=13590.99 train_time:245284ms step_avg:72.14ms +[2025-09-02 18:34:47] [Rank 0] step:3401/10000 train_time:245296ms step_avg:72.12ms +[2025-09-02 18:34:47] [Rank 0] step:3401/10000 train_time:245296ms step_avg:72.12ms +[2025-09-02 18:34:48] [Rank 0] step:3421/10000 train_time:246653ms step_avg:72.10ms +[2025-09-02 18:34:48] [Rank 0] step:3421/10000 train_time:246653ms step_avg:72.10ms +[2025-09-02 18:34:50] [Rank 0] step:3441/10000 
train_time:248136ms step_avg:72.11ms +[2025-09-02 18:34:50] [Rank 0] step:3441/10000 train_time:248136ms step_avg:72.11ms +[2025-09-02 18:34:51] [Rank 0] step:3461/10000 train_time:249619ms step_avg:72.12ms +[2025-09-02 18:34:51] [Rank 0] step:3461/10000 train_time:249619ms step_avg:72.12ms +[2025-09-02 18:34:53] [Rank 0] step:3481/10000 train_time:251105ms step_avg:72.14ms +[2025-09-02 18:34:53] [Rank 0] step:3481/10000 train_time:251105ms step_avg:72.14ms +[2025-09-02 18:34:54] [Rank 0] step:3501/10000 train_time:252590ms step_avg:72.15ms +[2025-09-02 18:34:54] [Rank 0] step:3501/10000 train_time:252590ms step_avg:72.15ms +[2025-09-02 18:34:56] [Rank 0] step:3521/10000 train_time:254075ms step_avg:72.16ms +[2025-09-02 18:34:56] [Rank 0] step:3521/10000 train_time:254075ms step_avg:72.16ms +[2025-09-02 18:34:57] [Rank 0] step:3541/10000 train_time:255560ms step_avg:72.17ms +[2025-09-02 18:34:57] [Rank 0] step:3541/10000 train_time:255560ms step_avg:72.17ms +[2025-09-02 18:34:59] [Rank 0] step:3561/10000 train_time:257045ms step_avg:72.18ms +[2025-09-02 18:34:59] [Rank 0] step:3561/10000 train_time:257045ms step_avg:72.18ms +[2025-09-02 18:35:00] [Rank 0] step:3581/10000 train_time:258528ms step_avg:72.19ms +[2025-09-02 18:35:00] [Rank 0] step:3581/10000 train_time:258528ms step_avg:72.19ms +[2025-09-02 18:35:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:35:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:35:13] [Rank 0] PRINT: step:3600/10000 val_loss:4.3287 svd_entropy: attn_qk:H=0.7028,top10E=0.32,eRank=115.5,q75/q25=84.65 attn_vo:H=0.7883,top10E=0.20,eRank=215.7,q75/q25=100.26 mlp_w1:H=0.6947,top10E=0.38,eRank=130.6,q75/q25=13.87 mlp_w2:H=0.8178,top10E=0.17,eRank=237.7,q75/q25=42.52 vo_prod:H=0.6921,top10E=0.30,eRank=104.1,q75/q25=13402.53 train_time:260163ms step_avg:72.27ms +[2025-09-02 18:35:13] [Rank 0] PRINT: step:3600/10000 val_loss:4.3287 svd_entropy: attn_qk:H=0.7028,top10E=0.32,eRank=115.5,q75/q25=84.65 attn_vo:H=0.7883,top10E=0.20,eRank=215.7,q75/q25=100.26 mlp_w1:H=0.6947,top10E=0.38,eRank=130.6,q75/q25=13.87 mlp_w2:H=0.8178,top10E=0.17,eRank=237.7,q75/q25=42.52 vo_prod:H=0.6921,top10E=0.30,eRank=104.1,q75/q25=13402.53 train_time:260163ms step_avg:72.27ms +[2025-09-02 18:35:14] [Rank 0] step:3601/10000 train_time:260175ms step_avg:72.25ms +[2025-09-02 18:35:14] [Rank 0] step:3601/10000 train_time:260175ms step_avg:72.25ms +[2025-09-02 18:35:15] [Rank 0] step:3621/10000 train_time:261536ms step_avg:72.23ms +[2025-09-02 18:35:15] [Rank 0] step:3621/10000 train_time:261536ms step_avg:72.23ms +[2025-09-02 18:35:17] [Rank 0] step:3641/10000 train_time:263016ms step_avg:72.24ms +[2025-09-02 18:35:17] [Rank 0] step:3641/10000 train_time:263016ms step_avg:72.24ms +[2025-09-02 18:35:18] [Rank 0] step:3661/10000 train_time:264497ms step_avg:72.25ms +[2025-09-02 18:35:18] [Rank 0] step:3661/10000 train_time:264497ms step_avg:72.25ms +[2025-09-02 18:35:19] [Rank 0] step:3681/10000 train_time:265980ms step_avg:72.26ms +[2025-09-02 18:35:19] [Rank 0] step:3681/10000 train_time:265980ms step_avg:72.26ms +[2025-09-02 18:35:21] [Rank 0] step:3701/10000 train_time:267462ms step_avg:72.27ms +[2025-09-02 18:35:21] [Rank 0] step:3701/10000 train_time:267462ms step_avg:72.27ms +[2025-09-02 18:35:22] [Rank 0] step:3721/10000 train_time:268969ms step_avg:72.28ms +[2025-09-02 18:35:22] [Rank 0] step:3721/10000 train_time:268969ms step_avg:72.28ms +[2025-09-02 
18:35:24] [Rank 0] step:3741/10000 train_time:270487ms step_avg:72.30ms +[2025-09-02 18:35:24] [Rank 0] step:3741/10000 train_time:270487ms step_avg:72.30ms +[2025-09-02 18:35:26] [Rank 0] step:3761/10000 train_time:272008ms step_avg:72.32ms +[2025-09-02 18:35:26] [Rank 0] step:3761/10000 train_time:272008ms step_avg:72.32ms +[2025-09-02 18:35:27] [Rank 0] step:3781/10000 train_time:273528ms step_avg:72.34ms +[2025-09-02 18:35:27] [Rank 0] step:3781/10000 train_time:273528ms step_avg:72.34ms +[2025-09-02 18:35:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:35:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:35:40] [Rank 0] PRINT: step:3800/10000 val_loss:4.2713 svd_entropy: attn_qk:H=0.7065,top10E=0.32,eRank=117.9,q75/q25=88.70 attn_vo:H=0.7925,top10E=0.19,eRank=220.8,q75/q25=98.33 mlp_w1:H=0.7002,top10E=0.38,eRank=134.4,q75/q25=14.64 mlp_w2:H=0.8208,top10E=0.17,eRank=242.7,q75/q25=42.93 vo_prod:H=0.6967,top10E=0.29,eRank=107.5,q75/q25=12782.64 train_time:275201ms step_avg:72.42ms +[2025-09-02 18:35:40] [Rank 0] PRINT: step:3800/10000 val_loss:4.2713 svd_entropy: attn_qk:H=0.7065,top10E=0.32,eRank=117.9,q75/q25=88.70 attn_vo:H=0.7925,top10E=0.19,eRank=220.8,q75/q25=98.33 mlp_w1:H=0.7002,top10E=0.38,eRank=134.4,q75/q25=14.64 mlp_w2:H=0.8208,top10E=0.17,eRank=242.7,q75/q25=42.93 vo_prod:H=0.6967,top10E=0.29,eRank=107.5,q75/q25=12782.64 train_time:275201ms step_avg:72.42ms +[2025-09-02 18:35:40] [Rank 0] step:3801/10000 train_time:275213ms step_avg:72.41ms +[2025-09-02 18:35:40] [Rank 0] step:3801/10000 train_time:275213ms step_avg:72.41ms +[2025-09-02 18:35:42] [Rank 0] step:3821/10000 train_time:276604ms step_avg:72.39ms +[2025-09-02 18:35:42] [Rank 0] step:3821/10000 train_time:276604ms step_avg:72.39ms +[2025-09-02 18:35:43] [Rank 0] step:3841/10000 train_time:278124ms 
step_avg:72.41ms +[2025-09-02 18:35:43] [Rank 0] step:3841/10000 train_time:278124ms step_avg:72.41ms +[2025-09-02 18:35:45] [Rank 0] step:3861/10000 train_time:279644ms step_avg:72.43ms +[2025-09-02 18:35:45] [Rank 0] step:3861/10000 train_time:279644ms step_avg:72.43ms +[2025-09-02 18:35:46] [Rank 0] step:3881/10000 train_time:281162ms step_avg:72.45ms +[2025-09-02 18:35:46] [Rank 0] step:3881/10000 train_time:281162ms step_avg:72.45ms +[2025-09-02 18:35:48] [Rank 0] step:3901/10000 train_time:282680ms step_avg:72.46ms +[2025-09-02 18:35:48] [Rank 0] step:3901/10000 train_time:282680ms step_avg:72.46ms +[2025-09-02 18:35:49] [Rank 0] step:3921/10000 train_time:284198ms step_avg:72.48ms +[2025-09-02 18:35:49] [Rank 0] step:3921/10000 train_time:284198ms step_avg:72.48ms +[2025-09-02 18:35:51] [Rank 0] step:3941/10000 train_time:285717ms step_avg:72.50ms +[2025-09-02 18:35:51] [Rank 0] step:3941/10000 train_time:285717ms step_avg:72.50ms +[2025-09-02 18:35:53] [Rank 0] step:3961/10000 train_time:287234ms step_avg:72.52ms +[2025-09-02 18:35:53] [Rank 0] step:3961/10000 train_time:287234ms step_avg:72.52ms +[2025-09-02 18:35:54] [Rank 0] step:3981/10000 train_time:288752ms step_avg:72.53ms +[2025-09-02 18:35:54] [Rank 0] step:3981/10000 train_time:288752ms step_avg:72.53ms +[2025-09-02 18:35:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:35:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:36:07] [Rank 0] PRINT: step:4000/10000 val_loss:4.2444 svd_entropy: attn_qk:H=0.7101,top10E=0.31,eRank=120.4,q75/q25=91.98 attn_vo:H=0.7963,top10E=0.19,eRank=225.5,q75/q25=96.17 mlp_w1:H=0.7053,top10E=0.37,eRank=138.1,q75/q25=15.42 mlp_w2:H=0.8235,top10E=0.16,eRank=247.1,q75/q25=44.07 vo_prod:H=0.7012,top10E=0.29,eRank=110.7,q75/q25=12190.34 train_time:290421ms step_avg:72.61ms +[2025-09-02 18:36:07] [Rank 0] PRINT: step:4000/10000 val_loss:4.2444 svd_entropy: attn_qk:H=0.7101,top10E=0.31,eRank=120.4,q75/q25=91.98 attn_vo:H=0.7963,top10E=0.19,eRank=225.5,q75/q25=96.17 mlp_w1:H=0.7053,top10E=0.37,eRank=138.1,q75/q25=15.42 mlp_w2:H=0.8235,top10E=0.16,eRank=247.1,q75/q25=44.07 vo_prod:H=0.7012,top10E=0.29,eRank=110.7,q75/q25=12190.34 train_time:290421ms step_avg:72.61ms +[2025-09-02 18:36:07] [Rank 0] step:4001/10000 train_time:290434ms step_avg:72.59ms +[2025-09-02 18:36:07] [Rank 0] step:4001/10000 train_time:290434ms step_avg:72.59ms +[2025-09-02 18:36:09] [Rank 0] step:4021/10000 train_time:291801ms step_avg:72.57ms +[2025-09-02 18:36:09] [Rank 0] step:4021/10000 train_time:291801ms step_avg:72.57ms +[2025-09-02 18:36:10] [Rank 0] step:4041/10000 train_time:293319ms step_avg:72.59ms +[2025-09-02 18:36:10] [Rank 0] step:4041/10000 train_time:293319ms step_avg:72.59ms +[2025-09-02 18:36:12] [Rank 0] step:4061/10000 train_time:294834ms step_avg:72.60ms +[2025-09-02 18:36:12] [Rank 0] step:4061/10000 train_time:294834ms step_avg:72.60ms +[2025-09-02 18:36:13] [Rank 0] step:4081/10000 train_time:296350ms step_avg:72.62ms +[2025-09-02 18:36:13] [Rank 0] step:4081/10000 train_time:296350ms step_avg:72.62ms +[2025-09-02 18:36:15] [Rank 0] step:4101/10000 train_time:297865ms step_avg:72.63ms +[2025-09-02 18:36:15] [Rank 0] step:4101/10000 train_time:297865ms step_avg:72.63ms +[2025-09-02 18:36:16] [Rank 0] step:4121/10000 train_time:299382ms step_avg:72.65ms +[2025-09-02 18:36:16] [Rank 0] step:4121/10000 train_time:299382ms step_avg:72.65ms +[2025-09-02 
18:36:18] [Rank 0] step:4141/10000 train_time:300901ms step_avg:72.66ms +[2025-09-02 18:36:18] [Rank 0] step:4141/10000 train_time:300901ms step_avg:72.66ms +[2025-09-02 18:36:19] [Rank 0] step:4161/10000 train_time:302420ms step_avg:72.68ms +[2025-09-02 18:36:19] [Rank 0] step:4161/10000 train_time:302420ms step_avg:72.68ms +[2025-09-02 18:36:21] [Rank 0] step:4181/10000 train_time:303941ms step_avg:72.70ms +[2025-09-02 18:36:21] [Rank 0] step:4181/10000 train_time:303941ms step_avg:72.70ms +[2025-09-02 18:36:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:36:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:36:34] [Rank 0] PRINT: step:4200/10000 val_loss:4.2268 svd_entropy: attn_qk:H=0.7137,top10E=0.31,eRank=122.8,q75/q25=94.72 attn_vo:H=0.7999,top10E=0.18,eRank=230.2,q75/q25=94.15 mlp_w1:H=0.7098,top10E=0.36,eRank=141.5,q75/q25=16.18 mlp_w2:H=0.8259,top10E=0.16,eRank=251.4,q75/q25=44.73 vo_prod:H=0.7053,top10E=0.28,eRank=113.9,q75/q25=11781.61 train_time:305612ms step_avg:72.76ms +[2025-09-02 18:36:34] [Rank 0] PRINT: step:4200/10000 val_loss:4.2268 svd_entropy: attn_qk:H=0.7137,top10E=0.31,eRank=122.8,q75/q25=94.72 attn_vo:H=0.7999,top10E=0.18,eRank=230.2,q75/q25=94.15 mlp_w1:H=0.7098,top10E=0.36,eRank=141.5,q75/q25=16.18 mlp_w2:H=0.8259,top10E=0.16,eRank=251.4,q75/q25=44.73 vo_prod:H=0.7053,top10E=0.28,eRank=113.9,q75/q25=11781.61 train_time:305612ms step_avg:72.76ms +[2025-09-02 18:36:34] [Rank 0] step:4201/10000 train_time:305624ms step_avg:72.75ms +[2025-09-02 18:36:34] [Rank 0] step:4201/10000 train_time:305624ms step_avg:72.75ms +[2025-09-02 18:36:36] [Rank 0] step:4221/10000 train_time:307021ms step_avg:72.74ms +[2025-09-02 18:36:36] [Rank 0] step:4221/10000 train_time:307021ms step_avg:72.74ms +[2025-09-02 18:36:37] [Rank 0] step:4241/10000 train_time:308539ms 
step_avg:72.75ms +[2025-09-02 18:36:37] [Rank 0] step:4241/10000 train_time:308539ms step_avg:72.75ms +[2025-09-02 18:36:39] [Rank 0] step:4261/10000 train_time:310057ms step_avg:72.77ms +[2025-09-02 18:36:39] [Rank 0] step:4261/10000 train_time:310057ms step_avg:72.77ms +[2025-09-02 18:36:40] [Rank 0] step:4281/10000 train_time:311575ms step_avg:72.78ms +[2025-09-02 18:36:40] [Rank 0] step:4281/10000 train_time:311575ms step_avg:72.78ms +[2025-09-02 18:36:42] [Rank 0] step:4301/10000 train_time:313092ms step_avg:72.80ms +[2025-09-02 18:36:42] [Rank 0] step:4301/10000 train_time:313092ms step_avg:72.80ms +[2025-09-02 18:36:43] [Rank 0] step:4321/10000 train_time:314611ms step_avg:72.81ms +[2025-09-02 18:36:43] [Rank 0] step:4321/10000 train_time:314611ms step_avg:72.81ms +[2025-09-02 18:36:45] [Rank 0] step:4341/10000 train_time:316127ms step_avg:72.82ms +[2025-09-02 18:36:45] [Rank 0] step:4341/10000 train_time:316127ms step_avg:72.82ms +[2025-09-02 18:36:46] [Rank 0] step:4361/10000 train_time:317649ms step_avg:72.84ms +[2025-09-02 18:36:46] [Rank 0] step:4361/10000 train_time:317649ms step_avg:72.84ms +[2025-09-02 18:36:48] [Rank 0] step:4381/10000 train_time:319167ms step_avg:72.85ms +[2025-09-02 18:36:48] [Rank 0] step:4381/10000 train_time:319167ms step_avg:72.85ms +[2025-09-02 18:36:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:36:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:37:01] [Rank 0] PRINT: step:4400/10000 val_loss:4.2024 svd_entropy: attn_qk:H=0.7170,top10E=0.30,eRank=125.3,q75/q25=96.88 attn_vo:H=0.8033,top10E=0.18,eRank=234.6,q75/q25=91.12 mlp_w1:H=0.7142,top10E=0.36,eRank=144.9,q75/q25=16.95 mlp_w2:H=0.8281,top10E=0.16,eRank=255.3,q75/q25=45.81 vo_prod:H=0.7092,top10E=0.28,eRank=116.9,q75/q25=10941.53 train_time:320839ms step_avg:72.92ms +[2025-09-02 18:37:01] [Rank 0] PRINT: step:4400/10000 val_loss:4.2024 svd_entropy: attn_qk:H=0.7170,top10E=0.30,eRank=125.3,q75/q25=96.88 attn_vo:H=0.8033,top10E=0.18,eRank=234.6,q75/q25=91.12 mlp_w1:H=0.7142,top10E=0.36,eRank=144.9,q75/q25=16.95 mlp_w2:H=0.8281,top10E=0.16,eRank=255.3,q75/q25=45.81 vo_prod:H=0.7092,top10E=0.28,eRank=116.9,q75/q25=10941.53 train_time:320839ms step_avg:72.92ms +[2025-09-02 18:37:01] [Rank 0] step:4401/10000 train_time:320852ms step_avg:72.90ms +[2025-09-02 18:37:01] [Rank 0] step:4401/10000 train_time:320852ms step_avg:72.90ms +[2025-09-02 18:37:03] [Rank 0] step:4421/10000 train_time:322219ms step_avg:72.88ms +[2025-09-02 18:37:03] [Rank 0] step:4421/10000 train_time:322219ms step_avg:72.88ms +[2025-09-02 18:37:04] [Rank 0] step:4441/10000 train_time:323736ms step_avg:72.90ms +[2025-09-02 18:37:04] [Rank 0] step:4441/10000 train_time:323736ms step_avg:72.90ms +[2025-09-02 18:37:06] [Rank 0] step:4461/10000 train_time:325260ms step_avg:72.91ms +[2025-09-02 18:37:06] [Rank 0] step:4461/10000 train_time:325260ms step_avg:72.91ms +[2025-09-02 18:37:07] [Rank 0] step:4481/10000 train_time:326787ms step_avg:72.93ms +[2025-09-02 18:37:07] [Rank 0] step:4481/10000 train_time:326787ms step_avg:72.93ms +[2025-09-02 18:37:09] [Rank 0] step:4501/10000 train_time:328314ms step_avg:72.94ms +[2025-09-02 18:37:09] [Rank 0] step:4501/10000 train_time:328314ms step_avg:72.94ms +[2025-09-02 18:37:10] [Rank 0] step:4521/10000 train_time:329840ms step_avg:72.96ms +[2025-09-02 18:37:10] [Rank 0] step:4521/10000 train_time:329840ms step_avg:72.96ms +[2025-09-02 
18:37:12] [Rank 0] step:4541/10000 train_time:331367ms step_avg:72.97ms +[2025-09-02 18:37:12] [Rank 0] step:4541/10000 train_time:331367ms step_avg:72.97ms +[2025-09-02 18:37:13] [Rank 0] step:4561/10000 train_time:332897ms step_avg:72.99ms +[2025-09-02 18:37:13] [Rank 0] step:4561/10000 train_time:332897ms step_avg:72.99ms +[2025-09-02 18:37:15] [Rank 0] step:4581/10000 train_time:334426ms step_avg:73.00ms +[2025-09-02 18:37:15] [Rank 0] step:4581/10000 train_time:334426ms step_avg:73.00ms +[2025-09-02 18:37:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:37:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:37:28] [Rank 0] PRINT: step:4600/10000 val_loss:4.1747 svd_entropy: attn_qk:H=0.7202,top10E=0.30,eRank=127.6,q75/q25=99.38 attn_vo:H=0.8065,top10E=0.18,eRank=238.9,q75/q25=89.42 mlp_w1:H=0.7183,top10E=0.35,eRank=148.2,q75/q25=17.71 mlp_w2:H=0.8302,top10E=0.15,eRank=259.1,q75/q25=46.59 vo_prod:H=0.7130,top10E=0.27,eRank=119.9,q75/q25=10507.24 train_time:336108ms step_avg:73.07ms +[2025-09-02 18:37:28] [Rank 0] PRINT: step:4600/10000 val_loss:4.1747 svd_entropy: attn_qk:H=0.7202,top10E=0.30,eRank=127.6,q75/q25=99.38 attn_vo:H=0.8065,top10E=0.18,eRank=238.9,q75/q25=89.42 mlp_w1:H=0.7183,top10E=0.35,eRank=148.2,q75/q25=17.71 mlp_w2:H=0.8302,top10E=0.15,eRank=259.1,q75/q25=46.59 vo_prod:H=0.7130,top10E=0.27,eRank=119.9,q75/q25=10507.24 train_time:336108ms step_avg:73.07ms +[2025-09-02 18:37:28] [Rank 0] step:4601/10000 train_time:336120ms step_avg:73.05ms +[2025-09-02 18:37:28] [Rank 0] step:4601/10000 train_time:336120ms step_avg:73.05ms +[2025-09-02 18:37:30] [Rank 0] step:4621/10000 train_time:337497ms step_avg:73.04ms +[2025-09-02 18:37:30] [Rank 0] step:4621/10000 train_time:337497ms step_avg:73.04ms +[2025-09-02 18:37:31] [Rank 0] step:4641/10000 train_time:339021ms 
step_avg:73.05ms +[2025-09-02 18:37:31] [Rank 0] step:4641/10000 train_time:339021ms step_avg:73.05ms +[2025-09-02 18:37:33] [Rank 0] step:4661/10000 train_time:340548ms step_avg:73.06ms +[2025-09-02 18:37:33] [Rank 0] step:4661/10000 train_time:340548ms step_avg:73.06ms +[2025-09-02 18:37:34] [Rank 0] step:4681/10000 train_time:342073ms step_avg:73.08ms +[2025-09-02 18:37:34] [Rank 0] step:4681/10000 train_time:342073ms step_avg:73.08ms +[2025-09-02 18:37:36] [Rank 0] step:4701/10000 train_time:343599ms step_avg:73.09ms +[2025-09-02 18:37:36] [Rank 0] step:4701/10000 train_time:343599ms step_avg:73.09ms +[2025-09-02 18:37:37] [Rank 0] step:4721/10000 train_time:345124ms step_avg:73.10ms +[2025-09-02 18:37:37] [Rank 0] step:4721/10000 train_time:345124ms step_avg:73.10ms +[2025-09-02 18:37:39] [Rank 0] step:4741/10000 train_time:346650ms step_avg:73.12ms +[2025-09-02 18:37:39] [Rank 0] step:4741/10000 train_time:346650ms step_avg:73.12ms +[2025-09-02 18:37:40] [Rank 0] step:4761/10000 train_time:348177ms step_avg:73.13ms +[2025-09-02 18:37:40] [Rank 0] step:4761/10000 train_time:348177ms step_avg:73.13ms +[2025-09-02 18:37:42] [Rank 0] step:4781/10000 train_time:349701ms step_avg:73.14ms +[2025-09-02 18:37:42] [Rank 0] step:4781/10000 train_time:349701ms step_avg:73.14ms +[2025-09-02 18:37:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:37:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:37:55] [Rank 0] PRINT: step:4800/10000 val_loss:4.1599 svd_entropy: attn_qk:H=0.7232,top10E=0.29,eRank=129.8,q75/q25=101.21 attn_vo:H=0.8095,top10E=0.17,eRank=243.1,q75/q25=87.03 mlp_w1:H=0.7220,top10E=0.35,eRank=151.3,q75/q25=18.49 mlp_w2:H=0.8320,top10E=0.15,eRank=262.5,q75/q25=47.74 vo_prod:H=0.7164,top10E=0.27,eRank=122.7,q75/q25=9668.81 train_time:351382ms step_avg:73.20ms +[2025-09-02 18:37:55] [Rank 0] PRINT: step:4800/10000 val_loss:4.1599 svd_entropy: attn_qk:H=0.7232,top10E=0.29,eRank=129.8,q75/q25=101.21 attn_vo:H=0.8095,top10E=0.17,eRank=243.1,q75/q25=87.03 mlp_w1:H=0.7220,top10E=0.35,eRank=151.3,q75/q25=18.49 mlp_w2:H=0.8320,top10E=0.15,eRank=262.5,q75/q25=47.74 vo_prod:H=0.7164,top10E=0.27,eRank=122.7,q75/q25=9668.81 train_time:351382ms step_avg:73.20ms +[2025-09-02 18:37:55] [Rank 0] step:4801/10000 train_time:351395ms step_avg:73.19ms +[2025-09-02 18:37:55] [Rank 0] step:4801/10000 train_time:351395ms step_avg:73.19ms +[2025-09-02 18:37:57] [Rank 0] step:4821/10000 train_time:352776ms step_avg:73.17ms +[2025-09-02 18:37:57] [Rank 0] step:4821/10000 train_time:352776ms step_avg:73.17ms +[2025-09-02 18:37:59] [Rank 0] step:4841/10000 train_time:354298ms step_avg:73.19ms +[2025-09-02 18:37:59] [Rank 0] step:4841/10000 train_time:354298ms step_avg:73.19ms +[2025-09-02 18:38:00] [Rank 0] step:4861/10000 train_time:355824ms step_avg:73.20ms +[2025-09-02 18:38:00] [Rank 0] step:4861/10000 train_time:355824ms step_avg:73.20ms +[2025-09-02 18:38:02] [Rank 0] step:4881/10000 train_time:357348ms step_avg:73.21ms +[2025-09-02 18:38:02] [Rank 0] step:4881/10000 train_time:357348ms step_avg:73.21ms +[2025-09-02 18:38:03] [Rank 0] step:4901/10000 train_time:358869ms step_avg:73.22ms +[2025-09-02 18:38:03] [Rank 0] step:4901/10000 train_time:358869ms step_avg:73.22ms +[2025-09-02 18:38:05] [Rank 0] step:4921/10000 train_time:360397ms step_avg:73.24ms +[2025-09-02 18:38:05] [Rank 0] step:4921/10000 train_time:360397ms step_avg:73.24ms +[2025-09-02 
18:38:06] [Rank 0] step:4941/10000 train_time:361925ms step_avg:73.25ms +[2025-09-02 18:38:06] [Rank 0] step:4941/10000 train_time:361925ms step_avg:73.25ms +[2025-09-02 18:38:08] [Rank 0] step:4961/10000 train_time:363449ms step_avg:73.26ms +[2025-09-02 18:38:08] [Rank 0] step:4961/10000 train_time:363449ms step_avg:73.26ms +[2025-09-02 18:38:09] [Rank 0] step:4981/10000 train_time:364976ms step_avg:73.27ms +[2025-09-02 18:38:09] [Rank 0] step:4981/10000 train_time:364976ms step_avg:73.27ms +[2025-09-02 18:38:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:38:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:38:23] [Rank 0] PRINT: step:5000/10000 val_loss:4.1392 svd_entropy: attn_qk:H=0.7261,top10E=0.29,eRank=132.0,q75/q25=103.32 attn_vo:H=0.8123,top10E=0.17,eRank=246.9,q75/q25=84.24 mlp_w1:H=0.7254,top10E=0.34,eRank=154.2,q75/q25=19.20 mlp_w2:H=0.8337,top10E=0.15,eRank=265.8,q75/q25=48.74 vo_prod:H=0.7196,top10E=0.27,eRank=125.4,q75/q25=8967.19 train_time:366653ms step_avg:73.33ms +[2025-09-02 18:38:23] [Rank 0] PRINT: step:5000/10000 val_loss:4.1392 svd_entropy: attn_qk:H=0.7261,top10E=0.29,eRank=132.0,q75/q25=103.32 attn_vo:H=0.8123,top10E=0.17,eRank=246.9,q75/q25=84.24 mlp_w1:H=0.7254,top10E=0.34,eRank=154.2,q75/q25=19.20 mlp_w2:H=0.8337,top10E=0.15,eRank=265.8,q75/q25=48.74 vo_prod:H=0.7196,top10E=0.27,eRank=125.4,q75/q25=8967.19 train_time:366653ms step_avg:73.33ms +[2025-09-02 18:38:23] [Rank 0] step:5001/10000 train_time:366667ms step_avg:73.32ms +[2025-09-02 18:38:23] [Rank 0] step:5001/10000 train_time:366667ms step_avg:73.32ms +[2025-09-02 18:38:24] [Rank 0] step:5021/10000 train_time:368052ms step_avg:73.30ms +[2025-09-02 18:38:24] [Rank 0] step:5021/10000 train_time:368052ms step_avg:73.30ms +[2025-09-02 18:38:26] [Rank 0] step:5041/10000 train_time:369576ms 
step_avg:73.31ms +[2025-09-02 18:38:26] [Rank 0] step:5041/10000 train_time:369576ms step_avg:73.31ms +[2025-09-02 18:38:27] [Rank 0] step:5061/10000 train_time:371101ms step_avg:73.33ms +[2025-09-02 18:38:27] [Rank 0] step:5061/10000 train_time:371101ms step_avg:73.33ms +[2025-09-02 18:38:29] [Rank 0] step:5081/10000 train_time:372626ms step_avg:73.34ms +[2025-09-02 18:38:29] [Rank 0] step:5081/10000 train_time:372626ms step_avg:73.34ms +[2025-09-02 18:38:30] [Rank 0] step:5101/10000 train_time:374149ms step_avg:73.35ms +[2025-09-02 18:38:30] [Rank 0] step:5101/10000 train_time:374149ms step_avg:73.35ms +[2025-09-02 18:38:32] [Rank 0] step:5121/10000 train_time:375676ms step_avg:73.36ms +[2025-09-02 18:38:32] [Rank 0] step:5121/10000 train_time:375676ms step_avg:73.36ms +[2025-09-02 18:38:33] [Rank 0] step:5141/10000 train_time:377203ms step_avg:73.37ms +[2025-09-02 18:38:33] [Rank 0] step:5141/10000 train_time:377203ms step_avg:73.37ms +[2025-09-02 18:38:35] [Rank 0] step:5161/10000 train_time:378731ms step_avg:73.38ms +[2025-09-02 18:38:35] [Rank 0] step:5161/10000 train_time:378731ms step_avg:73.38ms +[2025-09-02 18:38:36] [Rank 0] step:5181/10000 train_time:380257ms step_avg:73.39ms +[2025-09-02 18:38:36] [Rank 0] step:5181/10000 train_time:380257ms step_avg:73.39ms +[2025-09-02 18:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:38:50] [Rank 0] PRINT: step:5200/10000 val_loss:4.1189 svd_entropy: attn_qk:H=0.7288,top10E=0.28,eRank=134.2,q75/q25=104.96 attn_vo:H=0.8149,top10E=0.17,eRank=250.5,q75/q25=81.76 mlp_w1:H=0.7291,top10E=0.34,eRank=157.3,q75/q25=19.97 mlp_w2:H=0.8352,top10E=0.15,eRank=268.7,q75/q25=49.88 vo_prod:H=0.7226,top10E=0.26,eRank=128.0,q75/q25=8293.41 train_time:381962ms step_avg:73.45ms +[2025-09-02 18:38:50] [Rank 0] PRINT: step:5200/10000 val_loss:4.1189 svd_entropy: attn_qk:H=0.7288,top10E=0.28,eRank=134.2,q75/q25=104.96 attn_vo:H=0.8149,top10E=0.17,eRank=250.5,q75/q25=81.76 mlp_w1:H=0.7291,top10E=0.34,eRank=157.3,q75/q25=19.97 mlp_w2:H=0.8352,top10E=0.15,eRank=268.7,q75/q25=49.88 vo_prod:H=0.7226,top10E=0.26,eRank=128.0,q75/q25=8293.41 train_time:381962ms step_avg:73.45ms +[2025-09-02 18:38:50] [Rank 0] step:5201/10000 train_time:381975ms step_avg:73.44ms +[2025-09-02 18:38:50] [Rank 0] step:5201/10000 train_time:381975ms step_avg:73.44ms +[2025-09-02 18:38:52] [Rank 0] step:5221/10000 train_time:383390ms step_avg:73.43ms +[2025-09-02 18:38:52] [Rank 0] step:5221/10000 train_time:383390ms step_avg:73.43ms +[2025-09-02 18:38:53] [Rank 0] step:5241/10000 train_time:384947ms step_avg:73.45ms +[2025-09-02 18:38:53] [Rank 0] step:5241/10000 train_time:384947ms step_avg:73.45ms +[2025-09-02 18:38:55] [Rank 0] step:5261/10000 train_time:386504ms step_avg:73.47ms +[2025-09-02 18:38:55] [Rank 0] step:5261/10000 train_time:386504ms step_avg:73.47ms +[2025-09-02 18:38:56] [Rank 0] step:5281/10000 train_time:388062ms step_avg:73.48ms +[2025-09-02 18:38:56] [Rank 0] step:5281/10000 train_time:388062ms step_avg:73.48ms +[2025-09-02 18:38:58] [Rank 0] step:5301/10000 train_time:389630ms step_avg:73.50ms +[2025-09-02 18:38:58] [Rank 0] step:5301/10000 train_time:389630ms step_avg:73.50ms +[2025-09-02 18:38:59] [Rank 0] step:5321/10000 train_time:391189ms step_avg:73.52ms +[2025-09-02 18:38:59] [Rank 0] step:5321/10000 train_time:391189ms step_avg:73.52ms +[2025-09-02 
18:39:01] [Rank 0] step:5341/10000 train_time:392747ms step_avg:73.53ms +[2025-09-02 18:39:01] [Rank 0] step:5341/10000 train_time:392747ms step_avg:73.53ms +[2025-09-02 18:39:02] [Rank 0] step:5361/10000 train_time:394308ms step_avg:73.55ms +[2025-09-02 18:39:02] [Rank 0] step:5361/10000 train_time:394308ms step_avg:73.55ms +[2025-09-02 18:39:04] [Rank 0] step:5381/10000 train_time:395869ms step_avg:73.57ms +[2025-09-02 18:39:04] [Rank 0] step:5381/10000 train_time:395869ms step_avg:73.57ms +[2025-09-02 18:39:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:39:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:39:17] [Rank 0] PRINT: step:5400/10000 val_loss:4.0998 svd_entropy: attn_qk:H=0.7313,top10E=0.28,eRank=136.1,q75/q25=106.22 attn_vo:H=0.8174,top10E=0.16,eRank=254.0,q75/q25=79.47 mlp_w1:H=0.7324,top10E=0.33,eRank=160.2,q75/q25=20.63 mlp_w2:H=0.8368,top10E=0.14,eRank=271.7,q75/q25=50.20 vo_prod:H=0.7255,top10E=0.26,eRank=130.6,q75/q25=7914.90 train_time:397583ms step_avg:73.63ms +[2025-09-02 18:39:17] [Rank 0] PRINT: step:5400/10000 val_loss:4.0998 svd_entropy: attn_qk:H=0.7313,top10E=0.28,eRank=136.1,q75/q25=106.22 attn_vo:H=0.8174,top10E=0.16,eRank=254.0,q75/q25=79.47 mlp_w1:H=0.7324,top10E=0.33,eRank=160.2,q75/q25=20.63 mlp_w2:H=0.8368,top10E=0.14,eRank=271.7,q75/q25=50.20 vo_prod:H=0.7255,top10E=0.26,eRank=130.6,q75/q25=7914.90 train_time:397583ms step_avg:73.63ms +[2025-09-02 18:39:17] [Rank 0] step:5401/10000 train_time:397594ms step_avg:73.61ms +[2025-09-02 18:39:17] [Rank 0] step:5401/10000 train_time:397594ms step_avg:73.61ms +[2025-09-02 18:39:19] [Rank 0] step:5421/10000 train_time:399033ms step_avg:73.61ms +[2025-09-02 18:39:19] [Rank 0] step:5421/10000 train_time:399033ms step_avg:73.61ms +[2025-09-02 18:39:21] [Rank 0] step:5441/10000 train_time:400586ms 
step_avg:73.62ms +[2025-09-02 18:39:21] [Rank 0] step:5441/10000 train_time:400586ms step_avg:73.62ms +[2025-09-02 18:39:22] [Rank 0] step:5461/10000 train_time:402146ms step_avg:73.64ms +[2025-09-02 18:39:22] [Rank 0] step:5461/10000 train_time:402146ms step_avg:73.64ms +[2025-09-02 18:39:24] [Rank 0] step:5481/10000 train_time:403709ms step_avg:73.66ms +[2025-09-02 18:39:24] [Rank 0] step:5481/10000 train_time:403709ms step_avg:73.66ms +[2025-09-02 18:39:25] [Rank 0] step:5501/10000 train_time:405273ms step_avg:73.67ms +[2025-09-02 18:39:25] [Rank 0] step:5501/10000 train_time:405273ms step_avg:73.67ms +[2025-09-02 18:39:27] [Rank 0] step:5521/10000 train_time:406837ms step_avg:73.69ms +[2025-09-02 18:39:27] [Rank 0] step:5521/10000 train_time:406837ms step_avg:73.69ms +[2025-09-02 18:39:28] [Rank 0] step:5541/10000 train_time:408431ms step_avg:73.71ms +[2025-09-02 18:39:28] [Rank 0] step:5541/10000 train_time:408431ms step_avg:73.71ms +[2025-09-02 18:39:30] [Rank 0] step:5561/10000 train_time:409990ms step_avg:73.73ms +[2025-09-02 18:39:30] [Rank 0] step:5561/10000 train_time:409990ms step_avg:73.73ms +[2025-09-02 18:39:32] [Rank 0] step:5581/10000 train_time:411551ms step_avg:73.74ms +[2025-09-02 18:39:32] [Rank 0] step:5581/10000 train_time:411551ms step_avg:73.74ms +[2025-09-02 18:39:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:39:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:39:45] [Rank 0] PRINT: step:5600/10000 val_loss:4.0869 svd_entropy: attn_qk:H=0.7337,top10E=0.28,eRank=138.1,q75/q25=106.30 attn_vo:H=0.8197,top10E=0.16,eRank=257.4,q75/q25=77.36 mlp_w1:H=0.7353,top10E=0.33,eRank=162.8,q75/q25=21.29 mlp_w2:H=0.8382,top10E=0.14,eRank=274.4,q75/q25=51.12 vo_prod:H=0.7281,top10E=0.26,eRank=132.9,q75/q25=7114.59 train_time:413269ms step_avg:73.80ms +[2025-09-02 18:39:45] [Rank 0] PRINT: step:5600/10000 val_loss:4.0869 svd_entropy: attn_qk:H=0.7337,top10E=0.28,eRank=138.1,q75/q25=106.30 attn_vo:H=0.8197,top10E=0.16,eRank=257.4,q75/q25=77.36 mlp_w1:H=0.7353,top10E=0.33,eRank=162.8,q75/q25=21.29 mlp_w2:H=0.8382,top10E=0.14,eRank=274.4,q75/q25=51.12 vo_prod:H=0.7281,top10E=0.26,eRank=132.9,q75/q25=7114.59 train_time:413269ms step_avg:73.80ms +[2025-09-02 18:39:45] [Rank 0] step:5601/10000 train_time:413280ms step_avg:73.79ms +[2025-09-02 18:39:45] [Rank 0] step:5601/10000 train_time:413280ms step_avg:73.79ms +[2025-09-02 18:39:46] [Rank 0] step:5621/10000 train_time:414716ms step_avg:73.78ms +[2025-09-02 18:39:46] [Rank 0] step:5621/10000 train_time:414716ms step_avg:73.78ms +[2025-09-02 18:39:48] [Rank 0] step:5641/10000 train_time:416273ms step_avg:73.79ms +[2025-09-02 18:39:48] [Rank 0] step:5641/10000 train_time:416273ms step_avg:73.79ms +[2025-09-02 18:39:50] [Rank 0] step:5661/10000 train_time:417825ms step_avg:73.81ms +[2025-09-02 18:39:50] [Rank 0] step:5661/10000 train_time:417825ms step_avg:73.81ms +[2025-09-02 18:39:51] [Rank 0] step:5681/10000 train_time:419388ms step_avg:73.82ms +[2025-09-02 18:39:51] [Rank 0] step:5681/10000 train_time:419388ms step_avg:73.82ms +[2025-09-02 18:39:53] [Rank 0] step:5701/10000 train_time:420944ms step_avg:73.84ms +[2025-09-02 18:39:53] [Rank 0] step:5701/10000 train_time:420944ms step_avg:73.84ms +[2025-09-02 18:39:54] [Rank 0] step:5721/10000 train_time:422505ms step_avg:73.85ms +[2025-09-02 18:39:54] [Rank 0] step:5721/10000 train_time:422505ms step_avg:73.85ms +[2025-09-02 
18:39:56] [Rank 0] step:5741/10000 train_time:424064ms step_avg:73.87ms +[2025-09-02 18:39:56] [Rank 0] step:5741/10000 train_time:424064ms step_avg:73.87ms +[2025-09-02 18:39:57] [Rank 0] step:5761/10000 train_time:425622ms step_avg:73.88ms +[2025-09-02 18:39:57] [Rank 0] step:5761/10000 train_time:425622ms step_avg:73.88ms +[2025-09-02 18:39:59] [Rank 0] step:5781/10000 train_time:427179ms step_avg:73.89ms +[2025-09-02 18:39:59] [Rank 0] step:5781/10000 train_time:427179ms step_avg:73.89ms +[2025-09-02 18:40:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:40:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:40:12] [Rank 0] PRINT: step:5800/10000 val_loss:4.0763 svd_entropy: attn_qk:H=0.7360,top10E=0.28,eRank=140.0,q75/q25=107.69 attn_vo:H=0.8219,top10E=0.16,eRank=260.6,q75/q25=74.66 mlp_w1:H=0.7382,top10E=0.32,eRank=165.4,q75/q25=22.06 mlp_w2:H=0.8396,top10E=0.14,eRank=277.1,q75/q25=51.54 vo_prod:H=0.7307,top10E=0.25,eRank=135.2,q75/q25=6585.01 train_time:428895ms step_avg:73.95ms +[2025-09-02 18:40:12] [Rank 0] PRINT: step:5800/10000 val_loss:4.0763 svd_entropy: attn_qk:H=0.7360,top10E=0.28,eRank=140.0,q75/q25=107.69 attn_vo:H=0.8219,top10E=0.16,eRank=260.6,q75/q25=74.66 mlp_w1:H=0.7382,top10E=0.32,eRank=165.4,q75/q25=22.06 mlp_w2:H=0.8396,top10E=0.14,eRank=277.1,q75/q25=51.54 vo_prod:H=0.7307,top10E=0.25,eRank=135.2,q75/q25=6585.01 train_time:428895ms step_avg:73.95ms +[2025-09-02 18:40:12] [Rank 0] step:5801/10000 train_time:428907ms step_avg:73.94ms +[2025-09-02 18:40:12] [Rank 0] step:5801/10000 train_time:428907ms step_avg:73.94ms +[2025-09-02 18:40:14] [Rank 0] step:5821/10000 train_time:430318ms step_avg:73.93ms +[2025-09-02 18:40:14] [Rank 0] step:5821/10000 train_time:430318ms step_avg:73.93ms +[2025-09-02 18:40:15] [Rank 0] step:5841/10000 train_time:431873ms 
step_avg:73.94ms +[2025-09-02 18:40:15] [Rank 0] step:5841/10000 train_time:431873ms step_avg:73.94ms +[2025-09-02 18:40:17] [Rank 0] step:5861/10000 train_time:433433ms step_avg:73.95ms +[2025-09-02 18:40:17] [Rank 0] step:5861/10000 train_time:433433ms step_avg:73.95ms +[2025-09-02 18:40:19] [Rank 0] step:5881/10000 train_time:434994ms step_avg:73.97ms +[2025-09-02 18:40:19] [Rank 0] step:5881/10000 train_time:434994ms step_avg:73.97ms +[2025-09-02 18:40:20] [Rank 0] step:5901/10000 train_time:436553ms step_avg:73.98ms +[2025-09-02 18:40:20] [Rank 0] step:5901/10000 train_time:436553ms step_avg:73.98ms +[2025-09-02 18:40:22] [Rank 0] step:5921/10000 train_time:438112ms step_avg:73.99ms +[2025-09-02 18:40:22] [Rank 0] step:5921/10000 train_time:438112ms step_avg:73.99ms +[2025-09-02 18:40:23] [Rank 0] step:5941/10000 train_time:439674ms step_avg:74.01ms +[2025-09-02 18:40:23] [Rank 0] step:5941/10000 train_time:439674ms step_avg:74.01ms +[2025-09-02 18:40:25] [Rank 0] step:5961/10000 train_time:441238ms step_avg:74.02ms +[2025-09-02 18:40:25] [Rank 0] step:5961/10000 train_time:441238ms step_avg:74.02ms +[2025-09-02 18:40:26] [Rank 0] step:5981/10000 train_time:442800ms step_avg:74.03ms +[2025-09-02 18:40:26] [Rank 0] step:5981/10000 train_time:442800ms step_avg:74.03ms +[2025-09-02 18:40:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:40:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:40:40] [Rank 0] PRINT: step:6000/10000 val_loss:4.0531 svd_entropy: attn_qk:H=0.7382,top10E=0.27,eRank=141.9,q75/q25=108.45 attn_vo:H=0.8239,top10E=0.16,eRank=263.7,q75/q25=72.58 mlp_w1:H=0.7410,top10E=0.32,eRank=168.1,q75/q25=22.57 mlp_w2:H=0.8410,top10E=0.14,eRank=279.8,q75/q25=51.17 vo_prod:H=0.7330,top10E=0.25,eRank=137.4,q75/q25=6111.58 train_time:444512ms step_avg:74.09ms +[2025-09-02 18:40:40] [Rank 0] PRINT: step:6000/10000 val_loss:4.0531 svd_entropy: attn_qk:H=0.7382,top10E=0.27,eRank=141.9,q75/q25=108.45 attn_vo:H=0.8239,top10E=0.16,eRank=263.7,q75/q25=72.58 mlp_w1:H=0.7410,top10E=0.32,eRank=168.1,q75/q25=22.57 mlp_w2:H=0.8410,top10E=0.14,eRank=279.8,q75/q25=51.17 vo_prod:H=0.7330,top10E=0.25,eRank=137.4,q75/q25=6111.58 train_time:444512ms step_avg:74.09ms +[2025-09-02 18:40:40] [Rank 0] step:6001/10000 train_time:444523ms step_avg:74.07ms +[2025-09-02 18:40:40] [Rank 0] step:6001/10000 train_time:444523ms step_avg:74.07ms +[2025-09-02 18:40:41] [Rank 0] step:6021/10000 train_time:445938ms step_avg:74.06ms +[2025-09-02 18:40:41] [Rank 0] step:6021/10000 train_time:445938ms step_avg:74.06ms +[2025-09-02 18:40:43] [Rank 0] step:6041/10000 train_time:447497ms step_avg:74.08ms +[2025-09-02 18:40:43] [Rank 0] step:6041/10000 train_time:447497ms step_avg:74.08ms +[2025-09-02 18:40:44] [Rank 0] step:6061/10000 train_time:449063ms step_avg:74.09ms +[2025-09-02 18:40:44] [Rank 0] step:6061/10000 train_time:449063ms step_avg:74.09ms +[2025-09-02 18:40:46] [Rank 0] step:6081/10000 train_time:450627ms step_avg:74.10ms +[2025-09-02 18:40:46] [Rank 0] step:6081/10000 train_time:450627ms step_avg:74.10ms +[2025-09-02 18:40:47] [Rank 0] step:6101/10000 train_time:452190ms step_avg:74.12ms +[2025-09-02 18:40:47] [Rank 0] step:6101/10000 train_time:452190ms step_avg:74.12ms +[2025-09-02 18:40:49] [Rank 0] step:6121/10000 train_time:453820ms step_avg:74.14ms +[2025-09-02 18:40:49] [Rank 0] step:6121/10000 train_time:453820ms step_avg:74.14ms +[2025-09-02 
18:40:51] [Rank 0] step:6141/10000 train_time:455389ms step_avg:74.16ms +[2025-09-02 18:40:51] [Rank 0] step:6141/10000 train_time:455389ms step_avg:74.16ms +[2025-09-02 18:40:52] [Rank 0] step:6161/10000 train_time:456954ms step_avg:74.17ms +[2025-09-02 18:40:52] [Rank 0] step:6161/10000 train_time:456954ms step_avg:74.17ms +[2025-09-02 18:40:54] [Rank 0] step:6181/10000 train_time:458515ms step_avg:74.18ms +[2025-09-02 18:40:54] [Rank 0] step:6181/10000 train_time:458515ms step_avg:74.18ms +[2025-09-02 18:40:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:40:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:41:07] [Rank 0] PRINT: step:6200/10000 val_loss:4.0369 svd_entropy: attn_qk:H=0.7404,top10E=0.27,eRank=143.7,q75/q25=109.13 attn_vo:H=0.8259,top10E=0.16,eRank=266.7,q75/q25=70.67 mlp_w1:H=0.7436,top10E=0.32,eRank=170.5,q75/q25=23.28 mlp_w2:H=0.8422,top10E=0.14,eRank=282.4,q75/q25=51.28 vo_prod:H=0.7354,top10E=0.25,eRank=139.7,q75/q25=5559.09 train_time:460235ms step_avg:74.23ms +[2025-09-02 18:41:07] [Rank 0] PRINT: step:6200/10000 val_loss:4.0369 svd_entropy: attn_qk:H=0.7404,top10E=0.27,eRank=143.7,q75/q25=109.13 attn_vo:H=0.8259,top10E=0.16,eRank=266.7,q75/q25=70.67 mlp_w1:H=0.7436,top10E=0.32,eRank=170.5,q75/q25=23.28 mlp_w2:H=0.8422,top10E=0.14,eRank=282.4,q75/q25=51.28 vo_prod:H=0.7354,top10E=0.25,eRank=139.7,q75/q25=5559.09 train_time:460235ms step_avg:74.23ms +[2025-09-02 18:41:07] [Rank 0] step:6201/10000 train_time:460247ms step_avg:74.22ms +[2025-09-02 18:41:07] [Rank 0] step:6201/10000 train_time:460247ms step_avg:74.22ms +[2025-09-02 18:41:09] [Rank 0] step:6221/10000 train_time:461680ms step_avg:74.21ms +[2025-09-02 18:41:09] [Rank 0] step:6221/10000 train_time:461680ms step_avg:74.21ms +[2025-09-02 18:41:10] [Rank 0] step:6241/10000 train_time:463239ms 
step_avg:74.23ms +[2025-09-02 18:41:10] [Rank 0] step:6241/10000 train_time:463239ms step_avg:74.23ms +[2025-09-02 18:41:12] [Rank 0] step:6261/10000 train_time:464802ms step_avg:74.24ms +[2025-09-02 18:41:12] [Rank 0] step:6261/10000 train_time:464802ms step_avg:74.24ms +[2025-09-02 18:41:13] [Rank 0] step:6281/10000 train_time:466369ms step_avg:74.25ms +[2025-09-02 18:41:13] [Rank 0] step:6281/10000 train_time:466369ms step_avg:74.25ms +[2025-09-02 18:41:15] [Rank 0] step:6301/10000 train_time:467937ms step_avg:74.26ms +[2025-09-02 18:41:15] [Rank 0] step:6301/10000 train_time:467937ms step_avg:74.26ms +[2025-09-02 18:41:17] [Rank 0] step:6321/10000 train_time:469501ms step_avg:74.28ms +[2025-09-02 18:41:17] [Rank 0] step:6321/10000 train_time:469501ms step_avg:74.28ms +[2025-09-02 18:41:18] [Rank 0] step:6341/10000 train_time:471070ms step_avg:74.29ms +[2025-09-02 18:41:18] [Rank 0] step:6341/10000 train_time:471070ms step_avg:74.29ms +[2025-09-02 18:41:20] [Rank 0] step:6361/10000 train_time:472641ms step_avg:74.30ms +[2025-09-02 18:41:20] [Rank 0] step:6361/10000 train_time:472641ms step_avg:74.30ms +[2025-09-02 18:41:21] [Rank 0] step:6381/10000 train_time:474213ms step_avg:74.32ms +[2025-09-02 18:41:21] [Rank 0] step:6381/10000 train_time:474213ms step_avg:74.32ms +[2025-09-02 18:41:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:41:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:41:35] [Rank 0] PRINT: step:6400/10000 val_loss:4.0202 svd_entropy: attn_qk:H=0.7423,top10E=0.27,eRank=145.4,q75/q25=109.89 attn_vo:H=0.8276,top10E=0.15,eRank=269.3,q75/q25=68.47 mlp_w1:H=0.7461,top10E=0.31,eRank=172.9,q75/q25=23.48 mlp_w2:H=0.8435,top10E=0.14,eRank=285.0,q75/q25=50.29 vo_prod:H=0.7375,top10E=0.25,eRank=141.7,q75/q25=5159.00 train_time:475935ms step_avg:74.36ms +[2025-09-02 18:41:35] [Rank 0] PRINT: step:6400/10000 val_loss:4.0202 svd_entropy: attn_qk:H=0.7423,top10E=0.27,eRank=145.4,q75/q25=109.89 attn_vo:H=0.8276,top10E=0.15,eRank=269.3,q75/q25=68.47 mlp_w1:H=0.7461,top10E=0.31,eRank=172.9,q75/q25=23.48 mlp_w2:H=0.8435,top10E=0.14,eRank=285.0,q75/q25=50.29 vo_prod:H=0.7375,top10E=0.25,eRank=141.7,q75/q25=5159.00 train_time:475935ms step_avg:74.36ms +[2025-09-02 18:41:35] [Rank 0] step:6401/10000 train_time:475946ms step_avg:74.35ms +[2025-09-02 18:41:35] [Rank 0] step:6401/10000 train_time:475946ms step_avg:74.35ms +[2025-09-02 18:41:36] [Rank 0] step:6421/10000 train_time:477375ms step_avg:74.35ms +[2025-09-02 18:41:36] [Rank 0] step:6421/10000 train_time:477375ms step_avg:74.35ms +[2025-09-02 18:41:38] [Rank 0] step:6441/10000 train_time:478938ms step_avg:74.36ms +[2025-09-02 18:41:38] [Rank 0] step:6441/10000 train_time:478938ms step_avg:74.36ms +[2025-09-02 18:41:39] [Rank 0] step:6461/10000 train_time:480505ms step_avg:74.37ms +[2025-09-02 18:41:39] [Rank 0] step:6461/10000 train_time:480505ms step_avg:74.37ms +[2025-09-02 18:41:41] [Rank 0] step:6481/10000 train_time:482078ms step_avg:74.38ms +[2025-09-02 18:41:41] [Rank 0] step:6481/10000 train_time:482078ms step_avg:74.38ms +[2025-09-02 18:41:43] [Rank 0] step:6501/10000 train_time:483639ms step_avg:74.39ms +[2025-09-02 18:41:43] [Rank 0] step:6501/10000 train_time:483639ms step_avg:74.39ms +[2025-09-02 18:41:44] [Rank 0] step:6521/10000 train_time:485202ms step_avg:74.41ms +[2025-09-02 18:41:44] [Rank 0] step:6521/10000 train_time:485202ms step_avg:74.41ms +[2025-09-02 
18:41:46] [Rank 0] step:6541/10000 train_time:486769ms step_avg:74.42ms +[2025-09-02 18:41:46] [Rank 0] step:6541/10000 train_time:486769ms step_avg:74.42ms +[2025-09-02 18:41:47] [Rank 0] step:6561/10000 train_time:488341ms step_avg:74.43ms +[2025-09-02 18:41:47] [Rank 0] step:6561/10000 train_time:488341ms step_avg:74.43ms +[2025-09-02 18:41:49] [Rank 0] step:6581/10000 train_time:489906ms step_avg:74.44ms +[2025-09-02 18:41:49] [Rank 0] step:6581/10000 train_time:489906ms step_avg:74.44ms +[2025-09-02 18:41:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:41:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:42:02] [Rank 0] PRINT: step:6600/10000 val_loss:4.0079 svd_entropy: attn_qk:H=0.7441,top10E=0.26,eRank=146.9,q75/q25=110.51 attn_vo:H=0.8293,top10E=0.15,eRank=271.8,q75/q25=67.04 mlp_w1:H=0.7483,top10E=0.31,eRank=175.0,q75/q25=23.84 mlp_w2:H=0.8446,top10E=0.14,eRank=287.1,q75/q25=50.24 vo_prod:H=0.7395,top10E=0.24,eRank=143.7,q75/q25=4903.64 train_time:491634ms step_avg:74.49ms +[2025-09-02 18:42:02] [Rank 0] PRINT: step:6600/10000 val_loss:4.0079 svd_entropy: attn_qk:H=0.7441,top10E=0.26,eRank=146.9,q75/q25=110.51 attn_vo:H=0.8293,top10E=0.15,eRank=271.8,q75/q25=67.04 mlp_w1:H=0.7483,top10E=0.31,eRank=175.0,q75/q25=23.84 mlp_w2:H=0.8446,top10E=0.14,eRank=287.1,q75/q25=50.24 vo_prod:H=0.7395,top10E=0.24,eRank=143.7,q75/q25=4903.64 train_time:491634ms step_avg:74.49ms +[2025-09-02 18:42:02] [Rank 0] step:6601/10000 train_time:491647ms step_avg:74.48ms +[2025-09-02 18:42:02] [Rank 0] step:6601/10000 train_time:491647ms step_avg:74.48ms +[2025-09-02 18:42:04] [Rank 0] step:6621/10000 train_time:493067ms step_avg:74.47ms +[2025-09-02 18:42:04] [Rank 0] step:6621/10000 train_time:493067ms step_avg:74.47ms +[2025-09-02 18:42:05] [Rank 0] step:6641/10000 train_time:494634ms 
step_avg:74.48ms +[2025-09-02 18:42:05] [Rank 0] step:6641/10000 train_time:494634ms step_avg:74.48ms +[2025-09-02 18:42:07] [Rank 0] step:6661/10000 train_time:496198ms step_avg:74.49ms +[2025-09-02 18:42:07] [Rank 0] step:6661/10000 train_time:496198ms step_avg:74.49ms +[2025-09-02 18:42:08] [Rank 0] step:6681/10000 train_time:497777ms step_avg:74.51ms +[2025-09-02 18:42:08] [Rank 0] step:6681/10000 train_time:497777ms step_avg:74.51ms +[2025-09-02 18:42:10] [Rank 0] step:6701/10000 train_time:499382ms step_avg:74.52ms +[2025-09-02 18:42:10] [Rank 0] step:6701/10000 train_time:499382ms step_avg:74.52ms +[2025-09-02 18:42:12] [Rank 0] step:6721/10000 train_time:500974ms step_avg:74.54ms +[2025-09-02 18:42:12] [Rank 0] step:6721/10000 train_time:500974ms step_avg:74.54ms +[2025-09-02 18:42:13] [Rank 0] step:6741/10000 train_time:502564ms step_avg:74.55ms +[2025-09-02 18:42:13] [Rank 0] step:6741/10000 train_time:502564ms step_avg:74.55ms +[2025-09-02 18:42:15] [Rank 0] step:6761/10000 train_time:504156ms step_avg:74.57ms +[2025-09-02 18:42:15] [Rank 0] step:6761/10000 train_time:504156ms step_avg:74.57ms +[2025-09-02 18:42:16] [Rank 0] step:6781/10000 train_time:505752ms step_avg:74.58ms +[2025-09-02 18:42:16] [Rank 0] step:6781/10000 train_time:505752ms step_avg:74.58ms +[2025-09-02 18:42:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:42:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:42:30] [Rank 0] PRINT: step:6800/10000 val_loss:3.9914 svd_entropy: attn_qk:H=0.7456,top10E=0.26,eRank=148.3,q75/q25=110.31 attn_vo:H=0.8307,top10E=0.15,eRank=274.0,q75/q25=65.27 mlp_w1:H=0.7502,top10E=0.31,eRank=177.0,q75/q25=24.05 mlp_w2:H=0.8455,top10E=0.13,eRank=289.1,q75/q25=50.21 vo_prod:H=0.7412,top10E=0.24,eRank=145.3,q75/q25=4540.80 train_time:507509ms step_avg:74.63ms +[2025-09-02 18:42:30] [Rank 0] PRINT: step:6800/10000 val_loss:3.9914 svd_entropy: attn_qk:H=0.7456,top10E=0.26,eRank=148.3,q75/q25=110.31 attn_vo:H=0.8307,top10E=0.15,eRank=274.0,q75/q25=65.27 mlp_w1:H=0.7502,top10E=0.31,eRank=177.0,q75/q25=24.05 mlp_w2:H=0.8455,top10E=0.13,eRank=289.1,q75/q25=50.21 vo_prod:H=0.7412,top10E=0.24,eRank=145.3,q75/q25=4540.80 train_time:507509ms step_avg:74.63ms +[2025-09-02 18:42:30] [Rank 0] step:6801/10000 train_time:507522ms step_avg:74.62ms +[2025-09-02 18:42:30] [Rank 0] step:6801/10000 train_time:507522ms step_avg:74.62ms +[2025-09-02 18:42:31] [Rank 0] step:6821/10000 train_time:508976ms step_avg:74.62ms +[2025-09-02 18:42:31] [Rank 0] step:6821/10000 train_time:508976ms step_avg:74.62ms +[2025-09-02 18:42:33] [Rank 0] step:6841/10000 train_time:510561ms step_avg:74.63ms +[2025-09-02 18:42:33] [Rank 0] step:6841/10000 train_time:510561ms step_avg:74.63ms +[2025-09-02 18:42:35] [Rank 0] step:6861/10000 train_time:512151ms step_avg:74.65ms +[2025-09-02 18:42:35] [Rank 0] step:6861/10000 train_time:512151ms step_avg:74.65ms +[2025-09-02 18:42:36] [Rank 0] step:6881/10000 train_time:513740ms step_avg:74.66ms +[2025-09-02 18:42:36] [Rank 0] step:6881/10000 train_time:513740ms step_avg:74.66ms +[2025-09-02 18:42:38] [Rank 0] step:6901/10000 train_time:515331ms step_avg:74.67ms +[2025-09-02 18:42:38] [Rank 0] step:6901/10000 train_time:515331ms step_avg:74.67ms +[2025-09-02 18:42:39] [Rank 0] step:6921/10000 train_time:516918ms step_avg:74.69ms +[2025-09-02 18:42:39] [Rank 0] step:6921/10000 train_time:516918ms step_avg:74.69ms +[2025-09-02 
18:42:41] [Rank 0] step:6941/10000 train_time:518515ms step_avg:74.70ms +[2025-09-02 18:42:41] [Rank 0] step:6941/10000 train_time:518515ms step_avg:74.70ms +[2025-09-02 18:42:43] [Rank 0] step:6961/10000 train_time:520120ms step_avg:74.72ms +[2025-09-02 18:42:43] [Rank 0] step:6961/10000 train_time:520120ms step_avg:74.72ms +[2025-09-02 18:42:44] [Rank 0] step:6981/10000 train_time:521718ms step_avg:74.73ms +[2025-09-02 18:42:44] [Rank 0] step:6981/10000 train_time:521718ms step_avg:74.73ms +[2025-09-02 18:42:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:42:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:42:57] [Rank 0] PRINT: step:7000/10000 val_loss:3.9744 svd_entropy: attn_qk:H=0.7471,top10E=0.26,eRank=149.6,q75/q25=110.80 attn_vo:H=0.8321,top10E=0.15,eRank=276.2,q75/q25=63.87 mlp_w1:H=0.7521,top10E=0.30,eRank=178.9,q75/q25=24.33 mlp_w2:H=0.8465,top10E=0.13,eRank=291.0,q75/q25=50.15 vo_prod:H=0.7429,top10E=0.24,eRank=147.1,q75/q25=4296.17 train_time:523472ms step_avg:74.78ms +[2025-09-02 18:42:57] [Rank 0] PRINT: step:7000/10000 val_loss:3.9744 svd_entropy: attn_qk:H=0.7471,top10E=0.26,eRank=149.6,q75/q25=110.80 attn_vo:H=0.8321,top10E=0.15,eRank=276.2,q75/q25=63.87 mlp_w1:H=0.7521,top10E=0.30,eRank=178.9,q75/q25=24.33 mlp_w2:H=0.8465,top10E=0.13,eRank=291.0,q75/q25=50.15 vo_prod:H=0.7429,top10E=0.24,eRank=147.1,q75/q25=4296.17 train_time:523472ms step_avg:74.78ms +[2025-09-02 18:42:58] [Rank 0] step:7001/10000 train_time:523484ms step_avg:74.77ms +[2025-09-02 18:42:58] [Rank 0] step:7001/10000 train_time:523484ms step_avg:74.77ms +[2025-09-02 18:42:59] [Rank 0] step:7021/10000 train_time:524920ms step_avg:74.76ms +[2025-09-02 18:42:59] [Rank 0] step:7021/10000 train_time:524920ms step_avg:74.76ms +[2025-09-02 18:43:01] [Rank 0] step:7041/10000 train_time:526511ms 
step_avg:74.78ms +[2025-09-02 18:43:01] [Rank 0] step:7041/10000 train_time:526511ms step_avg:74.78ms +[2025-09-02 18:43:02] [Rank 0] step:7061/10000 train_time:528103ms step_avg:74.79ms +[2025-09-02 18:43:02] [Rank 0] step:7061/10000 train_time:528103ms step_avg:74.79ms +[2025-09-02 18:43:04] [Rank 0] step:7081/10000 train_time:529696ms step_avg:74.81ms +[2025-09-02 18:43:04] [Rank 0] step:7081/10000 train_time:529696ms step_avg:74.81ms +[2025-09-02 18:43:05] [Rank 0] step:7101/10000 train_time:531288ms step_avg:74.82ms +[2025-09-02 18:43:05] [Rank 0] step:7101/10000 train_time:531288ms step_avg:74.82ms +[2025-09-02 18:43:07] [Rank 0] step:7121/10000 train_time:532882ms step_avg:74.83ms +[2025-09-02 18:43:07] [Rank 0] step:7121/10000 train_time:532882ms step_avg:74.83ms +[2025-09-02 18:43:09] [Rank 0] step:7141/10000 train_time:534474ms step_avg:74.85ms +[2025-09-02 18:43:09] [Rank 0] step:7141/10000 train_time:534474ms step_avg:74.85ms +[2025-09-02 18:43:10] [Rank 0] step:7161/10000 train_time:536067ms step_avg:74.86ms +[2025-09-02 18:43:10] [Rank 0] step:7161/10000 train_time:536067ms step_avg:74.86ms +[2025-09-02 18:43:12] [Rank 0] step:7181/10000 train_time:537661ms step_avg:74.87ms +[2025-09-02 18:43:12] [Rank 0] step:7181/10000 train_time:537661ms step_avg:74.87ms +[2025-09-02 18:43:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:43:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:43:25] [Rank 0] PRINT: step:7200/10000 val_loss:3.9663 svd_entropy: attn_qk:H=0.7485,top10E=0.26,eRank=150.9,q75/q25=110.96 attn_vo:H=0.8333,top10E=0.15,eRank=278.1,q75/q25=62.41 mlp_w1:H=0.7538,top10E=0.30,eRank=180.7,q75/q25=24.59 mlp_w2:H=0.8474,top10E=0.13,eRank=292.9,q75/q25=49.77 vo_prod:H=0.7445,top10E=0.24,eRank=148.6,q75/q25=3986.88 train_time:539421ms step_avg:74.92ms +[2025-09-02 18:43:25] [Rank 0] PRINT: step:7200/10000 val_loss:3.9663 svd_entropy: attn_qk:H=0.7485,top10E=0.26,eRank=150.9,q75/q25=110.96 attn_vo:H=0.8333,top10E=0.15,eRank=278.1,q75/q25=62.41 mlp_w1:H=0.7538,top10E=0.30,eRank=180.7,q75/q25=24.59 mlp_w2:H=0.8474,top10E=0.13,eRank=292.9,q75/q25=49.77 vo_prod:H=0.7445,top10E=0.24,eRank=148.6,q75/q25=3986.88 train_time:539421ms step_avg:74.92ms +[2025-09-02 18:43:25] [Rank 0] step:7201/10000 train_time:539433ms step_avg:74.91ms +[2025-09-02 18:43:25] [Rank 0] step:7201/10000 train_time:539433ms step_avg:74.91ms +[2025-09-02 18:43:27] [Rank 0] step:7221/10000 train_time:540889ms step_avg:74.90ms +[2025-09-02 18:43:27] [Rank 0] step:7221/10000 train_time:540889ms step_avg:74.90ms +[2025-09-02 18:43:28] [Rank 0] step:7241/10000 train_time:542479ms step_avg:74.92ms +[2025-09-02 18:43:28] [Rank 0] step:7241/10000 train_time:542479ms step_avg:74.92ms +[2025-09-02 18:43:30] [Rank 0] step:7261/10000 train_time:544069ms step_avg:74.93ms +[2025-09-02 18:43:30] [Rank 0] step:7261/10000 train_time:544069ms step_avg:74.93ms +[2025-09-02 18:43:32] [Rank 0] step:7281/10000 train_time:545670ms step_avg:74.94ms +[2025-09-02 18:43:32] [Rank 0] step:7281/10000 train_time:545670ms step_avg:74.94ms +[2025-09-02 18:43:33] [Rank 0] step:7301/10000 train_time:547272ms step_avg:74.96ms +[2025-09-02 18:43:33] [Rank 0] step:7301/10000 train_time:547272ms step_avg:74.96ms +[2025-09-02 18:43:35] [Rank 0] step:7321/10000 train_time:548876ms step_avg:74.97ms +[2025-09-02 18:43:35] [Rank 0] step:7321/10000 train_time:548876ms step_avg:74.97ms +[2025-09-02 
18:43:36] [Rank 0] step:7341/10000 train_time:550470ms step_avg:74.99ms +[2025-09-02 18:43:36] [Rank 0] step:7341/10000 train_time:550470ms step_avg:74.99ms +[2025-09-02 18:43:38] [Rank 0] step:7361/10000 train_time:552070ms step_avg:75.00ms +[2025-09-02 18:43:38] [Rank 0] step:7361/10000 train_time:552070ms step_avg:75.00ms +[2025-09-02 18:43:40] [Rank 0] step:7381/10000 train_time:553671ms step_avg:75.01ms +[2025-09-02 18:43:40] [Rank 0] step:7381/10000 train_time:553671ms step_avg:75.01ms +[2025-09-02 18:43:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:43:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:43:53] [Rank 0] PRINT: step:7400/10000 val_loss:3.9454 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=152.0,q75/q25=110.97 attn_vo:H=0.8343,top10E=0.15,eRank=279.7,q75/q25=60.83 mlp_w1:H=0.7553,top10E=0.30,eRank=182.3,q75/q25=24.71 mlp_w2:H=0.8481,top10E=0.13,eRank=294.5,q75/q25=49.57 vo_prod:H=0.7459,top10E=0.24,eRank=150.0,q75/q25=3763.28 train_time:555445ms step_avg:75.06ms +[2025-09-02 18:43:53] [Rank 0] PRINT: step:7400/10000 val_loss:3.9454 svd_entropy: attn_qk:H=0.7497,top10E=0.26,eRank=152.0,q75/q25=110.97 attn_vo:H=0.8343,top10E=0.15,eRank=279.7,q75/q25=60.83 mlp_w1:H=0.7553,top10E=0.30,eRank=182.3,q75/q25=24.71 mlp_w2:H=0.8481,top10E=0.13,eRank=294.5,q75/q25=49.57 vo_prod:H=0.7459,top10E=0.24,eRank=150.0,q75/q25=3763.28 train_time:555445ms step_avg:75.06ms +[2025-09-02 18:43:53] [Rank 0] step:7401/10000 train_time:555457ms step_avg:75.05ms +[2025-09-02 18:43:53] [Rank 0] step:7401/10000 train_time:555457ms step_avg:75.05ms +[2025-09-02 18:43:55] [Rank 0] step:7421/10000 train_time:556920ms step_avg:75.05ms +[2025-09-02 18:43:55] [Rank 0] step:7421/10000 train_time:556920ms step_avg:75.05ms +[2025-09-02 18:43:56] [Rank 0] step:7441/10000 train_time:558513ms 
step_avg:75.06ms +[2025-09-02 18:43:56] [Rank 0] step:7441/10000 train_time:558513ms step_avg:75.06ms +[2025-09-02 18:43:58] [Rank 0] step:7461/10000 train_time:560107ms step_avg:75.07ms +[2025-09-02 18:43:58] [Rank 0] step:7461/10000 train_time:560107ms step_avg:75.07ms +[2025-09-02 18:44:00] [Rank 0] step:7481/10000 train_time:561707ms step_avg:75.08ms +[2025-09-02 18:44:00] [Rank 0] step:7481/10000 train_time:561707ms step_avg:75.08ms +[2025-09-02 18:44:01] [Rank 0] step:7501/10000 train_time:563308ms step_avg:75.10ms +[2025-09-02 18:44:01] [Rank 0] step:7501/10000 train_time:563308ms step_avg:75.10ms +[2025-09-02 18:44:03] [Rank 0] step:7521/10000 train_time:564908ms step_avg:75.11ms +[2025-09-02 18:44:03] [Rank 0] step:7521/10000 train_time:564908ms step_avg:75.11ms +[2025-09-02 18:44:04] [Rank 0] step:7541/10000 train_time:566521ms step_avg:75.13ms +[2025-09-02 18:44:04] [Rank 0] step:7541/10000 train_time:566521ms step_avg:75.13ms +[2025-09-02 18:44:06] [Rank 0] step:7561/10000 train_time:568108ms step_avg:75.14ms +[2025-09-02 18:44:06] [Rank 0] step:7561/10000 train_time:568108ms step_avg:75.14ms +[2025-09-02 18:44:08] [Rank 0] step:7581/10000 train_time:569719ms step_avg:75.15ms +[2025-09-02 18:44:08] [Rank 0] step:7581/10000 train_time:569719ms step_avg:75.15ms +[2025-09-02 18:44:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:44:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:44:21] [Rank 0] PRINT: step:7600/10000 val_loss:3.9411 svd_entropy: attn_qk:H=0.7508,top10E=0.26,eRank=153.0,q75/q25=110.08 attn_vo:H=0.8352,top10E=0.15,eRank=281.1,q75/q25=59.63 mlp_w1:H=0.7567,top10E=0.30,eRank=183.8,q75/q25=24.87 mlp_w2:H=0.8488,top10E=0.13,eRank=295.9,q75/q25=49.39 vo_prod:H=0.7470,top10E=0.24,eRank=151.2,q75/q25=3550.48 train_time:571487ms step_avg:75.20ms +[2025-09-02 18:44:21] [Rank 0] PRINT: step:7600/10000 val_loss:3.9411 svd_entropy: attn_qk:H=0.7508,top10E=0.26,eRank=153.0,q75/q25=110.08 attn_vo:H=0.8352,top10E=0.15,eRank=281.1,q75/q25=59.63 mlp_w1:H=0.7567,top10E=0.30,eRank=183.8,q75/q25=24.87 mlp_w2:H=0.8488,top10E=0.13,eRank=295.9,q75/q25=49.39 vo_prod:H=0.7470,top10E=0.24,eRank=151.2,q75/q25=3550.48 train_time:571487ms step_avg:75.20ms +[2025-09-02 18:44:21] [Rank 0] step:7601/10000 train_time:571499ms step_avg:75.19ms +[2025-09-02 18:44:21] [Rank 0] step:7601/10000 train_time:571499ms step_avg:75.19ms +[2025-09-02 18:44:23] [Rank 0] step:7621/10000 train_time:572964ms step_avg:75.18ms +[2025-09-02 18:44:23] [Rank 0] step:7621/10000 train_time:572964ms step_avg:75.18ms +[2025-09-02 18:44:24] [Rank 0] step:7641/10000 train_time:574558ms step_avg:75.19ms +[2025-09-02 18:44:24] [Rank 0] step:7641/10000 train_time:574558ms step_avg:75.19ms +[2025-09-02 18:44:26] [Rank 0] step:7661/10000 train_time:576154ms step_avg:75.21ms +[2025-09-02 18:44:26] [Rank 0] step:7661/10000 train_time:576154ms step_avg:75.21ms +[2025-09-02 18:44:27] [Rank 0] step:7681/10000 train_time:577743ms step_avg:75.22ms +[2025-09-02 18:44:27] [Rank 0] step:7681/10000 train_time:577743ms step_avg:75.22ms +[2025-09-02 18:44:29] [Rank 0] step:7701/10000 train_time:579333ms step_avg:75.23ms +[2025-09-02 18:44:29] [Rank 0] step:7701/10000 train_time:579333ms step_avg:75.23ms +[2025-09-02 18:44:31] [Rank 0] step:7721/10000 train_time:580938ms step_avg:75.24ms +[2025-09-02 18:44:31] [Rank 0] step:7721/10000 train_time:580938ms step_avg:75.24ms +[2025-09-02 
18:44:32] [Rank 0] step:7741/10000 train_time:582534ms step_avg:75.25ms +[2025-09-02 18:44:32] [Rank 0] step:7741/10000 train_time:582534ms step_avg:75.25ms +[2025-09-02 18:44:34] [Rank 0] step:7761/10000 train_time:584137ms step_avg:75.27ms +[2025-09-02 18:44:34] [Rank 0] step:7761/10000 train_time:584137ms step_avg:75.27ms +[2025-09-02 18:44:35] [Rank 0] step:7781/10000 train_time:585740ms step_avg:75.28ms +[2025-09-02 18:44:35] [Rank 0] step:7781/10000 train_time:585740ms step_avg:75.28ms +[2025-09-02 18:44:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:44:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:44:49] [Rank 0] PRINT: step:7800/10000 val_loss:3.9266 svd_entropy: attn_qk:H=0.7518,top10E=0.26,eRank=153.9,q75/q25=109.72 attn_vo:H=0.8361,top10E=0.14,eRank=282.5,q75/q25=58.73 mlp_w1:H=0.7581,top10E=0.30,eRank=185.3,q75/q25=25.13 mlp_w2:H=0.8495,top10E=0.13,eRank=297.3,q75/q25=49.43 vo_prod:H=0.7482,top10E=0.24,eRank=152.4,q75/q25=3418.30 train_time:587508ms step_avg:75.32ms +[2025-09-02 18:44:49] [Rank 0] PRINT: step:7800/10000 val_loss:3.9266 svd_entropy: attn_qk:H=0.7518,top10E=0.26,eRank=153.9,q75/q25=109.72 attn_vo:H=0.8361,top10E=0.14,eRank=282.5,q75/q25=58.73 mlp_w1:H=0.7581,top10E=0.30,eRank=185.3,q75/q25=25.13 mlp_w2:H=0.8495,top10E=0.13,eRank=297.3,q75/q25=49.43 vo_prod:H=0.7482,top10E=0.24,eRank=152.4,q75/q25=3418.30 train_time:587508ms step_avg:75.32ms +[2025-09-02 18:44:49] [Rank 0] step:7801/10000 train_time:587519ms step_avg:75.31ms +[2025-09-02 18:44:49] [Rank 0] step:7801/10000 train_time:587519ms step_avg:75.31ms +[2025-09-02 18:44:50] [Rank 0] step:7821/10000 train_time:588962ms step_avg:75.31ms +[2025-09-02 18:44:50] [Rank 0] step:7821/10000 train_time:588962ms step_avg:75.31ms +[2025-09-02 18:44:52] [Rank 0] step:7841/10000 train_time:590557ms 
step_avg:75.32ms +[2025-09-02 18:44:52] [Rank 0] step:7841/10000 train_time:590557ms step_avg:75.32ms +[2025-09-02 18:44:54] [Rank 0] step:7861/10000 train_time:592157ms step_avg:75.33ms +[2025-09-02 18:44:54] [Rank 0] step:7861/10000 train_time:592157ms step_avg:75.33ms +[2025-09-02 18:44:55] [Rank 0] step:7881/10000 train_time:593762ms step_avg:75.34ms +[2025-09-02 18:44:55] [Rank 0] step:7881/10000 train_time:593762ms step_avg:75.34ms +[2025-09-02 18:44:57] [Rank 0] step:7901/10000 train_time:595357ms step_avg:75.35ms +[2025-09-02 18:44:57] [Rank 0] step:7901/10000 train_time:595357ms step_avg:75.35ms +[2025-09-02 18:44:58] [Rank 0] step:7921/10000 train_time:596954ms step_avg:75.36ms +[2025-09-02 18:44:58] [Rank 0] step:7921/10000 train_time:596954ms step_avg:75.36ms +[2025-09-02 18:45:00] [Rank 0] step:7941/10000 train_time:598560ms step_avg:75.38ms +[2025-09-02 18:45:00] [Rank 0] step:7941/10000 train_time:598560ms step_avg:75.38ms +[2025-09-02 18:45:02] [Rank 0] step:7961/10000 train_time:600161ms step_avg:75.39ms +[2025-09-02 18:45:02] [Rank 0] step:7961/10000 train_time:600161ms step_avg:75.39ms +[2025-09-02 18:45:03] [Rank 0] step:7981/10000 train_time:601758ms step_avg:75.40ms +[2025-09-02 18:45:03] [Rank 0] step:7981/10000 train_time:601758ms step_avg:75.40ms +[2025-09-02 18:45:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:45:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:45:17] [Rank 0] PRINT: step:8000/10000 val_loss:3.9113 svd_entropy: attn_qk:H=0.7528,top10E=0.25,eRank=154.9,q75/q25=109.50 attn_vo:H=0.8369,top10E=0.14,eRank=283.9,q75/q25=57.80 mlp_w1:H=0.7592,top10E=0.30,eRank=186.5,q75/q25=25.28 mlp_w2:H=0.8501,top10E=0.13,eRank=298.5,q75/q25=49.25 vo_prod:H=0.7494,top10E=0.23,eRank=153.6,q75/q25=3258.87 train_time:603518ms step_avg:75.44ms +[2025-09-02 18:45:17] [Rank 0] PRINT: step:8000/10000 val_loss:3.9113 svd_entropy: attn_qk:H=0.7528,top10E=0.25,eRank=154.9,q75/q25=109.50 attn_vo:H=0.8369,top10E=0.14,eRank=283.9,q75/q25=57.80 mlp_w1:H=0.7592,top10E=0.30,eRank=186.5,q75/q25=25.28 mlp_w2:H=0.8501,top10E=0.13,eRank=298.5,q75/q25=49.25 vo_prod:H=0.7494,top10E=0.23,eRank=153.6,q75/q25=3258.87 train_time:603518ms step_avg:75.44ms +[2025-09-02 18:45:17] [Rank 0] step:8001/10000 train_time:603530ms step_avg:75.43ms +[2025-09-02 18:45:17] [Rank 0] step:8001/10000 train_time:603530ms step_avg:75.43ms +[2025-09-02 18:45:18] [Rank 0] step:8021/10000 train_time:604986ms step_avg:75.43ms +[2025-09-02 18:45:18] [Rank 0] step:8021/10000 train_time:604986ms step_avg:75.43ms +[2025-09-02 18:45:20] [Rank 0] step:8041/10000 train_time:606592ms step_avg:75.44ms +[2025-09-02 18:45:20] [Rank 0] step:8041/10000 train_time:606592ms step_avg:75.44ms +[2025-09-02 18:45:21] [Rank 0] step:8061/10000 train_time:608189ms step_avg:75.45ms +[2025-09-02 18:45:21] [Rank 0] step:8061/10000 train_time:608189ms step_avg:75.45ms +[2025-09-02 18:45:23] [Rank 0] step:8081/10000 train_time:609774ms step_avg:75.46ms +[2025-09-02 18:45:23] [Rank 0] step:8081/10000 train_time:609774ms step_avg:75.46ms +[2025-09-02 18:45:25] [Rank 0] step:8101/10000 train_time:611379ms step_avg:75.47ms +[2025-09-02 18:45:25] [Rank 0] step:8101/10000 train_time:611379ms step_avg:75.47ms +[2025-09-02 18:45:26] [Rank 0] step:8121/10000 train_time:612977ms step_avg:75.48ms +[2025-09-02 18:45:26] [Rank 0] step:8121/10000 train_time:612977ms step_avg:75.48ms +[2025-09-02 
18:45:28] [Rank 0] step:8141/10000 train_time:614579ms step_avg:75.49ms +[2025-09-02 18:45:28] [Rank 0] step:8141/10000 train_time:614579ms step_avg:75.49ms +[2025-09-02 18:45:29] [Rank 0] step:8161/10000 train_time:616190ms step_avg:75.50ms +[2025-09-02 18:45:29] [Rank 0] step:8161/10000 train_time:616190ms step_avg:75.50ms +[2025-09-02 18:45:31] [Rank 0] step:8181/10000 train_time:617819ms step_avg:75.52ms +[2025-09-02 18:45:31] [Rank 0] step:8181/10000 train_time:617819ms step_avg:75.52ms +[2025-09-02 18:45:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:45:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:45:44] [Rank 0] PRINT: step:8200/10000 val_loss:3.9019 svd_entropy: attn_qk:H=0.7536,top10E=0.25,eRank=155.6,q75/q25=109.66 attn_vo:H=0.8376,top10E=0.14,eRank=285.0,q75/q25=56.76 mlp_w1:H=0.7602,top10E=0.29,eRank=187.7,q75/q25=25.38 mlp_w2:H=0.8506,top10E=0.13,eRank=299.7,q75/q25=49.12 vo_prod:H=0.7504,top10E=0.23,eRank=154.7,q75/q25=3076.95 train_time:619637ms step_avg:75.57ms +[2025-09-02 18:45:44] [Rank 0] PRINT: step:8200/10000 val_loss:3.9019 svd_entropy: attn_qk:H=0.7536,top10E=0.25,eRank=155.6,q75/q25=109.66 attn_vo:H=0.8376,top10E=0.14,eRank=285.0,q75/q25=56.76 mlp_w1:H=0.7602,top10E=0.29,eRank=187.7,q75/q25=25.38 mlp_w2:H=0.8506,top10E=0.13,eRank=299.7,q75/q25=49.12 vo_prod:H=0.7504,top10E=0.23,eRank=154.7,q75/q25=3076.95 train_time:619637ms step_avg:75.57ms +[2025-09-02 18:45:44] [Rank 0] step:8201/10000 train_time:619649ms step_avg:75.56ms +[2025-09-02 18:45:44] [Rank 0] step:8201/10000 train_time:619649ms step_avg:75.56ms +[2025-09-02 18:45:46] [Rank 0] step:8221/10000 train_time:621147ms step_avg:75.56ms +[2025-09-02 18:45:46] [Rank 0] step:8221/10000 train_time:621147ms step_avg:75.56ms +[2025-09-02 18:45:48] [Rank 0] step:8241/10000 train_time:622810ms 
step_avg:75.57ms +[2025-09-02 18:45:48] [Rank 0] step:8241/10000 train_time:622810ms step_avg:75.57ms +[2025-09-02 18:45:49] [Rank 0] step:8261/10000 train_time:624435ms step_avg:75.59ms +[2025-09-02 18:45:49] [Rank 0] step:8261/10000 train_time:624435ms step_avg:75.59ms +[2025-09-02 18:45:51] [Rank 0] step:8281/10000 train_time:626070ms step_avg:75.60ms +[2025-09-02 18:45:51] [Rank 0] step:8281/10000 train_time:626070ms step_avg:75.60ms +[2025-09-02 18:45:53] [Rank 0] step:8301/10000 train_time:627694ms step_avg:75.62ms +[2025-09-02 18:45:53] [Rank 0] step:8301/10000 train_time:627694ms step_avg:75.62ms +[2025-09-02 18:45:54] [Rank 0] step:8321/10000 train_time:629307ms step_avg:75.63ms +[2025-09-02 18:45:54] [Rank 0] step:8321/10000 train_time:629307ms step_avg:75.63ms +[2025-09-02 18:45:56] [Rank 0] step:8341/10000 train_time:630935ms step_avg:75.64ms +[2025-09-02 18:45:56] [Rank 0] step:8341/10000 train_time:630935ms step_avg:75.64ms +[2025-09-02 18:45:58] [Rank 0] step:8361/10000 train_time:632568ms step_avg:75.66ms +[2025-09-02 18:45:58] [Rank 0] step:8361/10000 train_time:632568ms step_avg:75.66ms +[2025-09-02 18:45:59] [Rank 0] step:8381/10000 train_time:634196ms step_avg:75.67ms +[2025-09-02 18:45:59] [Rank 0] step:8381/10000 train_time:634196ms step_avg:75.67ms +[2025-09-02 18:46:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:46:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:46:13] [Rank 0] PRINT: step:8400/10000 val_loss:3.8904 svd_entropy: attn_qk:H=0.7544,top10E=0.25,eRank=156.3,q75/q25=109.56 attn_vo:H=0.8383,top10E=0.14,eRank=286.1,q75/q25=55.93 mlp_w1:H=0.7612,top10E=0.29,eRank=188.8,q75/q25=25.51 mlp_w2:H=0.8511,top10E=0.13,eRank=300.7,q75/q25=49.18 vo_prod:H=0.7514,top10E=0.23,eRank=155.7,q75/q25=2947.35 train_time:635983ms step_avg:75.71ms +[2025-09-02 18:46:13] [Rank 0] PRINT: step:8400/10000 val_loss:3.8904 svd_entropy: attn_qk:H=0.7544,top10E=0.25,eRank=156.3,q75/q25=109.56 attn_vo:H=0.8383,top10E=0.14,eRank=286.1,q75/q25=55.93 mlp_w1:H=0.7612,top10E=0.29,eRank=188.8,q75/q25=25.51 mlp_w2:H=0.8511,top10E=0.13,eRank=300.7,q75/q25=49.18 vo_prod:H=0.7514,top10E=0.23,eRank=155.7,q75/q25=2947.35 train_time:635983ms step_avg:75.71ms +[2025-09-02 18:46:13] [Rank 0] step:8401/10000 train_time:635995ms step_avg:75.70ms +[2025-09-02 18:46:13] [Rank 0] step:8401/10000 train_time:635995ms step_avg:75.70ms +[2025-09-02 18:46:14] [Rank 0] step:8421/10000 train_time:637457ms step_avg:75.70ms +[2025-09-02 18:46:14] [Rank 0] step:8421/10000 train_time:637457ms step_avg:75.70ms +[2025-09-02 18:46:16] [Rank 0] step:8441/10000 train_time:639085ms step_avg:75.71ms +[2025-09-02 18:46:16] [Rank 0] step:8441/10000 train_time:639085ms step_avg:75.71ms +[2025-09-02 18:46:18] [Rank 0] step:8461/10000 train_time:640708ms step_avg:75.72ms +[2025-09-02 18:46:18] [Rank 0] step:8461/10000 train_time:640708ms step_avg:75.72ms +[2025-09-02 18:46:19] [Rank 0] step:8481/10000 train_time:642339ms step_avg:75.74ms +[2025-09-02 18:46:19] [Rank 0] step:8481/10000 train_time:642339ms step_avg:75.74ms +[2025-09-02 18:46:21] [Rank 0] step:8501/10000 train_time:643988ms step_avg:75.75ms +[2025-09-02 18:46:21] [Rank 0] step:8501/10000 train_time:643988ms step_avg:75.75ms +[2025-09-02 18:46:22] [Rank 0] step:8521/10000 train_time:645624ms step_avg:75.77ms +[2025-09-02 18:46:22] [Rank 0] step:8521/10000 train_time:645624ms step_avg:75.77ms +[2025-09-02 
18:46:24] [Rank 0] step:8541/10000 train_time:647267ms step_avg:75.78ms +[2025-09-02 18:46:24] [Rank 0] step:8541/10000 train_time:647267ms step_avg:75.78ms +[2025-09-02 18:46:26] [Rank 0] step:8561/10000 train_time:648900ms step_avg:75.80ms +[2025-09-02 18:46:26] [Rank 0] step:8561/10000 train_time:648900ms step_avg:75.80ms +[2025-09-02 18:46:27] [Rank 0] step:8581/10000 train_time:650530ms step_avg:75.81ms +[2025-09-02 18:46:27] [Rank 0] step:8581/10000 train_time:650530ms step_avg:75.81ms +[2025-09-02 18:46:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:46:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:46:41] [Rank 0] PRINT: step:8600/10000 val_loss:3.8816 svd_entropy: attn_qk:H=0.7550,top10E=0.25,eRank=157.0,q75/q25=110.17 attn_vo:H=0.8389,top10E=0.14,eRank=287.0,q75/q25=55.17 mlp_w1:H=0.7621,top10E=0.29,eRank=189.8,q75/q25=25.53 mlp_w2:H=0.8515,top10E=0.13,eRank=301.6,q75/q25=49.17 vo_prod:H=0.7522,top10E=0.23,eRank=156.6,q75/q25=2880.80 train_time:652312ms step_avg:75.85ms +[2025-09-02 18:46:41] [Rank 0] PRINT: step:8600/10000 val_loss:3.8816 svd_entropy: attn_qk:H=0.7550,top10E=0.25,eRank=157.0,q75/q25=110.17 attn_vo:H=0.8389,top10E=0.14,eRank=287.0,q75/q25=55.17 mlp_w1:H=0.7621,top10E=0.29,eRank=189.8,q75/q25=25.53 mlp_w2:H=0.8515,top10E=0.13,eRank=301.6,q75/q25=49.17 vo_prod:H=0.7522,top10E=0.23,eRank=156.6,q75/q25=2880.80 train_time:652312ms step_avg:75.85ms +[2025-09-02 18:46:41] [Rank 0] step:8601/10000 train_time:652324ms step_avg:75.84ms +[2025-09-02 18:46:41] [Rank 0] step:8601/10000 train_time:652324ms step_avg:75.84ms +[2025-09-02 18:46:42] [Rank 0] step:8621/10000 train_time:653805ms step_avg:75.84ms +[2025-09-02 18:46:42] [Rank 0] step:8621/10000 train_time:653805ms step_avg:75.84ms +[2025-09-02 18:46:44] [Rank 0] step:8641/10000 train_time:655428ms 
step_avg:75.85ms +[2025-09-02 18:46:44] [Rank 0] step:8641/10000 train_time:655428ms step_avg:75.85ms +[2025-09-02 18:46:46] [Rank 0] step:8661/10000 train_time:657052ms step_avg:75.86ms +[2025-09-02 18:46:46] [Rank 0] step:8661/10000 train_time:657052ms step_avg:75.86ms +[2025-09-02 18:46:47] [Rank 0] step:8681/10000 train_time:658676ms step_avg:75.88ms +[2025-09-02 18:46:47] [Rank 0] step:8681/10000 train_time:658676ms step_avg:75.88ms +[2025-09-02 18:46:49] [Rank 0] step:8701/10000 train_time:660295ms step_avg:75.89ms +[2025-09-02 18:46:49] [Rank 0] step:8701/10000 train_time:660295ms step_avg:75.89ms +[2025-09-02 18:46:51] [Rank 0] step:8721/10000 train_time:661946ms step_avg:75.90ms +[2025-09-02 18:46:51] [Rank 0] step:8721/10000 train_time:661946ms step_avg:75.90ms +[2025-09-02 18:46:52] [Rank 0] step:8741/10000 train_time:663563ms step_avg:75.91ms +[2025-09-02 18:46:52] [Rank 0] step:8741/10000 train_time:663563ms step_avg:75.91ms +[2025-09-02 18:46:54] [Rank 0] step:8761/10000 train_time:665180ms step_avg:75.93ms +[2025-09-02 18:46:54] [Rank 0] step:8761/10000 train_time:665180ms step_avg:75.93ms +[2025-09-02 18:46:55] [Rank 0] step:8781/10000 train_time:666811ms step_avg:75.94ms +[2025-09-02 18:46:55] [Rank 0] step:8781/10000 train_time:666811ms step_avg:75.94ms +[2025-09-02 18:46:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:46:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:47:09] [Rank 0] PRINT: step:8800/10000 val_loss:3.8718 svd_entropy: attn_qk:H=0.7556,top10E=0.25,eRank=157.6,q75/q25=110.25 attn_vo:H=0.8394,top10E=0.14,eRank=287.8,q75/q25=54.58 mlp_w1:H=0.7630,top10E=0.29,eRank=190.7,q75/q25=25.63 mlp_w2:H=0.8519,top10E=0.13,eRank=302.5,q75/q25=49.07 vo_prod:H=0.7529,top10E=0.23,eRank=157.4,q75/q25=2783.46 train_time:668607ms step_avg:75.98ms +[2025-09-02 18:47:09] [Rank 0] PRINT: step:8800/10000 val_loss:3.8718 svd_entropy: attn_qk:H=0.7556,top10E=0.25,eRank=157.6,q75/q25=110.25 attn_vo:H=0.8394,top10E=0.14,eRank=287.8,q75/q25=54.58 mlp_w1:H=0.7630,top10E=0.29,eRank=190.7,q75/q25=25.63 mlp_w2:H=0.8519,top10E=0.13,eRank=302.5,q75/q25=49.07 vo_prod:H=0.7529,top10E=0.23,eRank=157.4,q75/q25=2783.46 train_time:668607ms step_avg:75.98ms +[2025-09-02 18:47:09] [Rank 0] step:8801/10000 train_time:668618ms step_avg:75.97ms +[2025-09-02 18:47:09] [Rank 0] step:8801/10000 train_time:668618ms step_avg:75.97ms +[2025-09-02 18:47:11] [Rank 0] step:8821/10000 train_time:670075ms step_avg:75.96ms +[2025-09-02 18:47:11] [Rank 0] step:8821/10000 train_time:670075ms step_avg:75.96ms +[2025-09-02 18:47:12] [Rank 0] step:8841/10000 train_time:671719ms step_avg:75.98ms +[2025-09-02 18:47:12] [Rank 0] step:8841/10000 train_time:671719ms step_avg:75.98ms +[2025-09-02 18:47:14] [Rank 0] step:8861/10000 train_time:673344ms step_avg:75.99ms +[2025-09-02 18:47:14] [Rank 0] step:8861/10000 train_time:673344ms step_avg:75.99ms +[2025-09-02 18:47:16] [Rank 0] step:8881/10000 train_time:674967ms step_avg:76.00ms +[2025-09-02 18:47:16] [Rank 0] step:8881/10000 train_time:674967ms step_avg:76.00ms +[2025-09-02 18:47:17] [Rank 0] step:8901/10000 train_time:676596ms step_avg:76.01ms +[2025-09-02 18:47:17] [Rank 0] step:8901/10000 train_time:676596ms step_avg:76.01ms +[2025-09-02 18:47:19] [Rank 0] step:8921/10000 train_time:678229ms step_avg:76.03ms +[2025-09-02 18:47:19] [Rank 0] step:8921/10000 train_time:678229ms step_avg:76.03ms +[2025-09-02 
18:47:21] [Rank 0] step:8941/10000 train_time:679868ms step_avg:76.04ms +[2025-09-02 18:47:21] [Rank 0] step:8941/10000 train_time:679868ms step_avg:76.04ms +[2025-09-02 18:47:22] [Rank 0] step:8961/10000 train_time:681489ms step_avg:76.05ms +[2025-09-02 18:47:22] [Rank 0] step:8961/10000 train_time:681489ms step_avg:76.05ms +[2025-09-02 18:47:24] [Rank 0] step:8981/10000 train_time:683109ms step_avg:76.06ms +[2025-09-02 18:47:24] [Rank 0] step:8981/10000 train_time:683109ms step_avg:76.06ms +[2025-09-02 18:47:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:47:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:47:37] [Rank 0] PRINT: step:9000/10000 val_loss:3.8632 svd_entropy: attn_qk:H=0.7562,top10E=0.25,eRank=158.1,q75/q25=110.12 attn_vo:H=0.8398,top10E=0.14,eRank=288.5,q75/q25=53.90 mlp_w1:H=0.7637,top10E=0.29,eRank=191.5,q75/q25=25.64 mlp_w2:H=0.8522,top10E=0.13,eRank=303.2,q75/q25=49.02 vo_prod:H=0.7536,top10E=0.23,eRank=158.1,q75/q25=2724.39 train_time:684895ms step_avg:76.10ms +[2025-09-02 18:47:37] [Rank 0] PRINT: step:9000/10000 val_loss:3.8632 svd_entropy: attn_qk:H=0.7562,top10E=0.25,eRank=158.1,q75/q25=110.12 attn_vo:H=0.8398,top10E=0.14,eRank=288.5,q75/q25=53.90 mlp_w1:H=0.7637,top10E=0.29,eRank=191.5,q75/q25=25.64 mlp_w2:H=0.8522,top10E=0.13,eRank=303.2,q75/q25=49.02 vo_prod:H=0.7536,top10E=0.23,eRank=158.1,q75/q25=2724.39 train_time:684895ms step_avg:76.10ms +[2025-09-02 18:47:37] [Rank 0] step:9001/10000 train_time:684907ms step_avg:76.09ms +[2025-09-02 18:47:37] [Rank 0] step:9001/10000 train_time:684907ms step_avg:76.09ms +[2025-09-02 18:47:39] [Rank 0] step:9021/10000 train_time:686393ms step_avg:76.09ms +[2025-09-02 18:47:39] [Rank 0] step:9021/10000 train_time:686393ms step_avg:76.09ms +[2025-09-02 18:47:41] [Rank 0] step:9041/10000 train_time:688011ms 
step_avg:76.10ms +[2025-09-02 18:47:41] [Rank 0] step:9041/10000 train_time:688011ms step_avg:76.10ms +[2025-09-02 18:47:42] [Rank 0] step:9061/10000 train_time:689649ms step_avg:76.11ms +[2025-09-02 18:47:42] [Rank 0] step:9061/10000 train_time:689649ms step_avg:76.11ms +[2025-09-02 18:47:44] [Rank 0] step:9081/10000 train_time:691284ms step_avg:76.12ms +[2025-09-02 18:47:44] [Rank 0] step:9081/10000 train_time:691284ms step_avg:76.12ms +[2025-09-02 18:47:46] [Rank 0] step:9101/10000 train_time:692934ms step_avg:76.14ms +[2025-09-02 18:47:46] [Rank 0] step:9101/10000 train_time:692934ms step_avg:76.14ms +[2025-09-02 18:47:47] [Rank 0] step:9121/10000 train_time:694567ms step_avg:76.15ms +[2025-09-02 18:47:47] [Rank 0] step:9121/10000 train_time:694567ms step_avg:76.15ms +[2025-09-02 18:47:49] [Rank 0] step:9141/10000 train_time:696182ms step_avg:76.16ms +[2025-09-02 18:47:49] [Rank 0] step:9141/10000 train_time:696182ms step_avg:76.16ms +[2025-09-02 18:47:50] [Rank 0] step:9161/10000 train_time:697801ms step_avg:76.17ms +[2025-09-02 18:47:50] [Rank 0] step:9161/10000 train_time:697801ms step_avg:76.17ms +[2025-09-02 18:47:52] [Rank 0] step:9181/10000 train_time:699461ms step_avg:76.19ms +[2025-09-02 18:47:52] [Rank 0] step:9181/10000 train_time:699461ms step_avg:76.19ms +[2025-09-02 18:47:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:47:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:48:06] [Rank 0] PRINT: step:9200/10000 val_loss:3.8557 svd_entropy: attn_qk:H=0.7566,top10E=0.25,eRank=158.6,q75/q25=109.62 attn_vo:H=0.8402,top10E=0.14,eRank=289.2,q75/q25=53.35 mlp_w1:H=0.7643,top10E=0.29,eRank=192.2,q75/q25=25.68 mlp_w2:H=0.8526,top10E=0.13,eRank=303.9,q75/q25=49.15 vo_prod:H=0.7542,top10E=0.23,eRank=158.7,q75/q25=2649.99 train_time:701251ms step_avg:76.22ms +[2025-09-02 18:48:06] [Rank 0] PRINT: step:9200/10000 val_loss:3.8557 svd_entropy: attn_qk:H=0.7566,top10E=0.25,eRank=158.6,q75/q25=109.62 attn_vo:H=0.8402,top10E=0.14,eRank=289.2,q75/q25=53.35 mlp_w1:H=0.7643,top10E=0.29,eRank=192.2,q75/q25=25.68 mlp_w2:H=0.8526,top10E=0.13,eRank=303.9,q75/q25=49.15 vo_prod:H=0.7542,top10E=0.23,eRank=158.7,q75/q25=2649.99 train_time:701251ms step_avg:76.22ms +[2025-09-02 18:48:06] [Rank 0] step:9201/10000 train_time:701263ms step_avg:76.22ms +[2025-09-02 18:48:06] [Rank 0] step:9201/10000 train_time:701263ms step_avg:76.22ms +[2025-09-02 18:48:07] [Rank 0] step:9221/10000 train_time:702743ms step_avg:76.21ms +[2025-09-02 18:48:07] [Rank 0] step:9221/10000 train_time:702743ms step_avg:76.21ms +[2025-09-02 18:48:09] [Rank 0] step:9241/10000 train_time:704382ms step_avg:76.22ms +[2025-09-02 18:48:09] [Rank 0] step:9241/10000 train_time:704382ms step_avg:76.22ms +[2025-09-02 18:48:11] [Rank 0] step:9261/10000 train_time:706021ms step_avg:76.24ms +[2025-09-02 18:48:11] [Rank 0] step:9261/10000 train_time:706021ms step_avg:76.24ms +[2025-09-02 18:48:12] [Rank 0] step:9281/10000 train_time:707637ms step_avg:76.25ms +[2025-09-02 18:48:12] [Rank 0] step:9281/10000 train_time:707637ms step_avg:76.25ms +[2025-09-02 18:48:14] [Rank 0] step:9301/10000 train_time:709264ms step_avg:76.26ms +[2025-09-02 18:48:14] [Rank 0] step:9301/10000 train_time:709264ms step_avg:76.26ms +[2025-09-02 18:48:16] [Rank 0] step:9321/10000 train_time:710897ms step_avg:76.27ms +[2025-09-02 18:48:16] [Rank 0] step:9321/10000 train_time:710897ms step_avg:76.27ms +[2025-09-02 
18:48:17] [Rank 0] step:9341/10000 train_time:712529ms step_avg:76.28ms +[2025-09-02 18:48:17] [Rank 0] step:9341/10000 train_time:712529ms step_avg:76.28ms +[2025-09-02 18:48:19] [Rank 0] step:9361/10000 train_time:714163ms step_avg:76.29ms +[2025-09-02 18:48:19] [Rank 0] step:9361/10000 train_time:714163ms step_avg:76.29ms +[2025-09-02 18:48:20] [Rank 0] step:9381/10000 train_time:715812ms step_avg:76.30ms +[2025-09-02 18:48:20] [Rank 0] step:9381/10000 train_time:715812ms step_avg:76.30ms +[2025-09-02 18:48:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:48:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:48:34] [Rank 0] PRINT: step:9400/10000 val_loss:3.8481 svd_entropy: attn_qk:H=0.7570,top10E=0.25,eRank=158.9,q75/q25=109.76 attn_vo:H=0.8405,top10E=0.14,eRank=289.7,q75/q25=52.98 mlp_w1:H=0.7648,top10E=0.29,eRank=192.8,q75/q25=25.74 mlp_w2:H=0.8528,top10E=0.13,eRank=304.4,q75/q25=49.07 vo_prod:H=0.7547,top10E=0.23,eRank=159.2,q75/q25=2608.99 train_time:717612ms step_avg:76.34ms +[2025-09-02 18:48:34] [Rank 0] PRINT: step:9400/10000 val_loss:3.8481 svd_entropy: attn_qk:H=0.7570,top10E=0.25,eRank=158.9,q75/q25=109.76 attn_vo:H=0.8405,top10E=0.14,eRank=289.7,q75/q25=52.98 mlp_w1:H=0.7648,top10E=0.29,eRank=192.8,q75/q25=25.74 mlp_w2:H=0.8528,top10E=0.13,eRank=304.4,q75/q25=49.07 vo_prod:H=0.7547,top10E=0.23,eRank=159.2,q75/q25=2608.99 train_time:717612ms step_avg:76.34ms +[2025-09-02 18:48:34] [Rank 0] step:9401/10000 train_time:717624ms step_avg:76.33ms +[2025-09-02 18:48:34] [Rank 0] step:9401/10000 train_time:717624ms step_avg:76.33ms +[2025-09-02 18:48:36] [Rank 0] step:9421/10000 train_time:719099ms step_avg:76.33ms +[2025-09-02 18:48:36] [Rank 0] step:9421/10000 train_time:719099ms step_avg:76.33ms +[2025-09-02 18:48:37] [Rank 0] step:9441/10000 train_time:720730ms 
step_avg:76.34ms +[2025-09-02 18:48:37] [Rank 0] step:9441/10000 train_time:720730ms step_avg:76.34ms +[2025-09-02 18:48:39] [Rank 0] step:9461/10000 train_time:722365ms step_avg:76.35ms +[2025-09-02 18:48:39] [Rank 0] step:9461/10000 train_time:722365ms step_avg:76.35ms +[2025-09-02 18:48:41] [Rank 0] step:9481/10000 train_time:723997ms step_avg:76.36ms +[2025-09-02 18:48:41] [Rank 0] step:9481/10000 train_time:723997ms step_avg:76.36ms +[2025-09-02 18:48:42] [Rank 0] step:9501/10000 train_time:725644ms step_avg:76.38ms +[2025-09-02 18:48:42] [Rank 0] step:9501/10000 train_time:725644ms step_avg:76.38ms +[2025-09-02 18:48:44] [Rank 0] step:9521/10000 train_time:727272ms step_avg:76.39ms +[2025-09-02 18:48:44] [Rank 0] step:9521/10000 train_time:727272ms step_avg:76.39ms +[2025-09-02 18:48:46] [Rank 0] step:9541/10000 train_time:728901ms step_avg:76.40ms +[2025-09-02 18:48:46] [Rank 0] step:9541/10000 train_time:728901ms step_avg:76.40ms +[2025-09-02 18:48:47] [Rank 0] step:9561/10000 train_time:730529ms step_avg:76.41ms +[2025-09-02 18:48:47] [Rank 0] step:9561/10000 train_time:730529ms step_avg:76.41ms +[2025-09-02 18:48:49] [Rank 0] step:9581/10000 train_time:732161ms step_avg:76.42ms +[2025-09-02 18:48:49] [Rank 0] step:9581/10000 train_time:732161ms step_avg:76.42ms +[2025-09-02 18:48:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:48:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:49:02] [Rank 0] PRINT: step:9600/10000 val_loss:3.8422 svd_entropy: attn_qk:H=0.7574,top10E=0.25,eRank=159.2,q75/q25=109.63 attn_vo:H=0.8408,top10E=0.14,eRank=290.2,q75/q25=52.61 mlp_w1:H=0.7652,top10E=0.29,eRank=193.3,q75/q25=25.68 mlp_w2:H=0.8530,top10E=0.13,eRank=304.9,q75/q25=48.97 vo_prod:H=0.7551,top10E=0.23,eRank=159.7,q75/q25=2564.39 train_time:733970ms step_avg:76.46ms +[2025-09-02 18:49:02] [Rank 0] PRINT: step:9600/10000 val_loss:3.8422 svd_entropy: attn_qk:H=0.7574,top10E=0.25,eRank=159.2,q75/q25=109.63 attn_vo:H=0.8408,top10E=0.14,eRank=290.2,q75/q25=52.61 mlp_w1:H=0.7652,top10E=0.29,eRank=193.3,q75/q25=25.68 mlp_w2:H=0.8530,top10E=0.13,eRank=304.9,q75/q25=48.97 vo_prod:H=0.7551,top10E=0.23,eRank=159.7,q75/q25=2564.39 train_time:733970ms step_avg:76.46ms +[2025-09-02 18:49:02] [Rank 0] step:9601/10000 train_time:733982ms step_avg:76.45ms +[2025-09-02 18:49:02] [Rank 0] step:9601/10000 train_time:733982ms step_avg:76.45ms +[2025-09-02 18:49:04] [Rank 0] step:9621/10000 train_time:735465ms step_avg:76.44ms +[2025-09-02 18:49:04] [Rank 0] step:9621/10000 train_time:735465ms step_avg:76.44ms +[2025-09-02 18:49:06] [Rank 0] step:9641/10000 train_time:737102ms step_avg:76.45ms +[2025-09-02 18:49:06] [Rank 0] step:9641/10000 train_time:737102ms step_avg:76.45ms +[2025-09-02 18:49:07] [Rank 0] step:9661/10000 train_time:738764ms step_avg:76.47ms +[2025-09-02 18:49:07] [Rank 0] step:9661/10000 train_time:738764ms step_avg:76.47ms +[2025-09-02 18:49:09] [Rank 0] step:9681/10000 train_time:740421ms step_avg:76.48ms +[2025-09-02 18:49:09] [Rank 0] step:9681/10000 train_time:740421ms step_avg:76.48ms +[2025-09-02 18:49:11] [Rank 0] step:9701/10000 train_time:742090ms step_avg:76.50ms +[2025-09-02 18:49:11] [Rank 0] step:9701/10000 train_time:742090ms step_avg:76.50ms +[2025-09-02 18:49:12] [Rank 0] step:9721/10000 train_time:743744ms step_avg:76.51ms +[2025-09-02 18:49:12] [Rank 0] step:9721/10000 train_time:743744ms step_avg:76.51ms +[2025-09-02 
18:49:14] [Rank 0] step:9741/10000 train_time:745420ms step_avg:76.52ms +[2025-09-02 18:49:14] [Rank 0] step:9741/10000 train_time:745420ms step_avg:76.52ms +[2025-09-02 18:49:16] [Rank 0] step:9761/10000 train_time:747078ms step_avg:76.54ms +[2025-09-02 18:49:16] [Rank 0] step:9761/10000 train_time:747078ms step_avg:76.54ms +[2025-09-02 18:49:17] [Rank 0] step:9781/10000 train_time:748749ms step_avg:76.55ms +[2025-09-02 18:49:17] [Rank 0] step:9781/10000 train_time:748749ms step_avg:76.55ms +[2025-09-02 18:49:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:49:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:49:31] [Rank 0] PRINT: step:9800/10000 val_loss:3.8358 svd_entropy: attn_qk:H=0.7576,top10E=0.25,eRank=159.4,q75/q25=109.74 attn_vo:H=0.8410,top10E=0.14,eRank=290.5,q75/q25=52.33 mlp_w1:H=0.7656,top10E=0.29,eRank=193.7,q75/q25=25.73 mlp_w2:H=0.8532,top10E=0.13,eRank=305.2,q75/q25=48.87 vo_prod:H=0.7555,top10E=0.23,eRank=160.0,q75/q25=2523.62 train_time:750589ms step_avg:76.59ms +[2025-09-02 18:49:31] [Rank 0] PRINT: step:9800/10000 val_loss:3.8358 svd_entropy: attn_qk:H=0.7576,top10E=0.25,eRank=159.4,q75/q25=109.74 attn_vo:H=0.8410,top10E=0.14,eRank=290.5,q75/q25=52.33 mlp_w1:H=0.7656,top10E=0.29,eRank=193.7,q75/q25=25.73 mlp_w2:H=0.8532,top10E=0.13,eRank=305.2,q75/q25=48.87 vo_prod:H=0.7555,top10E=0.23,eRank=160.0,q75/q25=2523.62 train_time:750589ms step_avg:76.59ms +[2025-09-02 18:49:31] [Rank 0] step:9801/10000 train_time:750600ms step_avg:76.58ms +[2025-09-02 18:49:31] [Rank 0] step:9801/10000 train_time:750600ms step_avg:76.58ms +[2025-09-02 18:49:33] [Rank 0] step:9821/10000 train_time:752108ms step_avg:76.58ms +[2025-09-02 18:49:33] [Rank 0] step:9821/10000 train_time:752108ms step_avg:76.58ms +[2025-09-02 18:49:34] [Rank 0] step:9841/10000 train_time:753777ms 
step_avg:76.60ms +[2025-09-02 18:49:34] [Rank 0] step:9841/10000 train_time:753777ms step_avg:76.60ms +[2025-09-02 18:49:36] [Rank 0] step:9861/10000 train_time:755422ms step_avg:76.61ms +[2025-09-02 18:49:36] [Rank 0] step:9861/10000 train_time:755422ms step_avg:76.61ms +[2025-09-02 18:49:38] [Rank 0] step:9881/10000 train_time:757065ms step_avg:76.62ms +[2025-09-02 18:49:38] [Rank 0] step:9881/10000 train_time:757065ms step_avg:76.62ms +[2025-09-02 18:49:39] [Rank 0] step:9901/10000 train_time:758726ms step_avg:76.63ms +[2025-09-02 18:49:39] [Rank 0] step:9901/10000 train_time:758726ms step_avg:76.63ms +[2025-09-02 18:49:41] [Rank 0] step:9921/10000 train_time:760377ms step_avg:76.64ms +[2025-09-02 18:49:41] [Rank 0] step:9921/10000 train_time:760377ms step_avg:76.64ms +[2025-09-02 18:49:43] [Rank 0] step:9941/10000 train_time:762039ms step_avg:76.66ms +[2025-09-02 18:49:43] [Rank 0] step:9941/10000 train_time:762039ms step_avg:76.66ms +[2025-09-02 18:49:44] [Rank 0] step:9961/10000 train_time:763693ms step_avg:76.67ms +[2025-09-02 18:49:44] [Rank 0] step:9961/10000 train_time:763693ms step_avg:76.67ms +[2025-09-02 18:49:46] [Rank 0] step:9981/10000 train_time:765349ms step_avg:76.68ms +[2025-09-02 18:49:46] [Rank 0] step:9981/10000 train_time:765349ms step_avg:76.68ms +[2025-09-02 18:49:48] [Rank 0] step:10000/10000 train_time:766927ms step_avg:76.69ms +[2025-09-02 18:49:48] [Rank 0] step:10000/10000 train_time:766927ms step_avg:76.69ms +[2025-09-02 18:49:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-02 18:49:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-02 18:50:00] [Rank 0] PRINT: step:10000/10000 val_loss:3.8302 svd_entropy: attn_qk:H=0.7577,top10E=0.25,eRank=159.6,q75/q25=109.91 attn_vo:H=0.8412,top10E=0.14,eRank=290.7,q75/q25=52.14 mlp_w1:H=0.7658,top10E=0.29,eRank=194.0,q75/q25=25.72 mlp_w2:H=0.8533,top10E=0.13,eRank=305.5,q75/q25=48.85 vo_prod:H=0.7557,top10E=0.23,eRank=160.3,q75/q25=2504.05 train_time:767184ms step_avg:76.72ms +[2025-09-02 18:50:00] [Rank 0] PRINT: step:10000/10000 val_loss:3.8302 svd_entropy: attn_qk:H=0.7577,top10E=0.25,eRank=159.6,q75/q25=109.91 attn_vo:H=0.8412,top10E=0.14,eRank=290.7,q75/q25=52.14 mlp_w1:H=0.7658,top10E=0.29,eRank=194.0,q75/q25=25.72 mlp_w2:H=0.8533,top10E=0.13,eRank=305.5,q75/q25=48.85 vo_prod:H=0.7557,top10E=0.23,eRank=160.3,q75/q25=2504.05 train_time:767184ms step_avg:76.72ms +[2025-09-02 18:50:00] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 18:50:00 2025 --- +[2025-09-02 18:50:00] [Rank 0] PRINT: --- Training Finished: Tue Sep 2 18:50:00 2025 --- +[2025-09-02 18:50:00] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB +[2025-09-02 18:50:00] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14436 MiB diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_50/config.json b/logs_svd_qkvo/mode_15_param_qkvo_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a4c31551a808a46751d15213c46d7b37df85f491 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_50/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 15, + "model_parameterization": "qkvo", + "adam_lr": 0.008, + "muon_lr": 0.05, + "base_dir": "logs_svd_qkvo" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + 
"cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 200, + "save_checkpoint": false + }, + "run_uuid_for_log": "0dfa5a7f-4375-4cac-8216-960f079fc0a8", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_svd_qkvo/mode_15_param_qkvo_seed_50/training_log_0dfa5a7f-4375-4cac-8216-960f079fc0a8.txt b/logs_svd_qkvo/mode_15_param_qkvo_seed_50/training_log_0dfa5a7f-4375-4cac-8216-960f079fc0a8.txt new file mode 100644 index 0000000000000000000000000000000000000000..65f4750b8b35c7f31e89a3ab6ff7f51a65a3e893 --- /dev/null +++ b/logs_svd_qkvo/mode_15_param_qkvo_seed_50/training_log_0dfa5a7f-4375-4cac-8216-960f079fc0a8.txt @@ -0,0 +1,2984 @@ +[2025-09-03 05:07:42] [Rank 0] PRINT: --- Script Start: Wed Sep 3 05:07:42 2025 --- +[2025-09-03 05:07:42] [Rank 0] PRINT: --- Script Start: Wed Sep 3 05:07:42 2025 --- +[2025-09-03 05:07:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 05:07:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=15, model_parameterization='qkvo', adam_lr=0.008, muon_lr=0.05, base_dir='logs_svd_qkvo') +[2025-09-03 05:07:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 05:07:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-03 05:07:42] [Rank 0] PRINT: Using fixed seed: 50 +[2025-09-03 05:07:42] [Rank 0] PRINT: Using fixed seed: 50 +[2025-09-03 05:07:42] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_50 +[2025-09-03 05:07:42] [Rank 0] PRINT: Run directory: logs_svd_qkvo/mode_15_param_qkvo_seed_50 +[2025-09-03 05:07:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * 
num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k 
= min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + 
#train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = 
f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + 
mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 05:07:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON_new import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, 
dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + +# ---- ADD: spectral metrics helper right after calculate_svd_entropy ---- +def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10): + """ + Returns dict with: + - entropy_norm: normalized SVD entropy (same normalization as your function) + - erank: effective rank = exp(Shannon entropy of p) + - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values) + - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2) + """ + with torch.no_grad(): + s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32)) + s = s[s > 1e-9] + n = s.numel() + if n == 0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + s2 = s * s + S2_sum = float(torch.sum(s2)) + if S2_sum == 0.0: + return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf')) + + p = s2 / S2_sum # energy distribution + # Shannon entropy H (natural log) + H = float(torch.sum(torch.special.entr(p))) + entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy + erank = float(np.exp(H)) + + k = min(topk, n) + topk_energy = float(torch.topk(p, k).values.sum()) + + # eigenvalues = s^2, use quantiles on s^2 + q25 = float(torch.quantile(s2, 0.25)) + q75 = float(torch.quantile(s2, 0.75)) + q75_q25 = (q75 / q25) if q25 > 0 else float('inf') + + return dict( + entropy_norm=entropy_norm, + erank=erank, + topk_energy=topk_energy, + q75_q25=q75_q25, + ) + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + "11: Muon(W_1)/Adam(O Attn, QK Attn)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"]) +parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo import GPT + +elif exp_args.model_parameterization == "norope": + print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_norope import GPT + +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w") + from models.nano_GPT_gated import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = 
"/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + val_tokens = 1966080 + #val_tokens = 10485760 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 200 # Original: 125 + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# 
logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) 
+print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + +######################################## +# Construct model and optimizer # +######################################## +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): 
attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + 
seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not 
found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. 
Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # Muon on V Attn, MLP + print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + all_mlp_matrices + adam_matrix_target_list = attn_o_params + attn_qk_group + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn + print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w1_group + adam_matrix_target_list = all_attn_matrices + mlp_w2_group + elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others + print0(f"PRINT: Mode 11: Muon on W_1. 
Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w1_group + adam_matrix_target_list = attn_qk_group + mlp_w2_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its 
target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 
'model' for compilation, not 'model_compiled' before it's defined +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels # +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) +for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled +model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + + +params_to_analyze = [] + +if exp_args.model_parameterization == "whole": + params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad] +elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated": + params_to_analyze = all_attn_matrices + all_mlp_matrices + matrix_groups_for_svd = {} + if master_process: + matrix_groups_for_svd = { + "attn_qk": attn_qk_group, + "attn_vo": attn_vo_group, + "mlp_w1": mlp_w1_group, + "mlp_w2": mlp_proj_params + } + + + +######################################## +# Training and validation # 
+######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + +for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0) + if step > 0 : # For step 0, t0 hasn't started a training segment yet + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() # Use model_compiled + val_batch_size = world_size * args.val_seq_len + # Ensure val_tokens is divisible by val_batch_size, or handle remainder + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). 
Some tokens might be missed.", console=True) + val_num_steps = args.val_tokens // val_batch_size + + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device + actual_val_steps = 0 + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break # Stop if data runs out + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue) + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader # Clean up + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss + + svd_log_str = "" + if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd: + TOPK = 10 + svd_results_by_category = {} + + with torch.no_grad(): + # per-category metrics (average over matrices in the group) + for name, group_params in matrix_groups_for_svd.items(): + if not group_params: + continue + mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params] + if mets: + avg_entropy = float(np.mean([m['entropy_norm'] for m in mets])) + avg_erank = float(np.mean([m['erank'] for m in mets])) + avg_topkE = float(np.mean([m['topk_energy'] for m in mets])) + avg_qratio = float(np.mean([m['q75_q25'] for m in mets])) + svd_results_by_category[name] = dict( + entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio + ) + + # VO product as another category + vo_mets = [] + num_layers = len(attn_v_params) + for i in range(num_layers): + w_v = attn_v_params[i] + w_o = attn_o_params[i] + w_ov_product = torch.matmul(w_o, w_v) + vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK)) + if vo_mets: + svd_results_by_category['vo_prod'] = dict( + entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])), + erank=float(np.mean([m['erank'] for m in vo_mets])), + topkE=float(np.mean([m['topk_energy'] for m in vo_mets])), + q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])), + ) + + # format logging string (append metrics after entropy) + svd_log_parts = [] + for name, vals in svd_results_by_category.items(): + svd_log_parts.append( + f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}" + ) + svd_log_str = " ".join(svd_log_parts) + + + # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative. 
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + model_compiled.train() # Switch back to train mode + torch.cuda.synchronize() + t0 = time.perf_counter() # Reset timer for the next training segment + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-03 05:07:42] [Rank 0] PRINT: Constructing model... +[2025-09-03 05:07:42] [Rank 0] PRINT: Constructing model... +[2025-09-03 05:07:44] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 05:07:44] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-03 05:07:44] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 05:07:44] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-03 05:07:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 05:07:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-03 05:07:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-03 05:07:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 15 +[2025-09-03 05:07:44] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-03 05:07:44] [Rank 0] PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: 0.008). +[2025-09-03 05:07:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 05:07:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-03 05:07:44] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-03 05:07:44] [Rank 0] PRINT: Muon optimizer is active with 11 parameters. +[2025-09-03 05:07:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 05:07:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-03 05:07:44] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 05:07:44] [Rank 0] PRINT: Model compilation complete. +[2025-09-03 05:07:44] [Rank 0] PRINT: Starting warmup... +[2025-09-03 05:07:44] [Rank 0] PRINT: Starting warmup... +[2025-09-03 05:09:23] [Rank 0] PRINT: Warmup complete. +[2025-09-03 05:09:23] [Rank 0] PRINT: Warmup complete. +[2025-09-03 05:09:24] [Rank 0] PRINT: Starting training... +[2025-09-03 05:09:24] [Rank 0] PRINT: Starting training... 
+[2025-09-03 05:09:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:09:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:09:40] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.25 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.6,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 05:09:40] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 svd_entropy: attn_qk:H=0.9248,top10E=0.05,eRank=465.9,q75/q25=10.25 attn_vo:H=0.4623,top10E=0.02,eRank=232.8,q75/q25=inf mlp_w1:H=0.9812,top10E=0.03,eRank=677.6,q75/q25=2.37 mlp_w2:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf vo_prod:H=0.0000,top10E=0.00,eRank=0.0,q75/q25=inf train_time:0ms step_avg:0.00ms +[2025-09-03 05:09:42] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.41ms +[2025-09-03 05:09:42] [Rank 0] step:21/10000 train_time:1311ms step_avg:62.41ms +[2025-09-03 05:09:43] [Rank 0] step:41/10000 train_time:2701ms step_avg:65.87ms +[2025-09-03 05:09:43] [Rank 0] step:41/10000 train_time:2701ms step_avg:65.87ms +[2025-09-03 05:09:45] [Rank 0] step:61/10000 train_time:4096ms step_avg:67.14ms +[2025-09-03 05:09:45] [Rank 0] step:61/10000 train_time:4096ms step_avg:67.14ms +[2025-09-03 05:09:46] [Rank 0] step:81/10000 train_time:5492ms step_avg:67.81ms +[2025-09-03 05:09:46] [Rank 0] step:81/10000 train_time:5492ms step_avg:67.81ms +[2025-09-03 05:09:48] [Rank 0] step:101/10000 train_time:6891ms step_avg:68.23ms +[2025-09-03 05:09:48] [Rank 0] step:101/10000 train_time:6891ms step_avg:68.23ms +[2025-09-03 05:09:49] [Rank 0] step:121/10000 train_time:8287ms step_avg:68.49ms +[2025-09-03 05:09:49] [Rank 0] step:121/10000 
train_time:8287ms step_avg:68.49ms +[2025-09-03 05:09:50] [Rank 0] step:141/10000 train_time:9686ms step_avg:68.70ms +[2025-09-03 05:09:50] [Rank 0] step:141/10000 train_time:9686ms step_avg:68.70ms +[2025-09-03 05:09:52] [Rank 0] step:161/10000 train_time:11086ms step_avg:68.85ms +[2025-09-03 05:09:52] [Rank 0] step:161/10000 train_time:11086ms step_avg:68.85ms +[2025-09-03 05:09:53] [Rank 0] step:181/10000 train_time:12485ms step_avg:68.98ms +[2025-09-03 05:09:53] [Rank 0] step:181/10000 train_time:12485ms step_avg:68.98ms +[2025-09-03 05:09:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:09:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:10:06] [Rank 0] PRINT: step:200/10000 val_loss:6.4869 svd_entropy: attn_qk:H=0.4200,top10E=0.82,eRank=33.1,q75/q25=12.16 attn_vo:H=0.5424,top10E=0.64,eRank=109.7,q75/q25=161.26 mlp_w1:H=0.4157,top10E=0.75,eRank=23.8,q75/q25=2.75 mlp_w2:H=0.1476,top10E=0.96,eRank=4.3,q75/q25=627.61 vo_prod:H=0.2520,top10E=0.96,eRank=7.1,q75/q25=1035.09 train_time:14027ms step_avg:70.13ms +[2025-09-03 05:10:06] [Rank 0] PRINT: step:200/10000 val_loss:6.4869 svd_entropy: attn_qk:H=0.4200,top10E=0.82,eRank=33.1,q75/q25=12.16 attn_vo:H=0.5424,top10E=0.64,eRank=109.7,q75/q25=161.26 mlp_w1:H=0.4157,top10E=0.75,eRank=23.8,q75/q25=2.75 mlp_w2:H=0.1476,top10E=0.96,eRank=4.3,q75/q25=627.61 vo_prod:H=0.2520,top10E=0.96,eRank=7.1,q75/q25=1035.09 train_time:14027ms step_avg:70.13ms +[2025-09-03 05:10:07] [Rank 0] step:201/10000 train_time:14040ms step_avg:69.85ms +[2025-09-03 05:10:07] [Rank 0] step:201/10000 train_time:14040ms step_avg:69.85ms +[2025-09-03 05:10:08] [Rank 0] step:221/10000 train_time:15319ms step_avg:69.31ms +[2025-09-03 05:10:08] [Rank 0] step:221/10000 train_time:15319ms step_avg:69.31ms +[2025-09-03 05:10:09] [Rank 0] step:241/10000 
train_time:16718ms step_avg:69.37ms +[2025-09-03 05:10:09] [Rank 0] step:241/10000 train_time:16718ms step_avg:69.37ms +[2025-09-03 05:10:11] [Rank 0] step:261/10000 train_time:18119ms step_avg:69.42ms +[2025-09-03 05:10:11] [Rank 0] step:261/10000 train_time:18119ms step_avg:69.42ms +[2025-09-03 05:10:12] [Rank 0] step:281/10000 train_time:19520ms step_avg:69.46ms +[2025-09-03 05:10:12] [Rank 0] step:281/10000 train_time:19520ms step_avg:69.46ms +[2025-09-03 05:10:14] [Rank 0] step:301/10000 train_time:20920ms step_avg:69.50ms +[2025-09-03 05:10:14] [Rank 0] step:301/10000 train_time:20920ms step_avg:69.50ms +[2025-09-03 05:10:15] [Rank 0] step:321/10000 train_time:22322ms step_avg:69.54ms +[2025-09-03 05:10:15] [Rank 0] step:321/10000 train_time:22322ms step_avg:69.54ms +[2025-09-03 05:10:16] [Rank 0] step:341/10000 train_time:23724ms step_avg:69.57ms +[2025-09-03 05:10:16] [Rank 0] step:341/10000 train_time:23724ms step_avg:69.57ms +[2025-09-03 05:10:18] [Rank 0] step:361/10000 train_time:25151ms step_avg:69.67ms +[2025-09-03 05:10:18] [Rank 0] step:361/10000 train_time:25151ms step_avg:69.67ms +[2025-09-03 05:10:19] [Rank 0] step:381/10000 train_time:26553ms step_avg:69.69ms +[2025-09-03 05:10:19] [Rank 0] step:381/10000 train_time:26553ms step_avg:69.69ms +[2025-09-03 05:10:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:10:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:10:32] [Rank 0] PRINT: step:400/10000 val_loss:5.9946 svd_entropy: attn_qk:H=0.4914,top10E=0.72,eRank=42.1,q75/q25=13.62 attn_vo:H=0.5637,top10E=0.56,eRank=85.3,q75/q25=47.58 mlp_w1:H=0.4516,top10E=0.69,eRank=37.5,q75/q25=3.21 mlp_w2:H=0.5411,top10E=0.60,eRank=37.3,q75/q25=13.34 vo_prod:H=0.3850,top10E=0.86,eRank=14.9,q75/q25=343.48 train_time:28097ms step_avg:70.24ms +[2025-09-03 05:10:32] [Rank 0] PRINT: step:400/10000 val_loss:5.9946 svd_entropy: attn_qk:H=0.4914,top10E=0.72,eRank=42.1,q75/q25=13.62 attn_vo:H=0.5637,top10E=0.56,eRank=85.3,q75/q25=47.58 mlp_w1:H=0.4516,top10E=0.69,eRank=37.5,q75/q25=3.21 mlp_w2:H=0.5411,top10E=0.60,eRank=37.3,q75/q25=13.34 vo_prod:H=0.3850,top10E=0.86,eRank=14.9,q75/q25=343.48 train_time:28097ms step_avg:70.24ms +[2025-09-03 05:10:32] [Rank 0] step:401/10000 train_time:28110ms step_avg:70.10ms +[2025-09-03 05:10:32] [Rank 0] step:401/10000 train_time:28110ms step_avg:70.10ms +[2025-09-03 05:10:34] [Rank 0] step:421/10000 train_time:29382ms step_avg:69.79ms +[2025-09-03 05:10:34] [Rank 0] step:421/10000 train_time:29382ms step_avg:69.79ms +[2025-09-03 05:10:35] [Rank 0] step:441/10000 train_time:30785ms step_avg:69.81ms +[2025-09-03 05:10:35] [Rank 0] step:441/10000 train_time:30785ms step_avg:69.81ms +[2025-09-03 05:10:37] [Rank 0] step:461/10000 train_time:32187ms step_avg:69.82ms +[2025-09-03 05:10:37] [Rank 0] step:461/10000 train_time:32187ms step_avg:69.82ms +[2025-09-03 05:10:38] [Rank 0] step:481/10000 train_time:33593ms step_avg:69.84ms +[2025-09-03 05:10:38] [Rank 0] step:481/10000 train_time:33593ms step_avg:69.84ms +[2025-09-03 05:10:39] [Rank 0] step:501/10000 train_time:34995ms step_avg:69.85ms +[2025-09-03 05:10:39] [Rank 0] step:501/10000 train_time:34995ms step_avg:69.85ms +[2025-09-03 05:10:41] [Rank 0] step:521/10000 train_time:36398ms step_avg:69.86ms +[2025-09-03 05:10:41] [Rank 0] step:521/10000 train_time:36398ms step_avg:69.86ms +[2025-09-03 05:10:42] [Rank 0] step:541/10000 
train_time:37800ms step_avg:69.87ms +[2025-09-03 05:10:42] [Rank 0] step:541/10000 train_time:37800ms step_avg:69.87ms +[2025-09-03 05:10:44] [Rank 0] step:561/10000 train_time:39210ms step_avg:69.89ms +[2025-09-03 05:10:44] [Rank 0] step:561/10000 train_time:39210ms step_avg:69.89ms +[2025-09-03 05:10:45] [Rank 0] step:581/10000 train_time:40613ms step_avg:69.90ms +[2025-09-03 05:10:45] [Rank 0] step:581/10000 train_time:40613ms step_avg:69.90ms +[2025-09-03 05:10:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:10:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:10:58] [Rank 0] PRINT: step:600/10000 val_loss:5.6936 svd_entropy: attn_qk:H=0.5340,top10E=0.63,eRank=50.1,q75/q25=15.39 attn_vo:H=0.6012,top10E=0.47,eRank=92.7,q75/q25=30.08 mlp_w1:H=0.4926,top10E=0.64,eRank=48.2,q75/q25=3.60 mlp_w2:H=0.6405,top10E=0.44,eRank=71.9,q75/q25=9.77 vo_prod:H=0.4654,top10E=0.71,eRank=24.1,q75/q25=248.90 train_time:42158ms step_avg:70.26ms +[2025-09-03 05:10:58] [Rank 0] PRINT: step:600/10000 val_loss:5.6936 svd_entropy: attn_qk:H=0.5340,top10E=0.63,eRank=50.1,q75/q25=15.39 attn_vo:H=0.6012,top10E=0.47,eRank=92.7,q75/q25=30.08 mlp_w1:H=0.4926,top10E=0.64,eRank=48.2,q75/q25=3.60 mlp_w2:H=0.6405,top10E=0.44,eRank=71.9,q75/q25=9.77 vo_prod:H=0.4654,top10E=0.71,eRank=24.1,q75/q25=248.90 train_time:42158ms step_avg:70.26ms +[2025-09-03 05:10:58] [Rank 0] step:601/10000 train_time:42171ms step_avg:70.17ms +[2025-09-03 05:10:58] [Rank 0] step:601/10000 train_time:42171ms step_avg:70.17ms +[2025-09-03 05:11:00] [Rank 0] step:621/10000 train_time:43456ms step_avg:69.98ms +[2025-09-03 05:11:00] [Rank 0] step:621/10000 train_time:43456ms step_avg:69.98ms +[2025-09-03 05:11:01] [Rank 0] step:641/10000 train_time:44858ms step_avg:69.98ms +[2025-09-03 05:11:01] [Rank 0] step:641/10000 
train_time:44858ms step_avg:69.98ms +[2025-09-03 05:11:03] [Rank 0] step:661/10000 train_time:46262ms step_avg:69.99ms +[2025-09-03 05:11:03] [Rank 0] step:661/10000 train_time:46262ms step_avg:69.99ms +[2025-09-03 05:11:04] [Rank 0] step:681/10000 train_time:47667ms step_avg:70.00ms +[2025-09-03 05:11:04] [Rank 0] step:681/10000 train_time:47667ms step_avg:70.00ms +[2025-09-03 05:11:05] [Rank 0] step:701/10000 train_time:49072ms step_avg:70.00ms +[2025-09-03 05:11:05] [Rank 0] step:701/10000 train_time:49072ms step_avg:70.00ms +[2025-09-03 05:11:07] [Rank 0] step:721/10000 train_time:50477ms step_avg:70.01ms +[2025-09-03 05:11:07] [Rank 0] step:721/10000 train_time:50477ms step_avg:70.01ms +[2025-09-03 05:11:08] [Rank 0] step:741/10000 train_time:51882ms step_avg:70.02ms +[2025-09-03 05:11:08] [Rank 0] step:741/10000 train_time:51882ms step_avg:70.02ms +[2025-09-03 05:11:10] [Rank 0] step:761/10000 train_time:53298ms step_avg:70.04ms +[2025-09-03 05:11:10] [Rank 0] step:761/10000 train_time:53298ms step_avg:70.04ms +[2025-09-03 05:11:11] [Rank 0] step:781/10000 train_time:54716ms step_avg:70.06ms +[2025-09-03 05:11:11] [Rank 0] step:781/10000 train_time:54716ms step_avg:70.06ms +[2025-09-03 05:11:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:11:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:11:24] [Rank 0] PRINT: step:800/10000 val_loss:5.4705 svd_entropy: attn_qk:H=0.5630,top10E=0.57,eRank=56.5,q75/q25=17.47 attn_vo:H=0.6308,top10E=0.41,eRank=102.0,q75/q25=27.81 mlp_w1:H=0.5273,top10E=0.60,eRank=57.1,q75/q25=3.98 mlp_w2:H=0.6885,top10E=0.36,eRank=97.9,q75/q25=9.79 vo_prod:H=0.5102,top10E=0.62,eRank=31.9,q75/q25=299.85 train_time:56277ms step_avg:70.35ms +[2025-09-03 05:11:24] [Rank 0] PRINT: step:800/10000 val_loss:5.4705 svd_entropy: attn_qk:H=0.5630,top10E=0.57,eRank=56.5,q75/q25=17.47 attn_vo:H=0.6308,top10E=0.41,eRank=102.0,q75/q25=27.81 mlp_w1:H=0.5273,top10E=0.60,eRank=57.1,q75/q25=3.98 mlp_w2:H=0.6885,top10E=0.36,eRank=97.9,q75/q25=9.79 vo_prod:H=0.5102,top10E=0.62,eRank=31.9,q75/q25=299.85 train_time:56277ms step_avg:70.35ms +[2025-09-03 05:11:24] [Rank 0] step:801/10000 train_time:56290ms step_avg:70.27ms +[2025-09-03 05:11:24] [Rank 0] step:801/10000 train_time:56290ms step_avg:70.27ms +[2025-09-03 05:11:26] [Rank 0] step:821/10000 train_time:57588ms step_avg:70.14ms +[2025-09-03 05:11:26] [Rank 0] step:821/10000 train_time:57588ms step_avg:70.14ms +[2025-09-03 05:11:27] [Rank 0] step:841/10000 train_time:59003ms step_avg:70.16ms +[2025-09-03 05:11:27] [Rank 0] step:841/10000 train_time:59003ms step_avg:70.16ms +[2025-09-03 05:11:28] [Rank 0] step:861/10000 train_time:60419ms step_avg:70.17ms +[2025-09-03 05:11:28] [Rank 0] step:861/10000 train_time:60419ms step_avg:70.17ms +[2025-09-03 05:11:30] [Rank 0] step:881/10000 train_time:61836ms step_avg:70.19ms +[2025-09-03 05:11:30] [Rank 0] step:881/10000 train_time:61836ms step_avg:70.19ms +[2025-09-03 05:11:31] [Rank 0] step:901/10000 train_time:63252ms step_avg:70.20ms +[2025-09-03 05:11:31] [Rank 0] step:901/10000 train_time:63252ms step_avg:70.20ms +[2025-09-03 05:11:33] [Rank 0] step:921/10000 train_time:64669ms step_avg:70.22ms +[2025-09-03 05:11:33] [Rank 0] step:921/10000 train_time:64669ms step_avg:70.22ms +[2025-09-03 05:11:34] [Rank 0] step:941/10000 
train_time:66087ms step_avg:70.23ms +[2025-09-03 05:11:34] [Rank 0] step:941/10000 train_time:66087ms step_avg:70.23ms +[2025-09-03 05:11:36] [Rank 0] step:961/10000 train_time:67504ms step_avg:70.24ms +[2025-09-03 05:11:36] [Rank 0] step:961/10000 train_time:67504ms step_avg:70.24ms +[2025-09-03 05:11:37] [Rank 0] step:981/10000 train_time:68923ms step_avg:70.26ms +[2025-09-03 05:11:37] [Rank 0] step:981/10000 train_time:68923ms step_avg:70.26ms +[2025-09-03 05:11:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:11:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:11:50] [Rank 0] PRINT: step:1000/10000 val_loss:5.3127 svd_entropy: attn_qk:H=0.5855,top10E=0.53,eRank=62.5,q75/q25=20.04 attn_vo:H=0.6545,top10E=0.37,eRank=111.6,q75/q25=31.76 mlp_w1:H=0.5535,top10E=0.56,eRank=64.4,q75/q25=4.39 mlp_w2:H=0.7183,top10E=0.31,eRank=119.1,q75/q25=10.71 vo_prod:H=0.5397,top10E=0.55,eRank=38.5,q75/q25=523.65 train_time:70482ms step_avg:70.48ms +[2025-09-03 05:11:50] [Rank 0] PRINT: step:1000/10000 val_loss:5.3127 svd_entropy: attn_qk:H=0.5855,top10E=0.53,eRank=62.5,q75/q25=20.04 attn_vo:H=0.6545,top10E=0.37,eRank=111.6,q75/q25=31.76 mlp_w1:H=0.5535,top10E=0.56,eRank=64.4,q75/q25=4.39 mlp_w2:H=0.7183,top10E=0.31,eRank=119.1,q75/q25=10.71 vo_prod:H=0.5397,top10E=0.55,eRank=38.5,q75/q25=523.65 train_time:70482ms step_avg:70.48ms +[2025-09-03 05:11:50] [Rank 0] step:1001/10000 train_time:70495ms step_avg:70.42ms +[2025-09-03 05:11:50] [Rank 0] step:1001/10000 train_time:70495ms step_avg:70.42ms +[2025-09-03 05:11:52] [Rank 0] step:1021/10000 train_time:71786ms step_avg:70.31ms +[2025-09-03 05:11:52] [Rank 0] step:1021/10000 train_time:71786ms step_avg:70.31ms +[2025-09-03 05:11:53] [Rank 0] step:1041/10000 train_time:73202ms step_avg:70.32ms +[2025-09-03 05:11:53] [Rank 0] step:1041/10000 
train_time:73202ms step_avg:70.32ms +[2025-09-03 05:11:54] [Rank 0] step:1061/10000 train_time:74619ms step_avg:70.33ms +[2025-09-03 05:11:54] [Rank 0] step:1061/10000 train_time:74619ms step_avg:70.33ms +[2025-09-03 05:11:56] [Rank 0] step:1081/10000 train_time:76036ms step_avg:70.34ms +[2025-09-03 05:11:56] [Rank 0] step:1081/10000 train_time:76036ms step_avg:70.34ms +[2025-09-03 05:11:57] [Rank 0] step:1101/10000 train_time:77454ms step_avg:70.35ms +[2025-09-03 05:11:57] [Rank 0] step:1101/10000 train_time:77454ms step_avg:70.35ms +[2025-09-03 05:11:59] [Rank 0] step:1121/10000 train_time:78871ms step_avg:70.36ms +[2025-09-03 05:11:59] [Rank 0] step:1121/10000 train_time:78871ms step_avg:70.36ms +[2025-09-03 05:12:00] [Rank 0] step:1141/10000 train_time:80290ms step_avg:70.37ms +[2025-09-03 05:12:00] [Rank 0] step:1141/10000 train_time:80290ms step_avg:70.37ms +[2025-09-03 05:12:01] [Rank 0] step:1161/10000 train_time:81709ms step_avg:70.38ms +[2025-09-03 05:12:01] [Rank 0] step:1161/10000 train_time:81709ms step_avg:70.38ms +[2025-09-03 05:12:03] [Rank 0] step:1181/10000 train_time:83126ms step_avg:70.39ms +[2025-09-03 05:12:03] [Rank 0] step:1181/10000 train_time:83126ms step_avg:70.39ms +[2025-09-03 05:12:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:12:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:12:16] [Rank 0] PRINT: step:1200/10000 val_loss:5.1779 svd_entropy: attn_qk:H=0.6042,top10E=0.49,eRank=68.4,q75/q25=23.15 attn_vo:H=0.6748,top10E=0.34,eRank=121.6,q75/q25=41.36 mlp_w1:H=0.5772,top10E=0.53,eRank=71.6,q75/q25=4.84 mlp_w2:H=0.7405,top10E=0.27,eRank=137.9,q75/q25=12.37 vo_prod:H=0.5643,top10E=0.50,eRank=45.1,q75/q25=1125.58 train_time:84687ms step_avg:70.57ms +[2025-09-03 05:12:16] [Rank 0] PRINT: step:1200/10000 val_loss:5.1779 svd_entropy: attn_qk:H=0.6042,top10E=0.49,eRank=68.4,q75/q25=23.15 attn_vo:H=0.6748,top10E=0.34,eRank=121.6,q75/q25=41.36 mlp_w1:H=0.5772,top10E=0.53,eRank=71.6,q75/q25=4.84 mlp_w2:H=0.7405,top10E=0.27,eRank=137.9,q75/q25=12.37 vo_prod:H=0.5643,top10E=0.50,eRank=45.1,q75/q25=1125.58 train_time:84687ms step_avg:70.57ms +[2025-09-03 05:12:16] [Rank 0] step:1201/10000 train_time:84700ms step_avg:70.52ms +[2025-09-03 05:12:16] [Rank 0] step:1201/10000 train_time:84700ms step_avg:70.52ms +[2025-09-03 05:12:17] [Rank 0] step:1221/10000 train_time:85978ms step_avg:70.42ms +[2025-09-03 05:12:17] [Rank 0] step:1221/10000 train_time:85978ms step_avg:70.42ms +[2025-09-03 05:12:19] [Rank 0] step:1241/10000 train_time:87394ms step_avg:70.42ms +[2025-09-03 05:12:19] [Rank 0] step:1241/10000 train_time:87394ms step_avg:70.42ms +[2025-09-03 05:12:20] [Rank 0] step:1261/10000 train_time:88812ms step_avg:70.43ms +[2025-09-03 05:12:20] [Rank 0] step:1261/10000 train_time:88812ms step_avg:70.43ms +[2025-09-03 05:12:22] [Rank 0] step:1281/10000 train_time:90230ms step_avg:70.44ms +[2025-09-03 05:12:22] [Rank 0] step:1281/10000 train_time:90230ms step_avg:70.44ms +[2025-09-03 05:12:23] [Rank 0] step:1301/10000 train_time:91648ms step_avg:70.44ms +[2025-09-03 05:12:23] [Rank 0] step:1301/10000 train_time:91648ms step_avg:70.44ms +[2025-09-03 05:12:25] [Rank 0] step:1321/10000 train_time:93129ms step_avg:70.50ms +[2025-09-03 05:12:25] [Rank 0] step:1321/10000 train_time:93129ms step_avg:70.50ms +[2025-09-03 05:12:26] [Rank 0] 
step:1341/10000 train_time:94547ms step_avg:70.50ms +[2025-09-03 05:12:26] [Rank 0] step:1341/10000 train_time:94547ms step_avg:70.50ms +[2025-09-03 05:12:27] [Rank 0] step:1361/10000 train_time:95965ms step_avg:70.51ms +[2025-09-03 05:12:27] [Rank 0] step:1361/10000 train_time:95965ms step_avg:70.51ms +[2025-09-03 05:12:29] [Rank 0] step:1381/10000 train_time:97385ms step_avg:70.52ms +[2025-09-03 05:12:29] [Rank 0] step:1381/10000 train_time:97385ms step_avg:70.52ms +[2025-09-03 05:12:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:12:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:12:42] [Rank 0] PRINT: step:1400/10000 val_loss:5.0712 svd_entropy: attn_qk:H=0.6203,top10E=0.46,eRank=74.2,q75/q25=27.35 attn_vo:H=0.6922,top10E=0.31,eRank=131.3,q75/q25=54.72 mlp_w1:H=0.5980,top10E=0.51,eRank=78.6,q75/q25=5.35 mlp_w2:H=0.7587,top10E=0.25,eRank=155.6,q75/q25=14.32 vo_prod:H=0.5849,top10E=0.46,eRank=51.6,q75/q25=2295.86 train_time:98945ms step_avg:70.68ms +[2025-09-03 05:12:42] [Rank 0] PRINT: step:1400/10000 val_loss:5.0712 svd_entropy: attn_qk:H=0.6203,top10E=0.46,eRank=74.2,q75/q25=27.35 attn_vo:H=0.6922,top10E=0.31,eRank=131.3,q75/q25=54.72 mlp_w1:H=0.5980,top10E=0.51,eRank=78.6,q75/q25=5.35 mlp_w2:H=0.7587,top10E=0.25,eRank=155.6,q75/q25=14.32 vo_prod:H=0.5849,top10E=0.46,eRank=51.6,q75/q25=2295.86 train_time:98945ms step_avg:70.68ms +[2025-09-03 05:12:42] [Rank 0] step:1401/10000 train_time:98959ms step_avg:70.63ms +[2025-09-03 05:12:42] [Rank 0] step:1401/10000 train_time:98959ms step_avg:70.63ms +[2025-09-03 05:12:44] [Rank 0] step:1421/10000 train_time:100259ms step_avg:70.56ms +[2025-09-03 05:12:44] [Rank 0] step:1421/10000 train_time:100259ms step_avg:70.56ms +[2025-09-03 05:12:45] [Rank 0] step:1441/10000 train_time:101675ms step_avg:70.56ms +[2025-09-03 05:12:45] 
[Rank 0] step:1441/10000 train_time:101675ms step_avg:70.56ms +[2025-09-03 05:12:46] [Rank 0] step:1461/10000 train_time:103093ms step_avg:70.56ms +[2025-09-03 05:12:46] [Rank 0] step:1461/10000 train_time:103093ms step_avg:70.56ms +[2025-09-03 05:12:48] [Rank 0] step:1481/10000 train_time:104510ms step_avg:70.57ms +[2025-09-03 05:12:48] [Rank 0] step:1481/10000 train_time:104510ms step_avg:70.57ms +[2025-09-03 05:12:49] [Rank 0] step:1501/10000 train_time:105937ms step_avg:70.58ms +[2025-09-03 05:12:49] [Rank 0] step:1501/10000 train_time:105937ms step_avg:70.58ms +[2025-09-03 05:12:51] [Rank 0] step:1521/10000 train_time:107367ms step_avg:70.59ms +[2025-09-03 05:12:51] [Rank 0] step:1521/10000 train_time:107367ms step_avg:70.59ms +[2025-09-03 05:12:52] [Rank 0] step:1541/10000 train_time:108796ms step_avg:70.60ms +[2025-09-03 05:12:52] [Rank 0] step:1541/10000 train_time:108796ms step_avg:70.60ms +[2025-09-03 05:12:53] [Rank 0] step:1561/10000 train_time:110226ms step_avg:70.61ms +[2025-09-03 05:12:53] [Rank 0] step:1561/10000 train_time:110226ms step_avg:70.61ms +[2025-09-03 05:12:55] [Rank 0] step:1581/10000 train_time:111655ms step_avg:70.62ms +[2025-09-03 05:12:55] [Rank 0] step:1581/10000 train_time:111655ms step_avg:70.62ms +[2025-09-03 05:12:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:12:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:13:08] [Rank 0] PRINT: step:1600/10000 val_loss:4.9400 svd_entropy: attn_qk:H=0.6336,top10E=0.44,eRank=79.3,q75/q25=32.72 attn_vo:H=0.7075,top10E=0.29,eRank=140.8,q75/q25=69.07 mlp_w1:H=0.6163,top10E=0.48,eRank=85.7,q75/q25=5.96 mlp_w2:H=0.7730,top10E=0.22,eRank=171.2,q75/q25=16.58 vo_prod:H=0.6016,top10E=0.43,eRank=57.5,q75/q25=4232.90 train_time:113229ms step_avg:70.77ms +[2025-09-03 05:13:08] [Rank 0] PRINT: step:1600/10000 val_loss:4.9400 svd_entropy: attn_qk:H=0.6336,top10E=0.44,eRank=79.3,q75/q25=32.72 attn_vo:H=0.7075,top10E=0.29,eRank=140.8,q75/q25=69.07 mlp_w1:H=0.6163,top10E=0.48,eRank=85.7,q75/q25=5.96 mlp_w2:H=0.7730,top10E=0.22,eRank=171.2,q75/q25=16.58 vo_prod:H=0.6016,top10E=0.43,eRank=57.5,q75/q25=4232.90 train_time:113229ms step_avg:70.77ms +[2025-09-03 05:13:08] [Rank 0] step:1601/10000 train_time:113241ms step_avg:70.73ms +[2025-09-03 05:13:08] [Rank 0] step:1601/10000 train_time:113241ms step_avg:70.73ms +[2025-09-03 05:13:10] [Rank 0] step:1621/10000 train_time:114550ms step_avg:70.67ms +[2025-09-03 05:13:10] [Rank 0] step:1621/10000 train_time:114550ms step_avg:70.67ms +[2025-09-03 05:13:11] [Rank 0] step:1641/10000 train_time:115976ms step_avg:70.67ms +[2025-09-03 05:13:11] [Rank 0] step:1641/10000 train_time:115976ms step_avg:70.67ms +[2025-09-03 05:13:12] [Rank 0] step:1661/10000 train_time:117403ms step_avg:70.68ms +[2025-09-03 05:13:12] [Rank 0] step:1661/10000 train_time:117403ms step_avg:70.68ms +[2025-09-03 05:13:14] [Rank 0] step:1681/10000 train_time:118830ms step_avg:70.69ms +[2025-09-03 05:13:14] [Rank 0] step:1681/10000 train_time:118830ms step_avg:70.69ms +[2025-09-03 05:13:15] [Rank 0] step:1701/10000 train_time:120258ms step_avg:70.70ms +[2025-09-03 05:13:15] [Rank 0] step:1701/10000 train_time:120258ms step_avg:70.70ms +[2025-09-03 05:13:17] [Rank 0] step:1721/10000 train_time:121686ms step_avg:70.71ms +[2025-09-03 05:13:17] [Rank 0] step:1721/10000 train_time:121686ms step_avg:70.71ms +[2025-09-03 05:13:18] 
[Rank 0] step:1741/10000 train_time:123113ms step_avg:70.71ms +[2025-09-03 05:13:18] [Rank 0] step:1741/10000 train_time:123113ms step_avg:70.71ms +[2025-09-03 05:13:20] [Rank 0] step:1761/10000 train_time:124542ms step_avg:70.72ms +[2025-09-03 05:13:20] [Rank 0] step:1761/10000 train_time:124542ms step_avg:70.72ms +[2025-09-03 05:13:21] [Rank 0] step:1781/10000 train_time:125971ms step_avg:70.73ms +[2025-09-03 05:13:21] [Rank 0] step:1781/10000 train_time:125971ms step_avg:70.73ms +[2025-09-03 05:13:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:13:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:13:34] [Rank 0] PRINT: step:1800/10000 val_loss:4.8161 svd_entropy: attn_qk:H=0.6451,top10E=0.42,eRank=84.2,q75/q25=39.04 attn_vo:H=0.7205,top10E=0.27,eRank=150.0,q75/q25=81.99 mlp_w1:H=0.6339,top10E=0.46,eRank=93.2,q75/q25=6.65 mlp_w2:H=0.7851,top10E=0.21,eRank=185.4,q75/q25=18.66 vo_prod:H=0.6150,top10E=0.41,eRank=62.8,q75/q25=6690.40 train_time:127543ms step_avg:70.86ms +[2025-09-03 05:13:34] [Rank 0] PRINT: step:1800/10000 val_loss:4.8161 svd_entropy: attn_qk:H=0.6451,top10E=0.42,eRank=84.2,q75/q25=39.04 attn_vo:H=0.7205,top10E=0.27,eRank=150.0,q75/q25=81.99 mlp_w1:H=0.6339,top10E=0.46,eRank=93.2,q75/q25=6.65 mlp_w2:H=0.7851,top10E=0.21,eRank=185.4,q75/q25=18.66 vo_prod:H=0.6150,top10E=0.41,eRank=62.8,q75/q25=6690.40 train_time:127543ms step_avg:70.86ms +[2025-09-03 05:13:34] [Rank 0] step:1801/10000 train_time:127556ms step_avg:70.82ms +[2025-09-03 05:13:34] [Rank 0] step:1801/10000 train_time:127556ms step_avg:70.82ms +[2025-09-03 05:13:36] [Rank 0] step:1821/10000 train_time:128841ms step_avg:70.75ms +[2025-09-03 05:13:36] [Rank 0] step:1821/10000 train_time:128841ms step_avg:70.75ms +[2025-09-03 05:13:37] [Rank 0] step:1841/10000 train_time:130267ms step_avg:70.76ms 
+[2025-09-03 05:13:37] [Rank 0] step:1841/10000 train_time:130267ms step_avg:70.76ms +[2025-09-03 05:13:38] [Rank 0] step:1861/10000 train_time:131695ms step_avg:70.77ms +[2025-09-03 05:13:38] [Rank 0] step:1861/10000 train_time:131695ms step_avg:70.77ms +[2025-09-03 05:13:40] [Rank 0] step:1881/10000 train_time:133123ms step_avg:70.77ms +[2025-09-03 05:13:40] [Rank 0] step:1881/10000 train_time:133123ms step_avg:70.77ms +[2025-09-03 05:13:41] [Rank 0] step:1901/10000 train_time:134550ms step_avg:70.78ms +[2025-09-03 05:13:41] [Rank 0] step:1901/10000 train_time:134550ms step_avg:70.78ms +[2025-09-03 05:13:43] [Rank 0] step:1921/10000 train_time:135980ms step_avg:70.79ms +[2025-09-03 05:13:43] [Rank 0] step:1921/10000 train_time:135980ms step_avg:70.79ms +[2025-09-03 05:13:44] [Rank 0] step:1941/10000 train_time:137411ms step_avg:70.79ms +[2025-09-03 05:13:44] [Rank 0] step:1941/10000 train_time:137411ms step_avg:70.79ms +[2025-09-03 05:13:46] [Rank 0] step:1961/10000 train_time:138839ms step_avg:70.80ms +[2025-09-03 05:13:46] [Rank 0] step:1961/10000 train_time:138839ms step_avg:70.80ms +[2025-09-03 05:13:47] [Rank 0] step:1981/10000 train_time:140268ms step_avg:70.81ms +[2025-09-03 05:13:47] [Rank 0] step:1981/10000 train_time:140268ms step_avg:70.81ms +[2025-09-03 05:13:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:13:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:14:00] [Rank 0] PRINT: step:2000/10000 val_loss:4.7361 svd_entropy: attn_qk:H=0.6552,top10E=0.40,eRank=88.9,q75/q25=45.83 attn_vo:H=0.7316,top10E=0.26,eRank=158.5,q75/q25=93.75 mlp_w1:H=0.6481,top10E=0.44,eRank=99.9,q75/q25=7.37 mlp_w2:H=0.7952,top10E=0.19,eRank=198.2,q75/q25=20.61 vo_prod:H=0.6272,top10E=0.39,eRank=68.0,q75/q25=9507.31 train_time:141841ms step_avg:70.92ms +[2025-09-03 05:14:00] [Rank 0] PRINT: step:2000/10000 val_loss:4.7361 svd_entropy: attn_qk:H=0.6552,top10E=0.40,eRank=88.9,q75/q25=45.83 attn_vo:H=0.7316,top10E=0.26,eRank=158.5,q75/q25=93.75 mlp_w1:H=0.6481,top10E=0.44,eRank=99.9,q75/q25=7.37 mlp_w2:H=0.7952,top10E=0.19,eRank=198.2,q75/q25=20.61 vo_prod:H=0.6272,top10E=0.39,eRank=68.0,q75/q25=9507.31 train_time:141841ms step_avg:70.92ms +[2025-09-03 05:14:00] [Rank 0] step:2001/10000 train_time:141854ms step_avg:70.89ms +[2025-09-03 05:14:00] [Rank 0] step:2001/10000 train_time:141854ms step_avg:70.89ms +[2025-09-03 05:14:02] [Rank 0] step:2021/10000 train_time:143147ms step_avg:70.83ms +[2025-09-03 05:14:02] [Rank 0] step:2021/10000 train_time:143147ms step_avg:70.83ms +[2025-09-03 05:14:03] [Rank 0] step:2041/10000 train_time:144697ms step_avg:70.90ms +[2025-09-03 05:14:03] [Rank 0] step:2041/10000 train_time:144697ms step_avg:70.90ms +[2025-09-03 05:14:05] [Rank 0] step:2061/10000 train_time:146124ms step_avg:70.90ms +[2025-09-03 05:14:05] [Rank 0] step:2061/10000 train_time:146124ms step_avg:70.90ms +[2025-09-03 05:14:06] [Rank 0] step:2081/10000 train_time:147552ms step_avg:70.90ms +[2025-09-03 05:14:06] [Rank 0] step:2081/10000 train_time:147552ms step_avg:70.90ms +[2025-09-03 05:14:07] [Rank 0] step:2101/10000 train_time:148981ms step_avg:70.91ms +[2025-09-03 05:14:07] [Rank 0] step:2101/10000 train_time:148981ms step_avg:70.91ms +[2025-09-03 05:14:09] [Rank 0] step:2121/10000 train_time:150409ms step_avg:70.91ms +[2025-09-03 05:14:09] [Rank 0] step:2121/10000 train_time:150409ms step_avg:70.91ms +[2025-09-03 05:14:10] 
[Rank 0] step:2141/10000 train_time:151838ms step_avg:70.92ms +[2025-09-03 05:14:10] [Rank 0] step:2141/10000 train_time:151838ms step_avg:70.92ms +[2025-09-03 05:14:12] [Rank 0] step:2161/10000 train_time:153268ms step_avg:70.92ms +[2025-09-03 05:14:12] [Rank 0] step:2161/10000 train_time:153268ms step_avg:70.92ms +[2025-09-03 05:14:13] [Rank 0] step:2181/10000 train_time:154697ms step_avg:70.93ms +[2025-09-03 05:14:13] [Rank 0] step:2181/10000 train_time:154697ms step_avg:70.93ms +[2025-09-03 05:14:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:14:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:14:26] [Rank 0] PRINT: step:2200/10000 val_loss:4.6515 svd_entropy: attn_qk:H=0.6632,top10E=0.39,eRank=92.8,q75/q25=52.35 attn_vo:H=0.7408,top10E=0.24,eRank=166.3,q75/q25=101.12 mlp_w1:H=0.6610,top10E=0.43,eRank=106.4,q75/q25=8.13 mlp_w2:H=0.8032,top10E=0.18,eRank=209.2,q75/q25=22.73 vo_prod:H=0.6373,top10E=0.38,eRank=72.8,q75/q25=11936.45 train_time:156269ms step_avg:71.03ms +[2025-09-03 05:14:26] [Rank 0] PRINT: step:2200/10000 val_loss:4.6515 svd_entropy: attn_qk:H=0.6632,top10E=0.39,eRank=92.8,q75/q25=52.35 attn_vo:H=0.7408,top10E=0.24,eRank=166.3,q75/q25=101.12 mlp_w1:H=0.6610,top10E=0.43,eRank=106.4,q75/q25=8.13 mlp_w2:H=0.8032,top10E=0.18,eRank=209.2,q75/q25=22.73 vo_prod:H=0.6373,top10E=0.38,eRank=72.8,q75/q25=11936.45 train_time:156269ms step_avg:71.03ms +[2025-09-03 05:14:26] [Rank 0] step:2201/10000 train_time:156282ms step_avg:71.00ms +[2025-09-03 05:14:26] [Rank 0] step:2201/10000 train_time:156282ms step_avg:71.00ms +[2025-09-03 05:14:28] [Rank 0] step:2221/10000 train_time:157588ms step_avg:70.95ms +[2025-09-03 05:14:28] [Rank 0] step:2221/10000 train_time:157588ms step_avg:70.95ms +[2025-09-03 05:14:29] [Rank 0] step:2241/10000 train_time:159052ms 
step_avg:70.97ms +[2025-09-03 05:14:29] [Rank 0] step:2241/10000 train_time:159052ms step_avg:70.97ms +[2025-09-03 05:14:31] [Rank 0] step:2261/10000 train_time:160568ms step_avg:71.02ms +[2025-09-03 05:14:31] [Rank 0] step:2261/10000 train_time:160568ms step_avg:71.02ms +[2025-09-03 05:14:32] [Rank 0] step:2281/10000 train_time:162039ms step_avg:71.04ms +[2025-09-03 05:14:32] [Rank 0] step:2281/10000 train_time:162039ms step_avg:71.04ms +[2025-09-03 05:14:34] [Rank 0] step:2301/10000 train_time:163511ms step_avg:71.06ms +[2025-09-03 05:14:34] [Rank 0] step:2301/10000 train_time:163511ms step_avg:71.06ms +[2025-09-03 05:14:35] [Rank 0] step:2321/10000 train_time:164984ms step_avg:71.08ms +[2025-09-03 05:14:35] [Rank 0] step:2321/10000 train_time:164984ms step_avg:71.08ms +[2025-09-03 05:14:37] [Rank 0] step:2341/10000 train_time:166457ms step_avg:71.10ms +[2025-09-03 05:14:37] [Rank 0] step:2341/10000 train_time:166457ms step_avg:71.10ms +[2025-09-03 05:14:38] [Rank 0] step:2361/10000 train_time:167929ms step_avg:71.13ms +[2025-09-03 05:14:38] [Rank 0] step:2361/10000 train_time:167929ms step_avg:71.13ms +[2025-09-03 05:14:40] [Rank 0] step:2381/10000 train_time:169402ms step_avg:71.15ms +[2025-09-03 05:14:40] [Rank 0] step:2381/10000 train_time:169402ms step_avg:71.15ms +[2025-09-03 05:14:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:14:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:14:53] [Rank 0] PRINT: step:2400/10000 val_loss:4.5711 svd_entropy: attn_qk:H=0.6698,top10E=0.38,eRank=96.2,q75/q25=59.16 attn_vo:H=0.7492,top10E=0.23,eRank=173.7,q75/q25=106.86 mlp_w1:H=0.6723,top10E=0.41,eRank=112.6,q75/q25=8.94 mlp_w2:H=0.8102,top10E=0.17,eRank=219.4,q75/q25=24.61 vo_prod:H=0.6465,top10E=0.36,eRank=77.4,q75/q25=13889.00 train_time:171023ms step_avg:71.26ms +[2025-09-03 05:14:53] [Rank 0] PRINT: step:2400/10000 val_loss:4.5711 svd_entropy: attn_qk:H=0.6698,top10E=0.38,eRank=96.2,q75/q25=59.16 attn_vo:H=0.7492,top10E=0.23,eRank=173.7,q75/q25=106.86 mlp_w1:H=0.6723,top10E=0.41,eRank=112.6,q75/q25=8.94 mlp_w2:H=0.8102,top10E=0.17,eRank=219.4,q75/q25=24.61 vo_prod:H=0.6465,top10E=0.36,eRank=77.4,q75/q25=13889.00 train_time:171023ms step_avg:71.26ms +[2025-09-03 05:14:53] [Rank 0] step:2401/10000 train_time:171036ms step_avg:71.24ms +[2025-09-03 05:14:53] [Rank 0] step:2401/10000 train_time:171036ms step_avg:71.24ms +[2025-09-03 05:14:54] [Rank 0] step:2421/10000 train_time:172361ms step_avg:71.19ms +[2025-09-03 05:14:54] [Rank 0] step:2421/10000 train_time:172361ms step_avg:71.19ms +[2025-09-03 05:14:56] [Rank 0] step:2441/10000 train_time:173832ms step_avg:71.21ms +[2025-09-03 05:14:56] [Rank 0] step:2441/10000 train_time:173832ms step_avg:71.21ms +[2025-09-03 05:14:57] [Rank 0] step:2461/10000 train_time:175302ms step_avg:71.23ms +[2025-09-03 05:14:57] [Rank 0] step:2461/10000 train_time:175302ms step_avg:71.23ms +[2025-09-03 05:14:59] [Rank 0] step:2481/10000 train_time:176773ms step_avg:71.25ms +[2025-09-03 05:14:59] [Rank 0] step:2481/10000 train_time:176773ms step_avg:71.25ms +[2025-09-03 05:15:00] [Rank 0] step:2501/10000 train_time:178246ms step_avg:71.27ms +[2025-09-03 05:15:00] [Rank 0] step:2501/10000 train_time:178246ms step_avg:71.27ms +[2025-09-03 05:15:02] [Rank 0] step:2521/10000 train_time:179718ms step_avg:71.29ms +[2025-09-03 05:15:02] [Rank 0] step:2521/10000 train_time:179718ms step_avg:71.29ms +[2025-09-03 
05:15:03] [Rank 0] step:2541/10000 train_time:181192ms step_avg:71.31ms +[2025-09-03 05:15:03] [Rank 0] step:2541/10000 train_time:181192ms step_avg:71.31ms +[2025-09-03 05:15:05] [Rank 0] step:2561/10000 train_time:182664ms step_avg:71.33ms +[2025-09-03 05:15:05] [Rank 0] step:2561/10000 train_time:182664ms step_avg:71.33ms +[2025-09-03 05:15:06] [Rank 0] step:2581/10000 train_time:184137ms step_avg:71.34ms +[2025-09-03 05:15:06] [Rank 0] step:2581/10000 train_time:184137ms step_avg:71.34ms +[2025-09-03 05:15:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:15:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:15:19] [Rank 0] PRINT: step:2600/10000 val_loss:4.5102 svd_entropy: attn_qk:H=0.6765,top10E=0.37,eRank=99.8,q75/q25=65.71 attn_vo:H=0.7566,top10E=0.22,eRank=180.7,q75/q25=110.75 mlp_w1:H=0.6823,top10E=0.40,eRank=118.5,q75/q25=9.74 mlp_w2:H=0.8161,top10E=0.17,eRank=228.3,q75/q25=26.62 vo_prod:H=0.6549,top10E=0.35,eRank=81.8,q75/q25=15862.66 train_time:185761ms step_avg:71.45ms +[2025-09-03 05:15:19] [Rank 0] PRINT: step:2600/10000 val_loss:4.5102 svd_entropy: attn_qk:H=0.6765,top10E=0.37,eRank=99.8,q75/q25=65.71 attn_vo:H=0.7566,top10E=0.22,eRank=180.7,q75/q25=110.75 mlp_w1:H=0.6823,top10E=0.40,eRank=118.5,q75/q25=9.74 mlp_w2:H=0.8161,top10E=0.17,eRank=228.3,q75/q25=26.62 vo_prod:H=0.6549,top10E=0.35,eRank=81.8,q75/q25=15862.66 train_time:185761ms step_avg:71.45ms +[2025-09-03 05:15:19] [Rank 0] step:2601/10000 train_time:185773ms step_avg:71.42ms +[2025-09-03 05:15:19] [Rank 0] step:2601/10000 train_time:185773ms step_avg:71.42ms +[2025-09-03 05:15:21] [Rank 0] step:2621/10000 train_time:187102ms step_avg:71.39ms +[2025-09-03 05:15:21] [Rank 0] step:2621/10000 train_time:187102ms step_avg:71.39ms +[2025-09-03 05:15:22] [Rank 0] step:2641/10000 train_time:188572ms 
step_avg:71.40ms +[2025-09-03 05:15:22] [Rank 0] step:2641/10000 train_time:188572ms step_avg:71.40ms +[2025-09-03 05:15:24] [Rank 0] step:2661/10000 train_time:190043ms step_avg:71.42ms +[2025-09-03 05:15:24] [Rank 0] step:2661/10000 train_time:190043ms step_avg:71.42ms +[2025-09-03 05:15:25] [Rank 0] step:2681/10000 train_time:191515ms step_avg:71.43ms +[2025-09-03 05:15:25] [Rank 0] step:2681/10000 train_time:191515ms step_avg:71.43ms +[2025-09-03 05:15:27] [Rank 0] step:2701/10000 train_time:192988ms step_avg:71.45ms +[2025-09-03 05:15:27] [Rank 0] step:2701/10000 train_time:192988ms step_avg:71.45ms +[2025-09-03 05:15:28] [Rank 0] step:2721/10000 train_time:194459ms step_avg:71.47ms +[2025-09-03 05:15:28] [Rank 0] step:2721/10000 train_time:194459ms step_avg:71.47ms +[2025-09-03 05:15:30] [Rank 0] step:2741/10000 train_time:195931ms step_avg:71.48ms +[2025-09-03 05:15:30] [Rank 0] step:2741/10000 train_time:195931ms step_avg:71.48ms +[2025-09-03 05:15:31] [Rank 0] step:2761/10000 train_time:197404ms step_avg:71.50ms +[2025-09-03 05:15:31] [Rank 0] step:2761/10000 train_time:197404ms step_avg:71.50ms +[2025-09-03 05:15:32] [Rank 0] step:2781/10000 train_time:198876ms step_avg:71.51ms +[2025-09-03 05:15:32] [Rank 0] step:2781/10000 train_time:198876ms step_avg:71.51ms +[2025-09-03 05:15:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:15:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:15:46] [Rank 0] PRINT: step:2800/10000 val_loss:4.4631 svd_entropy: attn_qk:H=0.6827,top10E=0.36,eRank=103.3,q75/q25=71.83 attn_vo:H=0.7635,top10E=0.22,eRank=187.5,q75/q25=113.47 mlp_w1:H=0.6913,top10E=0.39,eRank=124.2,q75/q25=10.53 mlp_w2:H=0.8213,top10E=0.16,eRank=236.6,q75/q25=28.31 vo_prod:H=0.6627,top10E=0.33,eRank=86.2,q75/q25=16866.56 train_time:200496ms step_avg:71.61ms +[2025-09-03 05:15:46] [Rank 0] PRINT: step:2800/10000 val_loss:4.4631 svd_entropy: attn_qk:H=0.6827,top10E=0.36,eRank=103.3,q75/q25=71.83 attn_vo:H=0.7635,top10E=0.22,eRank=187.5,q75/q25=113.47 mlp_w1:H=0.6913,top10E=0.39,eRank=124.2,q75/q25=10.53 mlp_w2:H=0.8213,top10E=0.16,eRank=236.6,q75/q25=28.31 vo_prod:H=0.6627,top10E=0.33,eRank=86.2,q75/q25=16866.56 train_time:200496ms step_avg:71.61ms +[2025-09-03 05:15:46] [Rank 0] step:2801/10000 train_time:200508ms step_avg:71.58ms +[2025-09-03 05:15:46] [Rank 0] step:2801/10000 train_time:200508ms step_avg:71.58ms +[2025-09-03 05:15:47] [Rank 0] step:2821/10000 train_time:201833ms step_avg:71.55ms +[2025-09-03 05:15:47] [Rank 0] step:2821/10000 train_time:201833ms step_avg:71.55ms +[2025-09-03 05:15:49] [Rank 0] step:2841/10000 train_time:203302ms step_avg:71.56ms +[2025-09-03 05:15:49] [Rank 0] step:2841/10000 train_time:203302ms step_avg:71.56ms +[2025-09-03 05:15:50] [Rank 0] step:2861/10000 train_time:204773ms step_avg:71.57ms +[2025-09-03 05:15:50] [Rank 0] step:2861/10000 train_time:204773ms step_avg:71.57ms +[2025-09-03 05:15:52] [Rank 0] step:2881/10000 train_time:206243ms step_avg:71.59ms +[2025-09-03 05:15:52] [Rank 0] step:2881/10000 train_time:206243ms step_avg:71.59ms +[2025-09-03 05:15:53] [Rank 0] step:2901/10000 train_time:207715ms step_avg:71.60ms +[2025-09-03 05:15:53] [Rank 0] step:2901/10000 train_time:207715ms step_avg:71.60ms +[2025-09-03 05:15:54] [Rank 0] step:2921/10000 train_time:209187ms step_avg:71.61ms +[2025-09-03 05:15:54] [Rank 0] step:2921/10000 train_time:209187ms step_avg:71.61ms +[2025-09-03 
05:15:56] [Rank 0] step:2941/10000 train_time:210658ms step_avg:71.63ms +[2025-09-03 05:15:56] [Rank 0] step:2941/10000 train_time:210658ms step_avg:71.63ms +[2025-09-03 05:15:57] [Rank 0] step:2961/10000 train_time:212130ms step_avg:71.64ms +[2025-09-03 05:15:57] [Rank 0] step:2961/10000 train_time:212130ms step_avg:71.64ms +[2025-09-03 05:15:59] [Rank 0] step:2981/10000 train_time:213607ms step_avg:71.66ms +[2025-09-03 05:15:59] [Rank 0] step:2981/10000 train_time:213607ms step_avg:71.66ms +[2025-09-03 05:16:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:16:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:16:12] [Rank 0] PRINT: step:3000/10000 val_loss:4.4164 svd_entropy: attn_qk:H=0.6882,top10E=0.35,eRank=106.6,q75/q25=77.15 attn_vo:H=0.7696,top10E=0.21,eRank=193.7,q75/q25=114.13 mlp_w1:H=0.6995,top10E=0.38,eRank=129.5,q75/q25=11.33 mlp_w2:H=0.8256,top10E=0.16,eRank=243.7,q75/q25=29.84 vo_prod:H=0.6695,top10E=0.32,eRank=90.2,q75/q25=16742.41 train_time:215236ms step_avg:71.75ms +[2025-09-03 05:16:12] [Rank 0] PRINT: step:3000/10000 val_loss:4.4164 svd_entropy: attn_qk:H=0.6882,top10E=0.35,eRank=106.6,q75/q25=77.15 attn_vo:H=0.7696,top10E=0.21,eRank=193.7,q75/q25=114.13 mlp_w1:H=0.6995,top10E=0.38,eRank=129.5,q75/q25=11.33 mlp_w2:H=0.8256,top10E=0.16,eRank=243.7,q75/q25=29.84 vo_prod:H=0.6695,top10E=0.32,eRank=90.2,q75/q25=16742.41 train_time:215236ms step_avg:71.75ms +[2025-09-03 05:16:12] [Rank 0] step:3001/10000 train_time:215248ms step_avg:71.73ms +[2025-09-03 05:16:12] [Rank 0] step:3001/10000 train_time:215248ms step_avg:71.73ms +[2025-09-03 05:16:14] [Rank 0] step:3021/10000 train_time:216601ms step_avg:71.70ms +[2025-09-03 05:16:14] [Rank 0] step:3021/10000 train_time:216601ms step_avg:71.70ms +[2025-09-03 05:16:15] [Rank 0] step:3041/10000 train_time:218078ms 
step_avg:71.71ms +[2025-09-03 05:16:15] [Rank 0] step:3041/10000 train_time:218078ms step_avg:71.71ms +[2025-09-03 05:16:17] [Rank 0] step:3061/10000 train_time:219557ms step_avg:71.73ms +[2025-09-03 05:16:17] [Rank 0] step:3061/10000 train_time:219557ms step_avg:71.73ms +[2025-09-03 05:16:18] [Rank 0] step:3081/10000 train_time:221036ms step_avg:71.74ms +[2025-09-03 05:16:18] [Rank 0] step:3081/10000 train_time:221036ms step_avg:71.74ms +[2025-09-03 05:16:20] [Rank 0] step:3101/10000 train_time:222516ms step_avg:71.76ms +[2025-09-03 05:16:20] [Rank 0] step:3101/10000 train_time:222516ms step_avg:71.76ms +[2025-09-03 05:16:21] [Rank 0] step:3121/10000 train_time:223997ms step_avg:71.77ms +[2025-09-03 05:16:21] [Rank 0] step:3121/10000 train_time:223997ms step_avg:71.77ms +[2025-09-03 05:16:23] [Rank 0] step:3141/10000 train_time:225477ms step_avg:71.79ms +[2025-09-03 05:16:23] [Rank 0] step:3141/10000 train_time:225477ms step_avg:71.79ms +[2025-09-03 05:16:24] [Rank 0] step:3161/10000 train_time:226957ms step_avg:71.80ms +[2025-09-03 05:16:24] [Rank 0] step:3161/10000 train_time:226957ms step_avg:71.80ms +[2025-09-03 05:16:25] [Rank 0] step:3181/10000 train_time:228440ms step_avg:71.81ms +[2025-09-03 05:16:25] [Rank 0] step:3181/10000 train_time:228440ms step_avg:71.81ms +[2025-09-03 05:16:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:16:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:16:39] [Rank 0] PRINT: step:3200/10000 val_loss:4.3784 svd_entropy: attn_qk:H=0.6932,top10E=0.34,eRank=109.6,q75/q25=82.22 attn_vo:H=0.7750,top10E=0.20,eRank=199.6,q75/q25=113.31 mlp_w1:H=0.7068,top10E=0.37,eRank=134.6,q75/q25=12.14 mlp_w2:H=0.8296,top10E=0.15,eRank=250.5,q75/q25=31.44 vo_prod:H=0.6758,top10E=0.31,eRank=94.1,q75/q25=16532.20 train_time:230070ms step_avg:71.90ms +[2025-09-03 05:16:39] [Rank 0] PRINT: step:3200/10000 val_loss:4.3784 svd_entropy: attn_qk:H=0.6932,top10E=0.34,eRank=109.6,q75/q25=82.22 attn_vo:H=0.7750,top10E=0.20,eRank=199.6,q75/q25=113.31 mlp_w1:H=0.7068,top10E=0.37,eRank=134.6,q75/q25=12.14 mlp_w2:H=0.8296,top10E=0.15,eRank=250.5,q75/q25=31.44 vo_prod:H=0.6758,top10E=0.31,eRank=94.1,q75/q25=16532.20 train_time:230070ms step_avg:71.90ms +[2025-09-03 05:16:39] [Rank 0] step:3201/10000 train_time:230081ms step_avg:71.88ms +[2025-09-03 05:16:39] [Rank 0] step:3201/10000 train_time:230081ms step_avg:71.88ms +[2025-09-03 05:16:40] [Rank 0] step:3221/10000 train_time:231421ms step_avg:71.85ms +[2025-09-03 05:16:40] [Rank 0] step:3221/10000 train_time:231421ms step_avg:71.85ms +[2025-09-03 05:16:42] [Rank 0] step:3241/10000 train_time:232901ms step_avg:71.86ms +[2025-09-03 05:16:42] [Rank 0] step:3241/10000 train_time:232901ms step_avg:71.86ms +[2025-09-03 05:16:43] [Rank 0] step:3261/10000 train_time:234380ms step_avg:71.87ms +[2025-09-03 05:16:43] [Rank 0] step:3261/10000 train_time:234380ms step_avg:71.87ms +[2025-09-03 05:16:45] [Rank 0] step:3281/10000 train_time:235861ms step_avg:71.89ms +[2025-09-03 05:16:45] [Rank 0] step:3281/10000 train_time:235861ms step_avg:71.89ms +[2025-09-03 05:16:46] [Rank 0] step:3301/10000 train_time:237341ms step_avg:71.90ms +[2025-09-03 05:16:46] [Rank 0] step:3301/10000 train_time:237341ms step_avg:71.90ms +[2025-09-03 05:16:48] [Rank 0] step:3321/10000 train_time:238822ms step_avg:71.91ms +[2025-09-03 05:16:48] [Rank 0] step:3321/10000 train_time:238822ms step_avg:71.91ms +[2025-09-03 
05:16:49] [Rank 0] step:3341/10000 train_time:240302ms step_avg:71.93ms +[2025-09-03 05:16:49] [Rank 0] step:3341/10000 train_time:240302ms step_avg:71.93ms +[2025-09-03 05:16:51] [Rank 0] step:3361/10000 train_time:241783ms step_avg:71.94ms +[2025-09-03 05:16:51] [Rank 0] step:3361/10000 train_time:241783ms step_avg:71.94ms +[2025-09-03 05:16:52] [Rank 0] step:3381/10000 train_time:243265ms step_avg:71.95ms +[2025-09-03 05:16:52] [Rank 0] step:3381/10000 train_time:243265ms step_avg:71.95ms +[2025-09-03 05:16:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:16:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:17:05] [Rank 0] PRINT: step:3400/10000 val_loss:4.3378 svd_entropy: attn_qk:H=0.6980,top10E=0.33,eRank=112.6,q75/q25=86.81 attn_vo:H=0.7803,top10E=0.20,eRank=205.4,q75/q25=111.81 mlp_w1:H=0.7137,top10E=0.36,eRank=139.6,q75/q25=12.94 mlp_w2:H=0.8332,top10E=0.15,eRank=256.7,q75/q25=32.94 vo_prod:H=0.6819,top10E=0.30,eRank=98.0,q75/q25=16107.83 train_time:244895ms step_avg:72.03ms +[2025-09-03 05:17:05] [Rank 0] PRINT: step:3400/10000 val_loss:4.3378 svd_entropy: attn_qk:H=0.6980,top10E=0.33,eRank=112.6,q75/q25=86.81 attn_vo:H=0.7803,top10E=0.20,eRank=205.4,q75/q25=111.81 mlp_w1:H=0.7137,top10E=0.36,eRank=139.6,q75/q25=12.94 mlp_w2:H=0.8332,top10E=0.15,eRank=256.7,q75/q25=32.94 vo_prod:H=0.6819,top10E=0.30,eRank=98.0,q75/q25=16107.83 train_time:244895ms step_avg:72.03ms +[2025-09-03 05:17:05] [Rank 0] step:3401/10000 train_time:244906ms step_avg:72.01ms +[2025-09-03 05:17:05] [Rank 0] step:3401/10000 train_time:244906ms step_avg:72.01ms +[2025-09-03 05:17:07] [Rank 0] step:3421/10000 train_time:246256ms step_avg:71.98ms +[2025-09-03 05:17:07] [Rank 0] step:3421/10000 train_time:246256ms step_avg:71.98ms +[2025-09-03 05:17:08] [Rank 0] step:3441/10000 train_time:247734ms 
step_avg:71.99ms +[2025-09-03 05:17:08] [Rank 0] step:3441/10000 train_time:247734ms step_avg:71.99ms +[2025-09-03 05:17:10] [Rank 0] step:3461/10000 train_time:249213ms step_avg:72.01ms +[2025-09-03 05:17:10] [Rank 0] step:3461/10000 train_time:249213ms step_avg:72.01ms +[2025-09-03 05:17:11] [Rank 0] step:3481/10000 train_time:250693ms step_avg:72.02ms +[2025-09-03 05:17:11] [Rank 0] step:3481/10000 train_time:250693ms step_avg:72.02ms +[2025-09-03 05:17:13] [Rank 0] step:3501/10000 train_time:252174ms step_avg:72.03ms +[2025-09-03 05:17:13] [Rank 0] step:3501/10000 train_time:252174ms step_avg:72.03ms +[2025-09-03 05:17:14] [Rank 0] step:3521/10000 train_time:253654ms step_avg:72.04ms +[2025-09-03 05:17:14] [Rank 0] step:3521/10000 train_time:253654ms step_avg:72.04ms +[2025-09-03 05:17:16] [Rank 0] step:3541/10000 train_time:255134ms step_avg:72.05ms +[2025-09-03 05:17:16] [Rank 0] step:3541/10000 train_time:255134ms step_avg:72.05ms +[2025-09-03 05:17:17] [Rank 0] step:3561/10000 train_time:256613ms step_avg:72.06ms +[2025-09-03 05:17:17] [Rank 0] step:3561/10000 train_time:256613ms step_avg:72.06ms +[2025-09-03 05:17:19] [Rank 0] step:3581/10000 train_time:258094ms step_avg:72.07ms +[2025-09-03 05:17:19] [Rank 0] step:3581/10000 train_time:258094ms step_avg:72.07ms +[2025-09-03 05:17:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:17:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:17:32] [Rank 0] PRINT: step:3600/10000 val_loss:4.3209 svd_entropy: attn_qk:H=0.7024,top10E=0.33,eRank=115.6,q75/q25=90.60 attn_vo:H=0.7849,top10E=0.19,eRank=210.8,q75/q25=110.43 mlp_w1:H=0.7199,top10E=0.35,eRank=144.3,q75/q25=13.64 mlp_w2:H=0.8365,top10E=0.14,eRank=262.6,q75/q25=33.98 vo_prod:H=0.6872,top10E=0.30,eRank=101.5,q75/q25=15389.75 train_time:259723ms step_avg:72.15ms +[2025-09-03 05:17:32] [Rank 0] PRINT: step:3600/10000 val_loss:4.3209 svd_entropy: attn_qk:H=0.7024,top10E=0.33,eRank=115.6,q75/q25=90.60 attn_vo:H=0.7849,top10E=0.19,eRank=210.8,q75/q25=110.43 mlp_w1:H=0.7199,top10E=0.35,eRank=144.3,q75/q25=13.64 mlp_w2:H=0.8365,top10E=0.14,eRank=262.6,q75/q25=33.98 vo_prod:H=0.6872,top10E=0.30,eRank=101.5,q75/q25=15389.75 train_time:259723ms step_avg:72.15ms +[2025-09-03 05:17:32] [Rank 0] step:3601/10000 train_time:259735ms step_avg:72.13ms +[2025-09-03 05:17:32] [Rank 0] step:3601/10000 train_time:259735ms step_avg:72.13ms +[2025-09-03 05:17:33] [Rank 0] step:3621/10000 train_time:261066ms step_avg:72.10ms +[2025-09-03 05:17:33] [Rank 0] step:3621/10000 train_time:261066ms step_avg:72.10ms +[2025-09-03 05:17:35] [Rank 0] step:3641/10000 train_time:262543ms step_avg:72.11ms +[2025-09-03 05:17:35] [Rank 0] step:3641/10000 train_time:262543ms step_avg:72.11ms +[2025-09-03 05:17:36] [Rank 0] step:3661/10000 train_time:264021ms step_avg:72.12ms +[2025-09-03 05:17:36] [Rank 0] step:3661/10000 train_time:264021ms step_avg:72.12ms +[2025-09-03 05:17:38] [Rank 0] step:3681/10000 train_time:265501ms step_avg:72.13ms +[2025-09-03 05:17:38] [Rank 0] step:3681/10000 train_time:265501ms step_avg:72.13ms +[2025-09-03 05:17:39] [Rank 0] step:3701/10000 train_time:266980ms step_avg:72.14ms +[2025-09-03 05:17:39] [Rank 0] step:3701/10000 train_time:266980ms step_avg:72.14ms +[2025-09-03 05:17:41] [Rank 0] step:3721/10000 train_time:268505ms step_avg:72.16ms +[2025-09-03 05:17:41] [Rank 0] step:3721/10000 train_time:268505ms step_avg:72.16ms +[2025-09-03 
05:17:42] [Rank 0] step:3741/10000 train_time:270022ms step_avg:72.18ms +[2025-09-03 05:17:42] [Rank 0] step:3741/10000 train_time:270022ms step_avg:72.18ms +[2025-09-03 05:17:44] [Rank 0] step:3761/10000 train_time:271538ms step_avg:72.20ms +[2025-09-03 05:17:44] [Rank 0] step:3761/10000 train_time:271538ms step_avg:72.20ms +[2025-09-03 05:17:45] [Rank 0] step:3781/10000 train_time:273054ms step_avg:72.22ms +[2025-09-03 05:17:45] [Rank 0] step:3781/10000 train_time:273054ms step_avg:72.22ms +[2025-09-03 05:17:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:17:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:17:59] [Rank 0] PRINT: step:3800/10000 val_loss:4.2669 svd_entropy: attn_qk:H=0.7061,top10E=0.32,eRank=118.1,q75/q25=93.67 attn_vo:H=0.7892,top10E=0.19,eRank=216.0,q75/q25=109.07 mlp_w1:H=0.7255,top10E=0.34,eRank=148.7,q75/q25=14.32 mlp_w2:H=0.8394,top10E=0.14,eRank=267.9,q75/q25=34.99 vo_prod:H=0.6922,top10E=0.29,eRank=105.0,q75/q25=15002.69 train_time:274722ms step_avg:72.30ms +[2025-09-03 05:17:59] [Rank 0] PRINT: step:3800/10000 val_loss:4.2669 svd_entropy: attn_qk:H=0.7061,top10E=0.32,eRank=118.1,q75/q25=93.67 attn_vo:H=0.7892,top10E=0.19,eRank=216.0,q75/q25=109.07 mlp_w1:H=0.7255,top10E=0.34,eRank=148.7,q75/q25=14.32 mlp_w2:H=0.8394,top10E=0.14,eRank=267.9,q75/q25=34.99 vo_prod:H=0.6922,top10E=0.29,eRank=105.0,q75/q25=15002.69 train_time:274722ms step_avg:72.30ms +[2025-09-03 05:17:59] [Rank 0] step:3801/10000 train_time:274734ms step_avg:72.28ms +[2025-09-03 05:17:59] [Rank 0] step:3801/10000 train_time:274734ms step_avg:72.28ms +[2025-09-03 05:18:00] [Rank 0] step:3821/10000 train_time:276106ms step_avg:72.26ms +[2025-09-03 05:18:00] [Rank 0] step:3821/10000 train_time:276106ms step_avg:72.26ms +[2025-09-03 05:18:02] [Rank 0] step:3841/10000 
train_time:277623ms step_avg:72.28ms +[2025-09-03 05:18:02] [Rank 0] step:3841/10000 train_time:277623ms step_avg:72.28ms +[2025-09-03 05:18:03] [Rank 0] step:3861/10000 train_time:279139ms step_avg:72.30ms +[2025-09-03 05:18:03] [Rank 0] step:3861/10000 train_time:279139ms step_avg:72.30ms +[2025-09-03 05:18:05] [Rank 0] step:3881/10000 train_time:280653ms step_avg:72.31ms +[2025-09-03 05:18:05] [Rank 0] step:3881/10000 train_time:280653ms step_avg:72.31ms +[2025-09-03 05:18:06] [Rank 0] step:3901/10000 train_time:282169ms step_avg:72.33ms +[2025-09-03 05:18:06] [Rank 0] step:3901/10000 train_time:282169ms step_avg:72.33ms +[2025-09-03 05:18:08] [Rank 0] step:3921/10000 train_time:283685ms step_avg:72.35ms +[2025-09-03 05:18:08] [Rank 0] step:3921/10000 train_time:283685ms step_avg:72.35ms +[2025-09-03 05:18:09] [Rank 0] step:3941/10000 train_time:285201ms step_avg:72.37ms +[2025-09-03 05:18:09] [Rank 0] step:3941/10000 train_time:285201ms step_avg:72.37ms +[2025-09-03 05:18:11] [Rank 0] step:3961/10000 train_time:286715ms step_avg:72.38ms +[2025-09-03 05:18:11] [Rank 0] step:3961/10000 train_time:286715ms step_avg:72.38ms +[2025-09-03 05:18:12] [Rank 0] step:3981/10000 train_time:288229ms step_avg:72.40ms +[2025-09-03 05:18:12] [Rank 0] step:3981/10000 train_time:288229ms step_avg:72.40ms +[2025-09-03 05:18:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:18:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:18:26] [Rank 0] PRINT: step:4000/10000 val_loss:4.2407 svd_entropy: attn_qk:H=0.7100,top10E=0.32,eRank=120.8,q75/q25=96.05 attn_vo:H=0.7931,top10E=0.18,eRank=220.7,q75/q25=105.01 mlp_w1:H=0.7309,top10E=0.33,eRank=153.1,q75/q25=15.00 mlp_w2:H=0.8421,top10E=0.14,eRank=273.0,q75/q25=35.89 vo_prod:H=0.6967,top10E=0.28,eRank=108.3,q75/q25=13685.29 train_time:289895ms step_avg:72.47ms +[2025-09-03 05:18:26] [Rank 0] PRINT: step:4000/10000 val_loss:4.2407 svd_entropy: attn_qk:H=0.7100,top10E=0.32,eRank=120.8,q75/q25=96.05 attn_vo:H=0.7931,top10E=0.18,eRank=220.7,q75/q25=105.01 mlp_w1:H=0.7309,top10E=0.33,eRank=153.1,q75/q25=15.00 mlp_w2:H=0.8421,top10E=0.14,eRank=273.0,q75/q25=35.89 vo_prod:H=0.6967,top10E=0.28,eRank=108.3,q75/q25=13685.29 train_time:289895ms step_avg:72.47ms +[2025-09-03 05:18:26] [Rank 0] step:4001/10000 train_time:289907ms step_avg:72.46ms +[2025-09-03 05:18:26] [Rank 0] step:4001/10000 train_time:289907ms step_avg:72.46ms +[2025-09-03 05:18:27] [Rank 0] step:4021/10000 train_time:291291ms step_avg:72.44ms +[2025-09-03 05:18:27] [Rank 0] step:4021/10000 train_time:291291ms step_avg:72.44ms +[2025-09-03 05:18:29] [Rank 0] step:4041/10000 train_time:292807ms step_avg:72.46ms +[2025-09-03 05:18:29] [Rank 0] step:4041/10000 train_time:292807ms step_avg:72.46ms +[2025-09-03 05:18:31] [Rank 0] step:4061/10000 train_time:294323ms step_avg:72.48ms +[2025-09-03 05:18:31] [Rank 0] step:4061/10000 train_time:294323ms step_avg:72.48ms +[2025-09-03 05:18:32] [Rank 0] step:4081/10000 train_time:295946ms step_avg:72.52ms +[2025-09-03 05:18:32] [Rank 0] step:4081/10000 train_time:295946ms step_avg:72.52ms +[2025-09-03 05:18:34] [Rank 0] step:4101/10000 train_time:297462ms step_avg:72.53ms +[2025-09-03 05:18:34] [Rank 0] step:4101/10000 train_time:297462ms step_avg:72.53ms +[2025-09-03 05:18:35] [Rank 0] step:4121/10000 train_time:298978ms step_avg:72.55ms +[2025-09-03 05:18:35] [Rank 0] step:4121/10000 train_time:298978ms step_avg:72.55ms +[2025-09-03 
05:18:37] [Rank 0] step:4141/10000 train_time:300495ms step_avg:72.57ms +[2025-09-03 05:18:37] [Rank 0] step:4141/10000 train_time:300495ms step_avg:72.57ms +[2025-09-03 05:18:38] [Rank 0] step:4161/10000 train_time:302010ms step_avg:72.58ms +[2025-09-03 05:18:38] [Rank 0] step:4161/10000 train_time:302010ms step_avg:72.58ms +[2025-09-03 05:18:40] [Rank 0] step:4181/10000 train_time:303529ms step_avg:72.60ms +[2025-09-03 05:18:40] [Rank 0] step:4181/10000 train_time:303529ms step_avg:72.60ms +[2025-09-03 05:18:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:18:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:18:53] [Rank 0] PRINT: step:4200/10000 val_loss:4.2181 svd_entropy: attn_qk:H=0.7134,top10E=0.31,eRank=123.2,q75/q25=98.39 attn_vo:H=0.7968,top10E=0.18,eRank=225.4,q75/q25=102.16 mlp_w1:H=0.7358,top10E=0.33,eRank=157.4,q75/q25=15.56 mlp_w2:H=0.8446,top10E=0.13,eRank=277.8,q75/q25=36.62 vo_prod:H=0.7012,top10E=0.28,eRank=111.5,q75/q25=12695.78 train_time:305199ms step_avg:72.67ms +[2025-09-03 05:18:53] [Rank 0] PRINT: step:4200/10000 val_loss:4.2181 svd_entropy: attn_qk:H=0.7134,top10E=0.31,eRank=123.2,q75/q25=98.39 attn_vo:H=0.7968,top10E=0.18,eRank=225.4,q75/q25=102.16 mlp_w1:H=0.7358,top10E=0.33,eRank=157.4,q75/q25=15.56 mlp_w2:H=0.8446,top10E=0.13,eRank=277.8,q75/q25=36.62 vo_prod:H=0.7012,top10E=0.28,eRank=111.5,q75/q25=12695.78 train_time:305199ms step_avg:72.67ms +[2025-09-03 05:18:53] [Rank 0] step:4201/10000 train_time:305211ms step_avg:72.65ms +[2025-09-03 05:18:53] [Rank 0] step:4201/10000 train_time:305211ms step_avg:72.65ms +[2025-09-03 05:18:55] [Rank 0] step:4221/10000 train_time:306595ms step_avg:72.64ms +[2025-09-03 05:18:55] [Rank 0] step:4221/10000 train_time:306595ms step_avg:72.64ms +[2025-09-03 05:18:56] [Rank 0] step:4241/10000 
train_time:308111ms step_avg:72.65ms +[2025-09-03 05:18:56] [Rank 0] step:4241/10000 train_time:308111ms step_avg:72.65ms +[2025-09-03 05:18:58] [Rank 0] step:4261/10000 train_time:309626ms step_avg:72.67ms +[2025-09-03 05:18:58] [Rank 0] step:4261/10000 train_time:309626ms step_avg:72.67ms +[2025-09-03 05:18:59] [Rank 0] step:4281/10000 train_time:311141ms step_avg:72.68ms +[2025-09-03 05:18:59] [Rank 0] step:4281/10000 train_time:311141ms step_avg:72.68ms +[2025-09-03 05:19:01] [Rank 0] step:4301/10000 train_time:312660ms step_avg:72.69ms +[2025-09-03 05:19:01] [Rank 0] step:4301/10000 train_time:312660ms step_avg:72.69ms +[2025-09-03 05:19:02] [Rank 0] step:4321/10000 train_time:314178ms step_avg:72.71ms +[2025-09-03 05:19:02] [Rank 0] step:4321/10000 train_time:314178ms step_avg:72.71ms +[2025-09-03 05:19:04] [Rank 0] step:4341/10000 train_time:315692ms step_avg:72.72ms +[2025-09-03 05:19:04] [Rank 0] step:4341/10000 train_time:315692ms step_avg:72.72ms +[2025-09-03 05:19:05] [Rank 0] step:4361/10000 train_time:317209ms step_avg:72.74ms +[2025-09-03 05:19:05] [Rank 0] step:4361/10000 train_time:317209ms step_avg:72.74ms +[2025-09-03 05:19:07] [Rank 0] step:4381/10000 train_time:318725ms step_avg:72.75ms +[2025-09-03 05:19:07] [Rank 0] step:4381/10000 train_time:318725ms step_avg:72.75ms +[2025-09-03 05:19:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:19:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:19:20] [Rank 0] PRINT: step:4400/10000 val_loss:4.1957 svd_entropy: attn_qk:H=0.7168,top10E=0.31,eRank=125.8,q75/q25=101.19 attn_vo:H=0.8003,top10E=0.17,eRank=229.8,q75/q25=99.13 mlp_w1:H=0.7407,top10E=0.32,eRank=161.6,q75/q25=16.04 mlp_w2:H=0.8471,top10E=0.13,eRank=282.6,q75/q25=36.68 vo_prod:H=0.7052,top10E=0.27,eRank=114.6,q75/q25=11839.41 train_time:320392ms step_avg:72.82ms +[2025-09-03 05:19:20] [Rank 0] PRINT: step:4400/10000 val_loss:4.1957 svd_entropy: attn_qk:H=0.7168,top10E=0.31,eRank=125.8,q75/q25=101.19 attn_vo:H=0.8003,top10E=0.17,eRank=229.8,q75/q25=99.13 mlp_w1:H=0.7407,top10E=0.32,eRank=161.6,q75/q25=16.04 mlp_w2:H=0.8471,top10E=0.13,eRank=282.6,q75/q25=36.68 vo_prod:H=0.7052,top10E=0.27,eRank=114.6,q75/q25=11839.41 train_time:320392ms step_avg:72.82ms +[2025-09-03 05:19:20] [Rank 0] step:4401/10000 train_time:320405ms step_avg:72.80ms +[2025-09-03 05:19:20] [Rank 0] step:4401/10000 train_time:320405ms step_avg:72.80ms +[2025-09-03 05:19:22] [Rank 0] step:4421/10000 train_time:321773ms step_avg:72.78ms +[2025-09-03 05:19:22] [Rank 0] step:4421/10000 train_time:321773ms step_avg:72.78ms +[2025-09-03 05:19:23] [Rank 0] step:4441/10000 train_time:323286ms step_avg:72.80ms +[2025-09-03 05:19:23] [Rank 0] step:4441/10000 train_time:323286ms step_avg:72.80ms +[2025-09-03 05:19:25] [Rank 0] step:4461/10000 train_time:324806ms step_avg:72.81ms +[2025-09-03 05:19:25] [Rank 0] step:4461/10000 train_time:324806ms step_avg:72.81ms +[2025-09-03 05:19:26] [Rank 0] step:4481/10000 train_time:326328ms step_avg:72.82ms +[2025-09-03 05:19:26] [Rank 0] step:4481/10000 train_time:326328ms step_avg:72.82ms +[2025-09-03 05:19:28] [Rank 0] step:4501/10000 train_time:327851ms step_avg:72.84ms +[2025-09-03 05:19:28] [Rank 0] step:4501/10000 train_time:327851ms step_avg:72.84ms +[2025-09-03 05:19:29] [Rank 0] step:4521/10000 train_time:329371ms step_avg:72.85ms +[2025-09-03 05:19:29] [Rank 0] step:4521/10000 train_time:329371ms step_avg:72.85ms +[2025-09-03 
05:19:31] [Rank 0] step:4541/10000 train_time:330891ms step_avg:72.87ms +[2025-09-03 05:19:31] [Rank 0] step:4541/10000 train_time:330891ms step_avg:72.87ms +[2025-09-03 05:19:32] [Rank 0] step:4561/10000 train_time:332415ms step_avg:72.88ms +[2025-09-03 05:19:32] [Rank 0] step:4561/10000 train_time:332415ms step_avg:72.88ms +[2025-09-03 05:19:34] [Rank 0] step:4581/10000 train_time:333938ms step_avg:72.90ms +[2025-09-03 05:19:34] [Rank 0] step:4581/10000 train_time:333938ms step_avg:72.90ms +[2025-09-03 05:19:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:19:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:19:47] [Rank 0] PRINT: step:4600/10000 val_loss:4.1657 svd_entropy: attn_qk:H=0.7201,top10E=0.30,eRank=128.1,q75/q25=102.55 attn_vo:H=0.8036,top10E=0.17,eRank=234.2,q75/q25=95.67 mlp_w1:H=0.7450,top10E=0.31,eRank=165.5,q75/q25=16.59 mlp_w2:H=0.8493,top10E=0.13,eRank=287.0,q75/q25=37.22 vo_prod:H=0.7093,top10E=0.27,eRank=117.7,q75/q25=10677.05 train_time:335615ms step_avg:72.96ms +[2025-09-03 05:19:47] [Rank 0] PRINT: step:4600/10000 val_loss:4.1657 svd_entropy: attn_qk:H=0.7201,top10E=0.30,eRank=128.1,q75/q25=102.55 attn_vo:H=0.8036,top10E=0.17,eRank=234.2,q75/q25=95.67 mlp_w1:H=0.7450,top10E=0.31,eRank=165.5,q75/q25=16.59 mlp_w2:H=0.8493,top10E=0.13,eRank=287.0,q75/q25=37.22 vo_prod:H=0.7093,top10E=0.27,eRank=117.7,q75/q25=10677.05 train_time:335615ms step_avg:72.96ms +[2025-09-03 05:19:47] [Rank 0] step:4601/10000 train_time:335627ms step_avg:72.95ms +[2025-09-03 05:19:47] [Rank 0] step:4601/10000 train_time:335627ms step_avg:72.95ms +[2025-09-03 05:19:49] [Rank 0] step:4621/10000 train_time:337011ms step_avg:72.93ms +[2025-09-03 05:19:49] [Rank 0] step:4621/10000 train_time:337011ms step_avg:72.93ms +[2025-09-03 05:19:50] [Rank 0] step:4641/10000 
train_time:338532ms step_avg:72.94ms +[2025-09-03 05:19:50] [Rank 0] step:4641/10000 train_time:338532ms step_avg:72.94ms +[2025-09-03 05:19:52] [Rank 0] step:4661/10000 train_time:340055ms step_avg:72.96ms +[2025-09-03 05:19:52] [Rank 0] step:4661/10000 train_time:340055ms step_avg:72.96ms +[2025-09-03 05:19:53] [Rank 0] step:4681/10000 train_time:341577ms step_avg:72.97ms +[2025-09-03 05:19:53] [Rank 0] step:4681/10000 train_time:341577ms step_avg:72.97ms +[2025-09-03 05:19:55] [Rank 0] step:4701/10000 train_time:343101ms step_avg:72.98ms +[2025-09-03 05:19:55] [Rank 0] step:4701/10000 train_time:343101ms step_avg:72.98ms +[2025-09-03 05:19:56] [Rank 0] step:4721/10000 train_time:344622ms step_avg:73.00ms +[2025-09-03 05:19:56] [Rank 0] step:4721/10000 train_time:344622ms step_avg:73.00ms +[2025-09-03 05:19:58] [Rank 0] step:4741/10000 train_time:346155ms step_avg:73.01ms +[2025-09-03 05:19:58] [Rank 0] step:4741/10000 train_time:346155ms step_avg:73.01ms +[2025-09-03 05:19:59] [Rank 0] step:4761/10000 train_time:347677ms step_avg:73.03ms +[2025-09-03 05:19:59] [Rank 0] step:4761/10000 train_time:347677ms step_avg:73.03ms +[2025-09-03 05:20:01] [Rank 0] step:4781/10000 train_time:349200ms step_avg:73.04ms +[2025-09-03 05:20:01] [Rank 0] step:4781/10000 train_time:349200ms step_avg:73.04ms +[2025-09-03 05:20:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:20:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:20:14] [Rank 0] PRINT: step:4800/10000 val_loss:4.1499 svd_entropy: attn_qk:H=0.7230,top10E=0.30,eRank=130.4,q75/q25=103.87 attn_vo:H=0.8067,top10E=0.17,eRank=238.3,q75/q25=92.97 mlp_w1:H=0.7489,top10E=0.31,eRank=169.1,q75/q25=17.05 mlp_w2:H=0.8514,top10E=0.13,eRank=291.3,q75/q25=37.85 vo_prod:H=0.7129,top10E=0.26,eRank=120.6,q75/q25=9810.78 train_time:350878ms step_avg:73.10ms +[2025-09-03 05:20:14] [Rank 0] PRINT: step:4800/10000 val_loss:4.1499 svd_entropy: attn_qk:H=0.7230,top10E=0.30,eRank=130.4,q75/q25=103.87 attn_vo:H=0.8067,top10E=0.17,eRank=238.3,q75/q25=92.97 mlp_w1:H=0.7489,top10E=0.31,eRank=169.1,q75/q25=17.05 mlp_w2:H=0.8514,top10E=0.13,eRank=291.3,q75/q25=37.85 vo_prod:H=0.7129,top10E=0.26,eRank=120.6,q75/q25=9810.78 train_time:350878ms step_avg:73.10ms +[2025-09-03 05:20:14] [Rank 0] step:4801/10000 train_time:350891ms step_avg:73.09ms +[2025-09-03 05:20:14] [Rank 0] step:4801/10000 train_time:350891ms step_avg:73.09ms +[2025-09-03 05:20:16] [Rank 0] step:4821/10000 train_time:352273ms step_avg:73.07ms +[2025-09-03 05:20:16] [Rank 0] step:4821/10000 train_time:352273ms step_avg:73.07ms +[2025-09-03 05:20:17] [Rank 0] step:4841/10000 train_time:353793ms step_avg:73.08ms +[2025-09-03 05:20:17] [Rank 0] step:4841/10000 train_time:353793ms step_avg:73.08ms +[2025-09-03 05:20:19] [Rank 0] step:4861/10000 train_time:355316ms step_avg:73.10ms +[2025-09-03 05:20:19] [Rank 0] step:4861/10000 train_time:355316ms step_avg:73.10ms +[2025-09-03 05:20:20] [Rank 0] step:4881/10000 train_time:356837ms step_avg:73.11ms +[2025-09-03 05:20:20] [Rank 0] step:4881/10000 train_time:356837ms step_avg:73.11ms +[2025-09-03 05:20:22] [Rank 0] step:4901/10000 train_time:358357ms step_avg:73.12ms +[2025-09-03 05:20:22] [Rank 0] step:4901/10000 train_time:358357ms step_avg:73.12ms +[2025-09-03 05:20:23] [Rank 0] step:4921/10000 train_time:359882ms step_avg:73.13ms +[2025-09-03 05:20:23] [Rank 0] step:4921/10000 train_time:359882ms step_avg:73.13ms +[2025-09-03 
05:20:25] [Rank 0] step:4941/10000 train_time:361408ms step_avg:73.14ms +[2025-09-03 05:20:25] [Rank 0] step:4941/10000 train_time:361408ms step_avg:73.14ms +[2025-09-03 05:20:26] [Rank 0] step:4961/10000 train_time:362929ms step_avg:73.16ms +[2025-09-03 05:20:26] [Rank 0] step:4961/10000 train_time:362929ms step_avg:73.16ms +[2025-09-03 05:20:28] [Rank 0] step:4981/10000 train_time:364454ms step_avg:73.17ms +[2025-09-03 05:20:28] [Rank 0] step:4981/10000 train_time:364454ms step_avg:73.17ms +[2025-09-03 05:20:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:20:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:20:41] [Rank 0] PRINT: step:5000/10000 val_loss:4.1281 svd_entropy: attn_qk:H=0.7259,top10E=0.29,eRank=132.7,q75/q25=105.66 attn_vo:H=0.8096,top10E=0.16,eRank=242.2,q75/q25=89.55 mlp_w1:H=0.7526,top10E=0.30,eRank=172.6,q75/q25=17.63 mlp_w2:H=0.8533,top10E=0.13,eRank=295.2,q75/q25=38.46 vo_prod:H=0.7164,top10E=0.26,eRank=123.5,q75/q25=8901.86 train_time:366130ms step_avg:73.23ms +[2025-09-03 05:20:41] [Rank 0] PRINT: step:5000/10000 val_loss:4.1281 svd_entropy: attn_qk:H=0.7259,top10E=0.29,eRank=132.7,q75/q25=105.66 attn_vo:H=0.8096,top10E=0.16,eRank=242.2,q75/q25=89.55 mlp_w1:H=0.7526,top10E=0.30,eRank=172.6,q75/q25=17.63 mlp_w2:H=0.8533,top10E=0.13,eRank=295.2,q75/q25=38.46 vo_prod:H=0.7164,top10E=0.26,eRank=123.5,q75/q25=8901.86 train_time:366130ms step_avg:73.23ms +[2025-09-03 05:20:41] [Rank 0] step:5001/10000 train_time:366142ms step_avg:73.21ms +[2025-09-03 05:20:41] [Rank 0] step:5001/10000 train_time:366142ms step_avg:73.21ms +[2025-09-03 05:20:43] [Rank 0] step:5021/10000 train_time:367532ms step_avg:73.20ms +[2025-09-03 05:20:43] [Rank 0] step:5021/10000 train_time:367532ms step_avg:73.20ms +[2025-09-03 05:20:44] [Rank 0] step:5041/10000 train_time:369054ms 
step_avg:73.21ms +[2025-09-03 05:20:44] [Rank 0] step:5041/10000 train_time:369054ms step_avg:73.21ms +[2025-09-03 05:20:46] [Rank 0] step:5061/10000 train_time:370575ms step_avg:73.22ms +[2025-09-03 05:20:46] [Rank 0] step:5061/10000 train_time:370575ms step_avg:73.22ms +[2025-09-03 05:20:47] [Rank 0] step:5081/10000 train_time:372097ms step_avg:73.23ms +[2025-09-03 05:20:47] [Rank 0] step:5081/10000 train_time:372097ms step_avg:73.23ms +[2025-09-03 05:20:49] [Rank 0] step:5101/10000 train_time:373620ms step_avg:73.24ms +[2025-09-03 05:20:49] [Rank 0] step:5101/10000 train_time:373620ms step_avg:73.24ms +[2025-09-03 05:20:50] [Rank 0] step:5121/10000 train_time:375149ms step_avg:73.26ms +[2025-09-03 05:20:50] [Rank 0] step:5121/10000 train_time:375149ms step_avg:73.26ms +[2025-09-03 05:20:52] [Rank 0] step:5141/10000 train_time:376675ms step_avg:73.27ms +[2025-09-03 05:20:52] [Rank 0] step:5141/10000 train_time:376675ms step_avg:73.27ms +[2025-09-03 05:20:53] [Rank 0] step:5161/10000 train_time:378198ms step_avg:73.28ms +[2025-09-03 05:20:53] [Rank 0] step:5161/10000 train_time:378198ms step_avg:73.28ms +[2025-09-03 05:20:55] [Rank 0] step:5181/10000 train_time:379726ms step_avg:73.29ms +[2025-09-03 05:20:55] [Rank 0] step:5181/10000 train_time:379726ms step_avg:73.29ms +[2025-09-03 05:20:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:20:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:21:08] [Rank 0] PRINT: step:5200/10000 val_loss:4.1075 svd_entropy: attn_qk:H=0.7285,top10E=0.29,eRank=134.7,q75/q25=106.13 attn_vo:H=0.8123,top10E=0.16,eRank=246.0,q75/q25=86.65 mlp_w1:H=0.7561,top10E=0.30,eRank=176.0,q75/q25=18.15 mlp_w2:H=0.8550,top10E=0.12,eRank=298.7,q75/q25=39.16 vo_prod:H=0.7198,top10E=0.25,eRank=126.3,q75/q25=8003.07 train_time:381429ms step_avg:73.35ms +[2025-09-03 05:21:08] [Rank 0] PRINT: step:5200/10000 val_loss:4.1075 svd_entropy: attn_qk:H=0.7285,top10E=0.29,eRank=134.7,q75/q25=106.13 attn_vo:H=0.8123,top10E=0.16,eRank=246.0,q75/q25=86.65 mlp_w1:H=0.7561,top10E=0.30,eRank=176.0,q75/q25=18.15 mlp_w2:H=0.8550,top10E=0.12,eRank=298.7,q75/q25=39.16 vo_prod:H=0.7198,top10E=0.25,eRank=126.3,q75/q25=8003.07 train_time:381429ms step_avg:73.35ms +[2025-09-03 05:21:08] [Rank 0] step:5201/10000 train_time:381441ms step_avg:73.34ms +[2025-09-03 05:21:08] [Rank 0] step:5201/10000 train_time:381441ms step_avg:73.34ms +[2025-09-03 05:21:10] [Rank 0] step:5221/10000 train_time:382847ms step_avg:73.33ms +[2025-09-03 05:21:10] [Rank 0] step:5221/10000 train_time:382847ms step_avg:73.33ms +[2025-09-03 05:21:11] [Rank 0] step:5241/10000 train_time:384399ms step_avg:73.34ms +[2025-09-03 05:21:11] [Rank 0] step:5241/10000 train_time:384399ms step_avg:73.34ms +[2025-09-03 05:21:13] [Rank 0] step:5261/10000 train_time:385953ms step_avg:73.36ms +[2025-09-03 05:21:13] [Rank 0] step:5261/10000 train_time:385953ms step_avg:73.36ms +[2025-09-03 05:21:14] [Rank 0] step:5281/10000 train_time:387509ms step_avg:73.38ms +[2025-09-03 05:21:14] [Rank 0] step:5281/10000 train_time:387509ms step_avg:73.38ms +[2025-09-03 05:21:16] [Rank 0] step:5301/10000 train_time:389073ms step_avg:73.40ms +[2025-09-03 05:21:16] [Rank 0] step:5301/10000 train_time:389073ms step_avg:73.40ms +[2025-09-03 05:21:18] [Rank 0] step:5321/10000 train_time:390624ms step_avg:73.41ms +[2025-09-03 05:21:18] [Rank 0] step:5321/10000 train_time:390624ms step_avg:73.41ms +[2025-09-03 
05:21:19] [Rank 0] step:5341/10000 train_time:392176ms step_avg:73.43ms +[2025-09-03 05:21:19] [Rank 0] step:5341/10000 train_time:392176ms step_avg:73.43ms +[2025-09-03 05:21:21] [Rank 0] step:5361/10000 train_time:393736ms step_avg:73.44ms +[2025-09-03 05:21:21] [Rank 0] step:5361/10000 train_time:393736ms step_avg:73.44ms +[2025-09-03 05:21:22] [Rank 0] step:5381/10000 train_time:395294ms step_avg:73.46ms +[2025-09-03 05:21:22] [Rank 0] step:5381/10000 train_time:395294ms step_avg:73.46ms +[2025-09-03 05:21:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:21:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:21:35] [Rank 0] PRINT: step:5400/10000 val_loss:4.0878 svd_entropy: attn_qk:H=0.7309,top10E=0.29,eRank=136.7,q75/q25=107.14 attn_vo:H=0.8148,top10E=0.16,eRank=249.6,q75/q25=83.57 mlp_w1:H=0.7594,top10E=0.30,eRank=179.3,q75/q25=18.74 mlp_w2:H=0.8565,top10E=0.12,eRank=301.9,q75/q25=39.95 vo_prod:H=0.7229,top10E=0.25,eRank=129.0,q75/q25=7482.43 train_time:397002ms step_avg:73.52ms +[2025-09-03 05:21:35] [Rank 0] PRINT: step:5400/10000 val_loss:4.0878 svd_entropy: attn_qk:H=0.7309,top10E=0.29,eRank=136.7,q75/q25=107.14 attn_vo:H=0.8148,top10E=0.16,eRank=249.6,q75/q25=83.57 mlp_w1:H=0.7594,top10E=0.30,eRank=179.3,q75/q25=18.74 mlp_w2:H=0.8565,top10E=0.12,eRank=301.9,q75/q25=39.95 vo_prod:H=0.7229,top10E=0.25,eRank=129.0,q75/q25=7482.43 train_time:397002ms step_avg:73.52ms +[2025-09-03 05:21:35] [Rank 0] step:5401/10000 train_time:397015ms step_avg:73.51ms +[2025-09-03 05:21:35] [Rank 0] step:5401/10000 train_time:397015ms step_avg:73.51ms +[2025-09-03 05:21:37] [Rank 0] step:5421/10000 train_time:398430ms step_avg:73.50ms +[2025-09-03 05:21:37] [Rank 0] step:5421/10000 train_time:398430ms step_avg:73.50ms +[2025-09-03 05:21:39] [Rank 0] step:5441/10000 train_time:399980ms 
step_avg:73.51ms +[2025-09-03 05:21:39] [Rank 0] step:5441/10000 train_time:399980ms step_avg:73.51ms +[2025-09-03 05:21:40] [Rank 0] step:5461/10000 train_time:401533ms step_avg:73.53ms +[2025-09-03 05:21:40] [Rank 0] step:5461/10000 train_time:401533ms step_avg:73.53ms +[2025-09-03 05:21:42] [Rank 0] step:5481/10000 train_time:403093ms step_avg:73.54ms +[2025-09-03 05:21:42] [Rank 0] step:5481/10000 train_time:403093ms step_avg:73.54ms +[2025-09-03 05:21:43] [Rank 0] step:5501/10000 train_time:404652ms step_avg:73.56ms +[2025-09-03 05:21:43] [Rank 0] step:5501/10000 train_time:404652ms step_avg:73.56ms +[2025-09-03 05:21:45] [Rank 0] step:5521/10000 train_time:406212ms step_avg:73.58ms +[2025-09-03 05:21:45] [Rank 0] step:5521/10000 train_time:406212ms step_avg:73.58ms +[2025-09-03 05:21:46] [Rank 0] step:5541/10000 train_time:407769ms step_avg:73.59ms +[2025-09-03 05:21:46] [Rank 0] step:5541/10000 train_time:407769ms step_avg:73.59ms +[2025-09-03 05:21:48] [Rank 0] step:5561/10000 train_time:409323ms step_avg:73.61ms +[2025-09-03 05:21:48] [Rank 0] step:5561/10000 train_time:409323ms step_avg:73.61ms +[2025-09-03 05:21:50] [Rank 0] step:5581/10000 train_time:410880ms step_avg:73.62ms +[2025-09-03 05:21:50] [Rank 0] step:5581/10000 train_time:410880ms step_avg:73.62ms +[2025-09-03 05:21:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:21:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:22:03] [Rank 0] PRINT: step:5600/10000 val_loss:4.0747 svd_entropy: attn_qk:H=0.7332,top10E=0.28,eRank=138.6,q75/q25=107.33 attn_vo:H=0.8172,top10E=0.16,eRank=253.0,q75/q25=81.15 mlp_w1:H=0.7625,top10E=0.29,eRank=182.4,q75/q25=19.38 mlp_w2:H=0.8578,top10E=0.12,eRank=304.8,q75/q25=40.90 vo_prod:H=0.7258,top10E=0.25,eRank=131.5,q75/q25=6685.29 train_time:412593ms step_avg:73.68ms +[2025-09-03 05:22:03] [Rank 0] PRINT: step:5600/10000 val_loss:4.0747 svd_entropy: attn_qk:H=0.7332,top10E=0.28,eRank=138.6,q75/q25=107.33 attn_vo:H=0.8172,top10E=0.16,eRank=253.0,q75/q25=81.15 mlp_w1:H=0.7625,top10E=0.29,eRank=182.4,q75/q25=19.38 mlp_w2:H=0.8578,top10E=0.12,eRank=304.8,q75/q25=40.90 vo_prod:H=0.7258,top10E=0.25,eRank=131.5,q75/q25=6685.29 train_time:412593ms step_avg:73.68ms +[2025-09-03 05:22:03] [Rank 0] step:5601/10000 train_time:412606ms step_avg:73.67ms +[2025-09-03 05:22:03] [Rank 0] step:5601/10000 train_time:412606ms step_avg:73.67ms +[2025-09-03 05:22:04] [Rank 0] step:5621/10000 train_time:414008ms step_avg:73.65ms +[2025-09-03 05:22:04] [Rank 0] step:5621/10000 train_time:414008ms step_avg:73.65ms +[2025-09-03 05:22:06] [Rank 0] step:5641/10000 train_time:415561ms step_avg:73.67ms +[2025-09-03 05:22:06] [Rank 0] step:5641/10000 train_time:415561ms step_avg:73.67ms +[2025-09-03 05:22:07] [Rank 0] step:5661/10000 train_time:417112ms step_avg:73.68ms +[2025-09-03 05:22:07] [Rank 0] step:5661/10000 train_time:417112ms step_avg:73.68ms +[2025-09-03 05:22:09] [Rank 0] step:5681/10000 train_time:418671ms step_avg:73.70ms +[2025-09-03 05:22:09] [Rank 0] step:5681/10000 train_time:418671ms step_avg:73.70ms +[2025-09-03 05:22:11] [Rank 0] step:5701/10000 train_time:420222ms step_avg:73.71ms +[2025-09-03 05:22:11] [Rank 0] step:5701/10000 train_time:420222ms step_avg:73.71ms +[2025-09-03 05:22:12] [Rank 0] step:5721/10000 train_time:421779ms step_avg:73.72ms +[2025-09-03 05:22:12] [Rank 0] step:5721/10000 train_time:421779ms step_avg:73.72ms +[2025-09-03 
05:22:14] [Rank 0] step:5741/10000 train_time:423334ms step_avg:73.74ms +[2025-09-03 05:22:14] [Rank 0] step:5741/10000 train_time:423334ms step_avg:73.74ms +[2025-09-03 05:22:15] [Rank 0] step:5761/10000 train_time:424891ms step_avg:73.75ms +[2025-09-03 05:22:15] [Rank 0] step:5761/10000 train_time:424891ms step_avg:73.75ms +[2025-09-03 05:22:17] [Rank 0] step:5781/10000 train_time:426449ms step_avg:73.77ms +[2025-09-03 05:22:17] [Rank 0] step:5781/10000 train_time:426449ms step_avg:73.77ms +[2025-09-03 05:22:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:22:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:22:30] [Rank 0] PRINT: step:5800/10000 val_loss:4.0633 svd_entropy: attn_qk:H=0.7356,top10E=0.28,eRank=140.7,q75/q25=107.19 attn_vo:H=0.8195,top10E=0.15,eRank=256.4,q75/q25=77.77 mlp_w1:H=0.7654,top10E=0.29,eRank=185.3,q75/q25=19.87 mlp_w2:H=0.8591,top10E=0.12,eRank=307.6,q75/q25=41.97 vo_prod:H=0.7286,top10E=0.24,eRank=133.9,q75/q25=6126.00 train_time:428163ms step_avg:73.82ms +[2025-09-03 05:22:30] [Rank 0] PRINT: step:5800/10000 val_loss:4.0633 svd_entropy: attn_qk:H=0.7356,top10E=0.28,eRank=140.7,q75/q25=107.19 attn_vo:H=0.8195,top10E=0.15,eRank=256.4,q75/q25=77.77 mlp_w1:H=0.7654,top10E=0.29,eRank=185.3,q75/q25=19.87 mlp_w2:H=0.8591,top10E=0.12,eRank=307.6,q75/q25=41.97 vo_prod:H=0.7286,top10E=0.24,eRank=133.9,q75/q25=6126.00 train_time:428163ms step_avg:73.82ms +[2025-09-03 05:22:30] [Rank 0] step:5801/10000 train_time:428176ms step_avg:73.81ms +[2025-09-03 05:22:30] [Rank 0] step:5801/10000 train_time:428176ms step_avg:73.81ms +[2025-09-03 05:22:32] [Rank 0] step:5821/10000 train_time:429591ms step_avg:73.80ms +[2025-09-03 05:22:32] [Rank 0] step:5821/10000 train_time:429591ms step_avg:73.80ms +[2025-09-03 05:22:33] [Rank 0] step:5841/10000 train_time:431143ms 
step_avg:73.81ms +[2025-09-03 05:22:33] [Rank 0] step:5841/10000 train_time:431143ms step_avg:73.81ms +[2025-09-03 05:22:35] [Rank 0] step:5861/10000 train_time:432698ms step_avg:73.83ms +[2025-09-03 05:22:35] [Rank 0] step:5861/10000 train_time:432698ms step_avg:73.83ms +[2025-09-03 05:22:36] [Rank 0] step:5881/10000 train_time:434253ms step_avg:73.84ms +[2025-09-03 05:22:36] [Rank 0] step:5881/10000 train_time:434253ms step_avg:73.84ms +[2025-09-03 05:22:38] [Rank 0] step:5901/10000 train_time:435809ms step_avg:73.85ms +[2025-09-03 05:22:38] [Rank 0] step:5901/10000 train_time:435809ms step_avg:73.85ms +[2025-09-03 05:22:39] [Rank 0] step:5921/10000 train_time:437364ms step_avg:73.87ms +[2025-09-03 05:22:39] [Rank 0] step:5921/10000 train_time:437364ms step_avg:73.87ms +[2025-09-03 05:22:41] [Rank 0] step:5941/10000 train_time:438921ms step_avg:73.88ms +[2025-09-03 05:22:41] [Rank 0] step:5941/10000 train_time:438921ms step_avg:73.88ms +[2025-09-03 05:22:43] [Rank 0] step:5961/10000 train_time:440484ms step_avg:73.89ms +[2025-09-03 05:22:43] [Rank 0] step:5961/10000 train_time:440484ms step_avg:73.89ms +[2025-09-03 05:22:44] [Rank 0] step:5981/10000 train_time:442043ms step_avg:73.91ms +[2025-09-03 05:22:44] [Rank 0] step:5981/10000 train_time:442043ms step_avg:73.91ms +[2025-09-03 05:22:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:22:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:22:57] [Rank 0] PRINT: step:6000/10000 val_loss:4.0390 svd_entropy: attn_qk:H=0.7378,top10E=0.28,eRank=142.5,q75/q25=107.79 attn_vo:H=0.8216,top10E=0.15,eRank=259.5,q75/q25=75.48 mlp_w1:H=0.7682,top10E=0.28,eRank=188.3,q75/q25=20.54 mlp_w2:H=0.8604,top10E=0.12,eRank=310.4,q75/q25=42.59 vo_prod:H=0.7312,top10E=0.24,eRank=136.4,q75/q25=5541.74 train_time:443754ms step_avg:73.96ms +[2025-09-03 05:22:57] [Rank 0] PRINT: step:6000/10000 val_loss:4.0390 svd_entropy: attn_qk:H=0.7378,top10E=0.28,eRank=142.5,q75/q25=107.79 attn_vo:H=0.8216,top10E=0.15,eRank=259.5,q75/q25=75.48 mlp_w1:H=0.7682,top10E=0.28,eRank=188.3,q75/q25=20.54 mlp_w2:H=0.8604,top10E=0.12,eRank=310.4,q75/q25=42.59 vo_prod:H=0.7312,top10E=0.24,eRank=136.4,q75/q25=5541.74 train_time:443754ms step_avg:73.96ms +[2025-09-03 05:22:57] [Rank 0] step:6001/10000 train_time:443768ms step_avg:73.95ms +[2025-09-03 05:22:57] [Rank 0] step:6001/10000 train_time:443768ms step_avg:73.95ms +[2025-09-03 05:22:59] [Rank 0] step:6021/10000 train_time:445176ms step_avg:73.94ms +[2025-09-03 05:22:59] [Rank 0] step:6021/10000 train_time:445176ms step_avg:73.94ms +[2025-09-03 05:23:00] [Rank 0] step:6041/10000 train_time:446733ms step_avg:73.95ms +[2025-09-03 05:23:00] [Rank 0] step:6041/10000 train_time:446733ms step_avg:73.95ms +[2025-09-03 05:23:02] [Rank 0] step:6061/10000 train_time:448296ms step_avg:73.96ms +[2025-09-03 05:23:02] [Rank 0] step:6061/10000 train_time:448296ms step_avg:73.96ms +[2025-09-03 05:23:04] [Rank 0] step:6081/10000 train_time:449855ms step_avg:73.98ms +[2025-09-03 05:23:04] [Rank 0] step:6081/10000 train_time:449855ms step_avg:73.98ms +[2025-09-03 05:23:05] [Rank 0] step:6101/10000 train_time:451416ms step_avg:73.99ms +[2025-09-03 05:23:05] [Rank 0] step:6101/10000 train_time:451416ms step_avg:73.99ms +[2025-09-03 05:23:07] [Rank 0] step:6121/10000 train_time:453245ms step_avg:74.05ms +[2025-09-03 05:23:07] [Rank 0] step:6121/10000 train_time:453245ms step_avg:74.05ms +[2025-09-03 
05:23:09] [Rank 0] step:6141/10000 train_time:454812ms step_avg:74.06ms +[2025-09-03 05:23:09] [Rank 0] step:6141/10000 train_time:454812ms step_avg:74.06ms +[2025-09-03 05:23:10] [Rank 0] step:6161/10000 train_time:456372ms step_avg:74.07ms +[2025-09-03 05:23:10] [Rank 0] step:6161/10000 train_time:456372ms step_avg:74.07ms +[2025-09-03 05:23:12] [Rank 0] step:6181/10000 train_time:457930ms step_avg:74.09ms +[2025-09-03 05:23:12] [Rank 0] step:6181/10000 train_time:457930ms step_avg:74.09ms +[2025-09-03 05:23:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:23:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:23:25] [Rank 0] PRINT: step:6200/10000 val_loss:4.0238 svd_entropy: attn_qk:H=0.7398,top10E=0.27,eRank=144.2,q75/q25=107.15 attn_vo:H=0.8237,top10E=0.15,eRank=262.6,q75/q25=72.91 mlp_w1:H=0.7709,top10E=0.28,eRank=191.2,q75/q25=21.10 mlp_w2:H=0.8616,top10E=0.12,eRank=313.1,q75/q25=43.26 vo_prod:H=0.7338,top10E=0.24,eRank=138.7,q75/q25=5013.09 train_time:459646ms step_avg:74.14ms +[2025-09-03 05:23:25] [Rank 0] PRINT: step:6200/10000 val_loss:4.0238 svd_entropy: attn_qk:H=0.7398,top10E=0.27,eRank=144.2,q75/q25=107.15 attn_vo:H=0.8237,top10E=0.15,eRank=262.6,q75/q25=72.91 mlp_w1:H=0.7709,top10E=0.28,eRank=191.2,q75/q25=21.10 mlp_w2:H=0.8616,top10E=0.12,eRank=313.1,q75/q25=43.26 vo_prod:H=0.7338,top10E=0.24,eRank=138.7,q75/q25=5013.09 train_time:459646ms step_avg:74.14ms +[2025-09-03 05:23:25] [Rank 0] step:6201/10000 train_time:459658ms step_avg:74.13ms +[2025-09-03 05:23:25] [Rank 0] step:6201/10000 train_time:459658ms step_avg:74.13ms +[2025-09-03 05:23:27] [Rank 0] step:6221/10000 train_time:461081ms step_avg:74.12ms +[2025-09-03 05:23:27] [Rank 0] step:6221/10000 train_time:461081ms step_avg:74.12ms +[2025-09-03 05:23:28] [Rank 0] step:6241/10000 train_time:462637ms 
step_avg:74.13ms +[2025-09-03 05:23:28] [Rank 0] step:6241/10000 train_time:462637ms step_avg:74.13ms +[2025-09-03 05:23:30] [Rank 0] step:6261/10000 train_time:464198ms step_avg:74.14ms +[2025-09-03 05:23:30] [Rank 0] step:6261/10000 train_time:464198ms step_avg:74.14ms +[2025-09-03 05:23:31] [Rank 0] step:6281/10000 train_time:465763ms step_avg:74.15ms +[2025-09-03 05:23:31] [Rank 0] step:6281/10000 train_time:465763ms step_avg:74.15ms +[2025-09-03 05:23:33] [Rank 0] step:6301/10000 train_time:467324ms step_avg:74.17ms +[2025-09-03 05:23:33] [Rank 0] step:6301/10000 train_time:467324ms step_avg:74.17ms +[2025-09-03 05:23:34] [Rank 0] step:6321/10000 train_time:468882ms step_avg:74.18ms +[2025-09-03 05:23:34] [Rank 0] step:6321/10000 train_time:468882ms step_avg:74.18ms +[2025-09-03 05:23:36] [Rank 0] step:6341/10000 train_time:470446ms step_avg:74.19ms +[2025-09-03 05:23:36] [Rank 0] step:6341/10000 train_time:470446ms step_avg:74.19ms +[2025-09-03 05:23:38] [Rank 0] step:6361/10000 train_time:472013ms step_avg:74.20ms +[2025-09-03 05:23:38] [Rank 0] step:6361/10000 train_time:472013ms step_avg:74.20ms +[2025-09-03 05:23:39] [Rank 0] step:6381/10000 train_time:473580ms step_avg:74.22ms +[2025-09-03 05:23:39] [Rank 0] step:6381/10000 train_time:473580ms step_avg:74.22ms +[2025-09-03 05:23:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:23:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:23:52] [Rank 0] PRINT: step:6400/10000 val_loss:4.0072 svd_entropy: attn_qk:H=0.7417,top10E=0.27,eRank=145.9,q75/q25=107.46 attn_vo:H=0.8255,top10E=0.15,eRank=265.3,q75/q25=70.58 mlp_w1:H=0.7733,top10E=0.28,eRank=193.8,q75/q25=21.55 mlp_w2:H=0.8626,top10E=0.12,eRank=315.4,q75/q25=44.17 vo_prod:H=0.7361,top10E=0.24,eRank=140.8,q75/q25=4664.88 train_time:475298ms step_avg:74.27ms +[2025-09-03 05:23:52] [Rank 0] PRINT: step:6400/10000 val_loss:4.0072 svd_entropy: attn_qk:H=0.7417,top10E=0.27,eRank=145.9,q75/q25=107.46 attn_vo:H=0.8255,top10E=0.15,eRank=265.3,q75/q25=70.58 mlp_w1:H=0.7733,top10E=0.28,eRank=193.8,q75/q25=21.55 mlp_w2:H=0.8626,top10E=0.12,eRank=315.4,q75/q25=44.17 vo_prod:H=0.7361,top10E=0.24,eRank=140.8,q75/q25=4664.88 train_time:475298ms step_avg:74.27ms +[2025-09-03 05:23:52] [Rank 0] step:6401/10000 train_time:475310ms step_avg:74.26ms +[2025-09-03 05:23:52] [Rank 0] step:6401/10000 train_time:475310ms step_avg:74.26ms +[2025-09-03 05:23:54] [Rank 0] step:6421/10000 train_time:476739ms step_avg:74.25ms +[2025-09-03 05:23:54] [Rank 0] step:6421/10000 train_time:476739ms step_avg:74.25ms +[2025-09-03 05:23:56] [Rank 0] step:6441/10000 train_time:478300ms step_avg:74.26ms +[2025-09-03 05:23:56] [Rank 0] step:6441/10000 train_time:478300ms step_avg:74.26ms +[2025-09-03 05:23:57] [Rank 0] step:6461/10000 train_time:479863ms step_avg:74.27ms +[2025-09-03 05:23:57] [Rank 0] step:6461/10000 train_time:479863ms step_avg:74.27ms +[2025-09-03 05:23:59] [Rank 0] step:6481/10000 train_time:481430ms step_avg:74.28ms +[2025-09-03 05:23:59] [Rank 0] step:6481/10000 train_time:481430ms step_avg:74.28ms +[2025-09-03 05:24:00] [Rank 0] step:6501/10000 train_time:483009ms step_avg:74.30ms +[2025-09-03 05:24:00] [Rank 0] step:6501/10000 train_time:483009ms step_avg:74.30ms +[2025-09-03 05:24:02] [Rank 0] step:6521/10000 train_time:484564ms step_avg:74.31ms +[2025-09-03 05:24:02] [Rank 0] step:6521/10000 train_time:484564ms step_avg:74.31ms +[2025-09-03 
05:24:03] [Rank 0] step:6541/10000 train_time:486125ms step_avg:74.32ms +[2025-09-03 05:24:03] [Rank 0] step:6541/10000 train_time:486125ms step_avg:74.32ms +[2025-09-03 05:24:05] [Rank 0] step:6561/10000 train_time:487691ms step_avg:74.33ms +[2025-09-03 05:24:05] [Rank 0] step:6561/10000 train_time:487691ms step_avg:74.33ms +[2025-09-03 05:24:07] [Rank 0] step:6581/10000 train_time:489250ms step_avg:74.34ms +[2025-09-03 05:24:07] [Rank 0] step:6581/10000 train_time:489250ms step_avg:74.34ms +[2025-09-03 05:24:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:24:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:24:20] [Rank 0] PRINT: step:6600/10000 val_loss:3.9941 svd_entropy: attn_qk:H=0.7434,top10E=0.27,eRank=147.4,q75/q25=107.35 attn_vo:H=0.8272,top10E=0.15,eRank=267.9,q75/q25=68.33 mlp_w1:H=0.7754,top10E=0.27,eRank=196.2,q75/q25=22.01 mlp_w2:H=0.8636,top10E=0.12,eRank=317.5,q75/q25=44.78 vo_prod:H=0.7383,top10E=0.23,eRank=142.9,q75/q25=4298.37 train_time:490971ms step_avg:74.39ms +[2025-09-03 05:24:20] [Rank 0] PRINT: step:6600/10000 val_loss:3.9941 svd_entropy: attn_qk:H=0.7434,top10E=0.27,eRank=147.4,q75/q25=107.35 attn_vo:H=0.8272,top10E=0.15,eRank=267.9,q75/q25=68.33 mlp_w1:H=0.7754,top10E=0.27,eRank=196.2,q75/q25=22.01 mlp_w2:H=0.8636,top10E=0.12,eRank=317.5,q75/q25=44.78 vo_prod:H=0.7383,top10E=0.23,eRank=142.9,q75/q25=4298.37 train_time:490971ms step_avg:74.39ms +[2025-09-03 05:24:20] [Rank 0] step:6601/10000 train_time:490983ms step_avg:74.38ms +[2025-09-03 05:24:20] [Rank 0] step:6601/10000 train_time:490983ms step_avg:74.38ms +[2025-09-03 05:24:21] [Rank 0] step:6621/10000 train_time:492397ms step_avg:74.37ms +[2025-09-03 05:24:21] [Rank 0] step:6621/10000 train_time:492397ms step_avg:74.37ms +[2025-09-03 05:24:23] [Rank 0] step:6641/10000 train_time:493961ms 
step_avg:74.38ms +[2025-09-03 05:24:23] [Rank 0] step:6641/10000 train_time:493961ms step_avg:74.38ms +[2025-09-03 05:24:25] [Rank 0] step:6661/10000 train_time:495522ms step_avg:74.39ms +[2025-09-03 05:24:25] [Rank 0] step:6661/10000 train_time:495522ms step_avg:74.39ms +[2025-09-03 05:24:26] [Rank 0] step:6681/10000 train_time:497100ms step_avg:74.41ms +[2025-09-03 05:24:26] [Rank 0] step:6681/10000 train_time:497100ms step_avg:74.41ms +[2025-09-03 05:24:28] [Rank 0] step:6701/10000 train_time:498696ms step_avg:74.42ms +[2025-09-03 05:24:28] [Rank 0] step:6701/10000 train_time:498696ms step_avg:74.42ms +[2025-09-03 05:24:29] [Rank 0] step:6721/10000 train_time:500288ms step_avg:74.44ms +[2025-09-03 05:24:29] [Rank 0] step:6721/10000 train_time:500288ms step_avg:74.44ms +[2025-09-03 05:24:31] [Rank 0] step:6741/10000 train_time:501873ms step_avg:74.45ms +[2025-09-03 05:24:31] [Rank 0] step:6741/10000 train_time:501873ms step_avg:74.45ms +[2025-09-03 05:24:33] [Rank 0] step:6761/10000 train_time:503464ms step_avg:74.47ms +[2025-09-03 05:24:33] [Rank 0] step:6761/10000 train_time:503464ms step_avg:74.47ms +[2025-09-03 05:24:34] [Rank 0] step:6781/10000 train_time:505056ms step_avg:74.48ms +[2025-09-03 05:24:34] [Rank 0] step:6781/10000 train_time:505056ms step_avg:74.48ms +[2025-09-03 05:24:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:24:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:24:47] [Rank 0] PRINT: step:6800/10000 val_loss:3.9787 svd_entropy: attn_qk:H=0.7449,top10E=0.27,eRank=148.8,q75/q25=107.06 attn_vo:H=0.8288,top10E=0.14,eRank=270.3,q75/q25=66.28 mlp_w1:H=0.7774,top10E=0.27,eRank=198.5,q75/q25=22.48 mlp_w2:H=0.8645,top10E=0.12,eRank=319.6,q75/q25=45.32 vo_prod:H=0.7402,top10E=0.23,eRank=144.8,q75/q25=3947.91 train_time:506808ms step_avg:74.53ms +[2025-09-03 05:24:47] [Rank 0] PRINT: step:6800/10000 val_loss:3.9787 svd_entropy: attn_qk:H=0.7449,top10E=0.27,eRank=148.8,q75/q25=107.06 attn_vo:H=0.8288,top10E=0.14,eRank=270.3,q75/q25=66.28 mlp_w1:H=0.7774,top10E=0.27,eRank=198.5,q75/q25=22.48 mlp_w2:H=0.8645,top10E=0.12,eRank=319.6,q75/q25=45.32 vo_prod:H=0.7402,top10E=0.23,eRank=144.8,q75/q25=3947.91 train_time:506808ms step_avg:74.53ms +[2025-09-03 05:24:48] [Rank 0] step:6801/10000 train_time:506820ms step_avg:74.52ms +[2025-09-03 05:24:48] [Rank 0] step:6801/10000 train_time:506820ms step_avg:74.52ms +[2025-09-03 05:24:49] [Rank 0] step:6821/10000 train_time:508255ms step_avg:74.51ms +[2025-09-03 05:24:49] [Rank 0] step:6821/10000 train_time:508255ms step_avg:74.51ms +[2025-09-03 05:24:51] [Rank 0] step:6841/10000 train_time:509839ms step_avg:74.53ms +[2025-09-03 05:24:51] [Rank 0] step:6841/10000 train_time:509839ms step_avg:74.53ms +[2025-09-03 05:24:52] [Rank 0] step:6861/10000 train_time:511427ms step_avg:74.54ms +[2025-09-03 05:24:52] [Rank 0] step:6861/10000 train_time:511427ms step_avg:74.54ms +[2025-09-03 05:24:54] [Rank 0] step:6881/10000 train_time:513015ms step_avg:74.56ms +[2025-09-03 05:24:54] [Rank 0] step:6881/10000 train_time:513015ms step_avg:74.56ms +[2025-09-03 05:24:55] [Rank 0] step:6901/10000 train_time:514602ms step_avg:74.57ms +[2025-09-03 05:24:55] [Rank 0] step:6901/10000 train_time:514602ms step_avg:74.57ms +[2025-09-03 05:24:57] [Rank 0] step:6921/10000 train_time:516188ms step_avg:74.58ms +[2025-09-03 05:24:57] [Rank 0] step:6921/10000 train_time:516188ms step_avg:74.58ms +[2025-09-03 
05:24:59] [Rank 0] step:6941/10000 train_time:517787ms step_avg:74.60ms +[2025-09-03 05:24:59] [Rank 0] step:6941/10000 train_time:517787ms step_avg:74.60ms +[2025-09-03 05:25:00] [Rank 0] step:6961/10000 train_time:519394ms step_avg:74.61ms +[2025-09-03 05:25:00] [Rank 0] step:6961/10000 train_time:519394ms step_avg:74.61ms +[2025-09-03 05:25:02] [Rank 0] step:6981/10000 train_time:520987ms step_avg:74.63ms +[2025-09-03 05:25:02] [Rank 0] step:6981/10000 train_time:520987ms step_avg:74.63ms +[2025-09-03 05:25:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:25:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:25:15] [Rank 0] PRINT: step:7000/10000 val_loss:3.9612 svd_entropy: attn_qk:H=0.7464,top10E=0.27,eRank=150.1,q75/q25=106.66 attn_vo:H=0.8301,top10E=0.14,eRank=272.4,q75/q25=64.68 mlp_w1:H=0.7793,top10E=0.27,eRank=200.6,q75/q25=22.87 mlp_w2:H=0.8653,top10E=0.11,eRank=321.4,q75/q25=45.89 vo_prod:H=0.7420,top10E=0.23,eRank=146.5,q75/q25=3671.07 train_time:522766ms step_avg:74.68ms +[2025-09-03 05:25:15] [Rank 0] PRINT: step:7000/10000 val_loss:3.9612 svd_entropy: attn_qk:H=0.7464,top10E=0.27,eRank=150.1,q75/q25=106.66 attn_vo:H=0.8301,top10E=0.14,eRank=272.4,q75/q25=64.68 mlp_w1:H=0.7793,top10E=0.27,eRank=200.6,q75/q25=22.87 mlp_w2:H=0.8653,top10E=0.11,eRank=321.4,q75/q25=45.89 vo_prod:H=0.7420,top10E=0.23,eRank=146.5,q75/q25=3671.07 train_time:522766ms step_avg:74.68ms +[2025-09-03 05:25:15] [Rank 0] step:7001/10000 train_time:522779ms step_avg:74.67ms +[2025-09-03 05:25:15] [Rank 0] step:7001/10000 train_time:522779ms step_avg:74.67ms +[2025-09-03 05:25:17] [Rank 0] step:7021/10000 train_time:524220ms step_avg:74.66ms +[2025-09-03 05:25:17] [Rank 0] step:7021/10000 train_time:524220ms step_avg:74.66ms +[2025-09-03 05:25:18] [Rank 0] step:7041/10000 train_time:525812ms 
step_avg:74.68ms +[2025-09-03 05:25:18] [Rank 0] step:7041/10000 train_time:525812ms step_avg:74.68ms +[2025-09-03 05:25:20] [Rank 0] step:7061/10000 train_time:527401ms step_avg:74.69ms +[2025-09-03 05:25:20] [Rank 0] step:7061/10000 train_time:527401ms step_avg:74.69ms +[2025-09-03 05:25:22] [Rank 0] step:7081/10000 train_time:528994ms step_avg:74.71ms +[2025-09-03 05:25:22] [Rank 0] step:7081/10000 train_time:528994ms step_avg:74.71ms +[2025-09-03 05:25:23] [Rank 0] step:7101/10000 train_time:530584ms step_avg:74.72ms +[2025-09-03 05:25:23] [Rank 0] step:7101/10000 train_time:530584ms step_avg:74.72ms +[2025-09-03 05:25:25] [Rank 0] step:7121/10000 train_time:532176ms step_avg:74.73ms +[2025-09-03 05:25:25] [Rank 0] step:7121/10000 train_time:532176ms step_avg:74.73ms +[2025-09-03 05:25:26] [Rank 0] step:7141/10000 train_time:533768ms step_avg:74.75ms +[2025-09-03 05:25:26] [Rank 0] step:7141/10000 train_time:533768ms step_avg:74.75ms +[2025-09-03 05:25:28] [Rank 0] step:7161/10000 train_time:535361ms step_avg:74.76ms +[2025-09-03 05:25:28] [Rank 0] step:7161/10000 train_time:535361ms step_avg:74.76ms +[2025-09-03 05:25:30] [Rank 0] step:7181/10000 train_time:536954ms step_avg:74.77ms +[2025-09-03 05:25:30] [Rank 0] step:7181/10000 train_time:536954ms step_avg:74.77ms +[2025-09-03 05:25:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:25:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:25:43] [Rank 0] PRINT: step:7200/10000 val_loss:3.9542 svd_entropy: attn_qk:H=0.7477,top10E=0.26,eRank=151.4,q75/q25=107.00 attn_vo:H=0.8314,top10E=0.14,eRank=274.5,q75/q25=63.07 mlp_w1:H=0.7809,top10E=0.27,eRank=202.5,q75/q25=23.21 mlp_w2:H=0.8660,top10E=0.11,eRank=323.2,q75/q25=46.05 vo_prod:H=0.7437,top10E=0.23,eRank=148.2,q75/q25=3397.45 train_time:538711ms step_avg:74.82ms +[2025-09-03 05:25:43] [Rank 0] PRINT: step:7200/10000 val_loss:3.9542 svd_entropy: attn_qk:H=0.7477,top10E=0.26,eRank=151.4,q75/q25=107.00 attn_vo:H=0.8314,top10E=0.14,eRank=274.5,q75/q25=63.07 mlp_w1:H=0.7809,top10E=0.27,eRank=202.5,q75/q25=23.21 mlp_w2:H=0.8660,top10E=0.11,eRank=323.2,q75/q25=46.05 vo_prod:H=0.7437,top10E=0.23,eRank=148.2,q75/q25=3397.45 train_time:538711ms step_avg:74.82ms +[2025-09-03 05:25:43] [Rank 0] step:7201/10000 train_time:538725ms step_avg:74.81ms +[2025-09-03 05:25:43] [Rank 0] step:7201/10000 train_time:538725ms step_avg:74.81ms +[2025-09-03 05:25:45] [Rank 0] step:7221/10000 train_time:540165ms step_avg:74.80ms +[2025-09-03 05:25:45] [Rank 0] step:7221/10000 train_time:540165ms step_avg:74.80ms +[2025-09-03 05:25:46] [Rank 0] step:7241/10000 train_time:541750ms step_avg:74.82ms +[2025-09-03 05:25:46] [Rank 0] step:7241/10000 train_time:541750ms step_avg:74.82ms +[2025-09-03 05:25:48] [Rank 0] step:7261/10000 train_time:543335ms step_avg:74.83ms +[2025-09-03 05:25:48] [Rank 0] step:7261/10000 train_time:543335ms step_avg:74.83ms +[2025-09-03 05:25:49] [Rank 0] step:7281/10000 train_time:544931ms step_avg:74.84ms +[2025-09-03 05:25:49] [Rank 0] step:7281/10000 train_time:544931ms step_avg:74.84ms +[2025-09-03 05:25:51] [Rank 0] step:7301/10000 train_time:546521ms step_avg:74.86ms +[2025-09-03 05:25:51] [Rank 0] step:7301/10000 train_time:546521ms step_avg:74.86ms +[2025-09-03 05:25:53] [Rank 0] step:7321/10000 train_time:548120ms step_avg:74.87ms +[2025-09-03 05:25:53] [Rank 0] step:7321/10000 train_time:548120ms step_avg:74.87ms +[2025-09-03 
05:25:54] [Rank 0] step:7341/10000 train_time:549713ms step_avg:74.88ms +[2025-09-03 05:25:54] [Rank 0] step:7341/10000 train_time:549713ms step_avg:74.88ms +[2025-09-03 05:25:56] [Rank 0] step:7361/10000 train_time:551309ms step_avg:74.90ms +[2025-09-03 05:25:56] [Rank 0] step:7361/10000 train_time:551309ms step_avg:74.90ms +[2025-09-03 05:25:57] [Rank 0] step:7381/10000 train_time:552905ms step_avg:74.91ms +[2025-09-03 05:25:57] [Rank 0] step:7381/10000 train_time:552905ms step_avg:74.91ms +[2025-09-03 05:25:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:25:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:26:11] [Rank 0] PRINT: step:7400/10000 val_loss:3.9328 svd_entropy: attn_qk:H=0.7490,top10E=0.26,eRank=152.6,q75/q25=106.66 attn_vo:H=0.8326,top10E=0.14,eRank=276.2,q75/q25=61.46 mlp_w1:H=0.7824,top10E=0.26,eRank=204.2,q75/q25=23.54 mlp_w2:H=0.8667,top10E=0.11,eRank=324.8,q75/q25=46.65 vo_prod:H=0.7452,top10E=0.23,eRank=149.7,q75/q25=3223.04 train_time:554641ms step_avg:74.95ms +[2025-09-03 05:26:11] [Rank 0] PRINT: step:7400/10000 val_loss:3.9328 svd_entropy: attn_qk:H=0.7490,top10E=0.26,eRank=152.6,q75/q25=106.66 attn_vo:H=0.8326,top10E=0.14,eRank=276.2,q75/q25=61.46 mlp_w1:H=0.7824,top10E=0.26,eRank=204.2,q75/q25=23.54 mlp_w2:H=0.8667,top10E=0.11,eRank=324.8,q75/q25=46.65 vo_prod:H=0.7452,top10E=0.23,eRank=149.7,q75/q25=3223.04 train_time:554641ms step_avg:74.95ms +[2025-09-03 05:26:11] [Rank 0] step:7401/10000 train_time:554653ms step_avg:74.94ms +[2025-09-03 05:26:11] [Rank 0] step:7401/10000 train_time:554653ms step_avg:74.94ms +[2025-09-03 05:26:12] [Rank 0] step:7421/10000 train_time:556110ms step_avg:74.94ms +[2025-09-03 05:26:12] [Rank 0] step:7421/10000 train_time:556110ms step_avg:74.94ms +[2025-09-03 05:26:14] [Rank 0] step:7441/10000 train_time:557694ms 
step_avg:74.95ms +[2025-09-03 05:26:14] [Rank 0] step:7441/10000 train_time:557694ms step_avg:74.95ms +[2025-09-03 05:26:16] [Rank 0] step:7461/10000 train_time:559283ms step_avg:74.96ms +[2025-09-03 05:26:16] [Rank 0] step:7461/10000 train_time:559283ms step_avg:74.96ms +[2025-09-03 05:26:17] [Rank 0] step:7481/10000 train_time:560879ms step_avg:74.97ms +[2025-09-03 05:26:17] [Rank 0] step:7481/10000 train_time:560879ms step_avg:74.97ms +[2025-09-03 05:26:19] [Rank 0] step:7501/10000 train_time:562475ms step_avg:74.99ms +[2025-09-03 05:26:19] [Rank 0] step:7501/10000 train_time:562475ms step_avg:74.99ms +[2025-09-03 05:26:20] [Rank 0] step:7521/10000 train_time:564071ms step_avg:75.00ms +[2025-09-03 05:26:20] [Rank 0] step:7521/10000 train_time:564071ms step_avg:75.00ms +[2025-09-03 05:26:22] [Rank 0] step:7541/10000 train_time:565675ms step_avg:75.01ms +[2025-09-03 05:26:22] [Rank 0] step:7541/10000 train_time:565675ms step_avg:75.01ms +[2025-09-03 05:26:23] [Rank 0] step:7561/10000 train_time:567257ms step_avg:75.02ms +[2025-09-03 05:26:23] [Rank 0] step:7561/10000 train_time:567257ms step_avg:75.02ms +[2025-09-03 05:26:25] [Rank 0] step:7581/10000 train_time:568862ms step_avg:75.04ms +[2025-09-03 05:26:25] [Rank 0] step:7581/10000 train_time:568862ms step_avg:75.04ms +[2025-09-03 05:26:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:26:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:26:38] [Rank 0] PRINT: step:7600/10000 val_loss:3.9278 svd_entropy: attn_qk:H=0.7501,top10E=0.26,eRank=153.6,q75/q25=105.64 attn_vo:H=0.8335,top10E=0.14,eRank=277.7,q75/q25=60.23 mlp_w1:H=0.7837,top10E=0.26,eRank=205.9,q75/q25=23.77 mlp_w2:H=0.8673,top10E=0.11,eRank=326.2,q75/q25=47.16 vo_prod:H=0.7464,top10E=0.22,eRank=150.9,q75/q25=3044.02 train_time:570626ms step_avg:75.08ms +[2025-09-03 05:26:38] [Rank 0] PRINT: step:7600/10000 val_loss:3.9278 svd_entropy: attn_qk:H=0.7501,top10E=0.26,eRank=153.6,q75/q25=105.64 attn_vo:H=0.8335,top10E=0.14,eRank=277.7,q75/q25=60.23 mlp_w1:H=0.7837,top10E=0.26,eRank=205.9,q75/q25=23.77 mlp_w2:H=0.8673,top10E=0.11,eRank=326.2,q75/q25=47.16 vo_prod:H=0.7464,top10E=0.22,eRank=150.9,q75/q25=3044.02 train_time:570626ms step_avg:75.08ms +[2025-09-03 05:26:38] [Rank 0] step:7601/10000 train_time:570640ms step_avg:75.07ms +[2025-09-03 05:26:38] [Rank 0] step:7601/10000 train_time:570640ms step_avg:75.07ms +[2025-09-03 05:26:40] [Rank 0] step:7621/10000 train_time:572087ms step_avg:75.07ms +[2025-09-03 05:26:40] [Rank 0] step:7621/10000 train_time:572087ms step_avg:75.07ms +[2025-09-03 05:26:42] [Rank 0] step:7641/10000 train_time:573678ms step_avg:75.08ms +[2025-09-03 05:26:42] [Rank 0] step:7641/10000 train_time:573678ms step_avg:75.08ms +[2025-09-03 05:26:43] [Rank 0] step:7661/10000 train_time:575279ms step_avg:75.09ms +[2025-09-03 05:26:43] [Rank 0] step:7661/10000 train_time:575279ms step_avg:75.09ms +[2025-09-03 05:26:45] [Rank 0] step:7681/10000 train_time:576866ms step_avg:75.10ms +[2025-09-03 05:26:45] [Rank 0] step:7681/10000 train_time:576866ms step_avg:75.10ms +[2025-09-03 05:26:46] [Rank 0] step:7701/10000 train_time:578455ms step_avg:75.11ms +[2025-09-03 05:26:46] [Rank 0] step:7701/10000 train_time:578455ms step_avg:75.11ms +[2025-09-03 05:26:48] [Rank 0] step:7721/10000 train_time:580059ms step_avg:75.13ms +[2025-09-03 05:26:48] [Rank 0] step:7721/10000 train_time:580059ms step_avg:75.13ms +[2025-09-03 
05:26:50] [Rank 0] step:7741/10000 train_time:581651ms step_avg:75.14ms +[2025-09-03 05:26:50] [Rank 0] step:7741/10000 train_time:581651ms step_avg:75.14ms +[2025-09-03 05:26:51] [Rank 0] step:7761/10000 train_time:583249ms step_avg:75.15ms +[2025-09-03 05:26:51] [Rank 0] step:7761/10000 train_time:583249ms step_avg:75.15ms +[2025-09-03 05:26:53] [Rank 0] step:7781/10000 train_time:584850ms step_avg:75.16ms +[2025-09-03 05:26:53] [Rank 0] step:7781/10000 train_time:584850ms step_avg:75.16ms +[2025-09-03 05:26:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:26:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:27:06] [Rank 0] PRINT: step:7800/10000 val_loss:3.9135 svd_entropy: attn_qk:H=0.7511,top10E=0.26,eRank=154.6,q75/q25=105.23 attn_vo:H=0.8345,top10E=0.14,eRank=279.2,q75/q25=58.80 mlp_w1:H=0.7850,top10E=0.26,eRank=207.4,q75/q25=23.97 mlp_w2:H=0.8679,top10E=0.11,eRank=327.5,q75/q25=47.53 vo_prod:H=0.7477,top10E=0.22,eRank=152.2,q75/q25=2928.38 train_time:586613ms step_avg:75.21ms +[2025-09-03 05:27:06] [Rank 0] PRINT: step:7800/10000 val_loss:3.9135 svd_entropy: attn_qk:H=0.7511,top10E=0.26,eRank=154.6,q75/q25=105.23 attn_vo:H=0.8345,top10E=0.14,eRank=279.2,q75/q25=58.80 mlp_w1:H=0.7850,top10E=0.26,eRank=207.4,q75/q25=23.97 mlp_w2:H=0.8679,top10E=0.11,eRank=327.5,q75/q25=47.53 vo_prod:H=0.7477,top10E=0.22,eRank=152.2,q75/q25=2928.38 train_time:586613ms step_avg:75.21ms +[2025-09-03 05:27:06] [Rank 0] step:7801/10000 train_time:586626ms step_avg:75.20ms +[2025-09-03 05:27:06] [Rank 0] step:7801/10000 train_time:586626ms step_avg:75.20ms +[2025-09-03 05:27:08] [Rank 0] step:7821/10000 train_time:588081ms step_avg:75.19ms +[2025-09-03 05:27:08] [Rank 0] step:7821/10000 train_time:588081ms step_avg:75.19ms +[2025-09-03 05:27:09] [Rank 0] step:7841/10000 train_time:589672ms 
step_avg:75.20ms +[2025-09-03 05:27:09] [Rank 0] step:7841/10000 train_time:589672ms step_avg:75.20ms +[2025-09-03 05:27:11] [Rank 0] step:7861/10000 train_time:591269ms step_avg:75.22ms +[2025-09-03 05:27:11] [Rank 0] step:7861/10000 train_time:591269ms step_avg:75.22ms +[2025-09-03 05:27:13] [Rank 0] step:7881/10000 train_time:592872ms step_avg:75.23ms +[2025-09-03 05:27:13] [Rank 0] step:7881/10000 train_time:592872ms step_avg:75.23ms +[2025-09-03 05:27:14] [Rank 0] step:7901/10000 train_time:594466ms step_avg:75.24ms +[2025-09-03 05:27:14] [Rank 0] step:7901/10000 train_time:594466ms step_avg:75.24ms +[2025-09-03 05:27:16] [Rank 0] step:7921/10000 train_time:596059ms step_avg:75.25ms +[2025-09-03 05:27:16] [Rank 0] step:7921/10000 train_time:596059ms step_avg:75.25ms +[2025-09-03 05:27:17] [Rank 0] step:7941/10000 train_time:597664ms step_avg:75.26ms +[2025-09-03 05:27:17] [Rank 0] step:7941/10000 train_time:597664ms step_avg:75.26ms +[2025-09-03 05:27:19] [Rank 0] step:7961/10000 train_time:599265ms step_avg:75.28ms +[2025-09-03 05:27:19] [Rank 0] step:7961/10000 train_time:599265ms step_avg:75.28ms +[2025-09-03 05:27:21] [Rank 0] step:7981/10000 train_time:600859ms step_avg:75.29ms +[2025-09-03 05:27:21] [Rank 0] step:7981/10000 train_time:600859ms step_avg:75.29ms +[2025-09-03 05:27:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:27:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:27:34] [Rank 0] PRINT: step:8000/10000 val_loss:3.8975 svd_entropy: attn_qk:H=0.7521,top10E=0.26,eRank=155.5,q75/q25=104.64 attn_vo:H=0.8353,top10E=0.14,eRank=280.6,q75/q25=57.74 mlp_w1:H=0.7861,top10E=0.26,eRank=208.7,q75/q25=24.20 mlp_w2:H=0.8684,top10E=0.11,eRank=328.7,q75/q25=47.67 vo_prod:H=0.7490,top10E=0.22,eRank=153.5,q75/q25=2788.85 train_time:602618ms step_avg:75.33ms +[2025-09-03 05:27:34] [Rank 0] PRINT: step:8000/10000 val_loss:3.8975 svd_entropy: attn_qk:H=0.7521,top10E=0.26,eRank=155.5,q75/q25=104.64 attn_vo:H=0.8353,top10E=0.14,eRank=280.6,q75/q25=57.74 mlp_w1:H=0.7861,top10E=0.26,eRank=208.7,q75/q25=24.20 mlp_w2:H=0.8684,top10E=0.11,eRank=328.7,q75/q25=47.67 vo_prod:H=0.7490,top10E=0.22,eRank=153.5,q75/q25=2788.85 train_time:602618ms step_avg:75.33ms +[2025-09-03 05:27:34] [Rank 0] step:8001/10000 train_time:602630ms step_avg:75.32ms +[2025-09-03 05:27:34] [Rank 0] step:8001/10000 train_time:602630ms step_avg:75.32ms +[2025-09-03 05:27:36] [Rank 0] step:8021/10000 train_time:604068ms step_avg:75.31ms +[2025-09-03 05:27:36] [Rank 0] step:8021/10000 train_time:604068ms step_avg:75.31ms +[2025-09-03 05:27:37] [Rank 0] step:8041/10000 train_time:605676ms step_avg:75.32ms +[2025-09-03 05:27:37] [Rank 0] step:8041/10000 train_time:605676ms step_avg:75.32ms +[2025-09-03 05:27:39] [Rank 0] step:8061/10000 train_time:607270ms step_avg:75.33ms +[2025-09-03 05:27:39] [Rank 0] step:8061/10000 train_time:607270ms step_avg:75.33ms +[2025-09-03 05:27:40] [Rank 0] step:8081/10000 train_time:608857ms step_avg:75.34ms +[2025-09-03 05:27:40] [Rank 0] step:8081/10000 train_time:608857ms step_avg:75.34ms +[2025-09-03 05:27:42] [Rank 0] step:8101/10000 train_time:610459ms step_avg:75.36ms +[2025-09-03 05:27:42] [Rank 0] step:8101/10000 train_time:610459ms step_avg:75.36ms +[2025-09-03 05:27:44] [Rank 0] step:8121/10000 train_time:612054ms step_avg:75.37ms +[2025-09-03 05:27:44] [Rank 0] step:8121/10000 train_time:612054ms step_avg:75.37ms +[2025-09-03 
05:27:45] [Rank 0] step:8141/10000 train_time:613750ms step_avg:75.39ms +[2025-09-03 05:27:45] [Rank 0] step:8141/10000 train_time:613750ms step_avg:75.39ms +[2025-09-03 05:27:47] [Rank 0] step:8161/10000 train_time:615359ms step_avg:75.40ms +[2025-09-03 05:27:47] [Rank 0] step:8161/10000 train_time:615359ms step_avg:75.40ms +[2025-09-03 05:27:49] [Rank 0] step:8181/10000 train_time:616984ms step_avg:75.42ms +[2025-09-03 05:27:49] [Rank 0] step:8181/10000 train_time:616984ms step_avg:75.42ms +[2025-09-03 05:27:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:27:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:28:02] [Rank 0] PRINT: step:8200/10000 val_loss:3.8886 svd_entropy: attn_qk:H=0.7529,top10E=0.26,eRank=156.3,q75/q25=104.22 attn_vo:H=0.8361,top10E=0.14,eRank=281.9,q75/q25=56.92 mlp_w1:H=0.7871,top10E=0.26,eRank=209.9,q75/q25=24.32 mlp_w2:H=0.8689,top10E=0.11,eRank=329.9,q75/q25=47.79 vo_prod:H=0.7501,top10E=0.22,eRank=154.6,q75/q25=2672.63 train_time:618797ms step_avg:75.46ms +[2025-09-03 05:28:02] [Rank 0] PRINT: step:8200/10000 val_loss:3.8886 svd_entropy: attn_qk:H=0.7529,top10E=0.26,eRank=156.3,q75/q25=104.22 attn_vo:H=0.8361,top10E=0.14,eRank=281.9,q75/q25=56.92 mlp_w1:H=0.7871,top10E=0.26,eRank=209.9,q75/q25=24.32 mlp_w2:H=0.8689,top10E=0.11,eRank=329.9,q75/q25=47.79 vo_prod:H=0.7501,top10E=0.22,eRank=154.6,q75/q25=2672.63 train_time:618797ms step_avg:75.46ms +[2025-09-03 05:28:02] [Rank 0] step:8201/10000 train_time:618809ms step_avg:75.46ms +[2025-09-03 05:28:02] [Rank 0] step:8201/10000 train_time:618809ms step_avg:75.46ms +[2025-09-03 05:28:04] [Rank 0] step:8221/10000 train_time:620288ms step_avg:75.45ms +[2025-09-03 05:28:04] [Rank 0] step:8221/10000 train_time:620288ms step_avg:75.45ms +[2025-09-03 05:28:05] [Rank 0] step:8241/10000 train_time:621917ms 
step_avg:75.47ms +[2025-09-03 05:28:05] [Rank 0] step:8241/10000 train_time:621917ms step_avg:75.47ms +[2025-09-03 05:28:07] [Rank 0] step:8261/10000 train_time:623534ms step_avg:75.48ms +[2025-09-03 05:28:07] [Rank 0] step:8261/10000 train_time:623534ms step_avg:75.48ms +[2025-09-03 05:28:09] [Rank 0] step:8281/10000 train_time:625157ms step_avg:75.49ms +[2025-09-03 05:28:09] [Rank 0] step:8281/10000 train_time:625157ms step_avg:75.49ms +[2025-09-03 05:28:10] [Rank 0] step:8301/10000 train_time:626778ms step_avg:75.51ms +[2025-09-03 05:28:10] [Rank 0] step:8301/10000 train_time:626778ms step_avg:75.51ms +[2025-09-03 05:28:12] [Rank 0] step:8321/10000 train_time:628388ms step_avg:75.52ms +[2025-09-03 05:28:12] [Rank 0] step:8321/10000 train_time:628388ms step_avg:75.52ms +[2025-09-03 05:28:14] [Rank 0] step:8341/10000 train_time:630011ms step_avg:75.53ms +[2025-09-03 05:28:14] [Rank 0] step:8341/10000 train_time:630011ms step_avg:75.53ms +[2025-09-03 05:28:15] [Rank 0] step:8361/10000 train_time:631676ms step_avg:75.55ms +[2025-09-03 05:28:15] [Rank 0] step:8361/10000 train_time:631676ms step_avg:75.55ms +[2025-09-03 05:28:17] [Rank 0] step:8381/10000 train_time:633302ms step_avg:75.56ms +[2025-09-03 05:28:17] [Rank 0] step:8381/10000 train_time:633302ms step_avg:75.56ms +[2025-09-03 05:28:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:28:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:28:30] [Rank 0] PRINT: step:8400/10000 val_loss:3.8778 svd_entropy: attn_qk:H=0.7536,top10E=0.26,eRank=157.0,q75/q25=104.02 attn_vo:H=0.8368,top10E=0.14,eRank=283.0,q75/q25=55.87 mlp_w1:H=0.7880,top10E=0.26,eRank=211.1,q75/q25=24.45 mlp_w2:H=0.8693,top10E=0.11,eRank=330.9,q75/q25=47.87 vo_prod:H=0.7511,top10E=0.22,eRank=155.6,q75/q25=2575.91 train_time:635085ms step_avg:75.61ms +[2025-09-03 05:28:30] [Rank 0] PRINT: step:8400/10000 val_loss:3.8778 svd_entropy: attn_qk:H=0.7536,top10E=0.26,eRank=157.0,q75/q25=104.02 attn_vo:H=0.8368,top10E=0.14,eRank=283.0,q75/q25=55.87 mlp_w1:H=0.7880,top10E=0.26,eRank=211.1,q75/q25=24.45 mlp_w2:H=0.8693,top10E=0.11,eRank=330.9,q75/q25=47.87 vo_prod:H=0.7511,top10E=0.22,eRank=155.6,q75/q25=2575.91 train_time:635085ms step_avg:75.61ms +[2025-09-03 05:28:31] [Rank 0] step:8401/10000 train_time:635097ms step_avg:75.60ms +[2025-09-03 05:28:31] [Rank 0] step:8401/10000 train_time:635097ms step_avg:75.60ms +[2025-09-03 05:28:32] [Rank 0] step:8421/10000 train_time:636559ms step_avg:75.59ms +[2025-09-03 05:28:32] [Rank 0] step:8421/10000 train_time:636559ms step_avg:75.59ms +[2025-09-03 05:28:34] [Rank 0] step:8441/10000 train_time:638177ms step_avg:75.60ms +[2025-09-03 05:28:34] [Rank 0] step:8441/10000 train_time:638177ms step_avg:75.60ms +[2025-09-03 05:28:35] [Rank 0] step:8461/10000 train_time:639794ms step_avg:75.62ms +[2025-09-03 05:28:35] [Rank 0] step:8461/10000 train_time:639794ms step_avg:75.62ms +[2025-09-03 05:28:37] [Rank 0] step:8481/10000 train_time:641421ms step_avg:75.63ms +[2025-09-03 05:28:37] [Rank 0] step:8481/10000 train_time:641421ms step_avg:75.63ms +[2025-09-03 05:28:39] [Rank 0] step:8501/10000 train_time:643060ms step_avg:75.65ms +[2025-09-03 05:28:39] [Rank 0] step:8501/10000 train_time:643060ms step_avg:75.65ms +[2025-09-03 05:28:40] [Rank 0] step:8521/10000 train_time:644686ms step_avg:75.66ms +[2025-09-03 05:28:40] [Rank 0] step:8521/10000 train_time:644686ms step_avg:75.66ms +[2025-09-03 
05:28:42] [Rank 0] step:8541/10000 train_time:646322ms step_avg:75.67ms +[2025-09-03 05:28:42] [Rank 0] step:8541/10000 train_time:646322ms step_avg:75.67ms +[2025-09-03 05:28:44] [Rank 0] step:8561/10000 train_time:647945ms step_avg:75.69ms +[2025-09-03 05:28:44] [Rank 0] step:8561/10000 train_time:647945ms step_avg:75.69ms +[2025-09-03 05:28:45] [Rank 0] step:8581/10000 train_time:649572ms step_avg:75.70ms +[2025-09-03 05:28:45] [Rank 0] step:8581/10000 train_time:649572ms step_avg:75.70ms +[2025-09-03 05:28:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:28:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:28:59] [Rank 0] PRINT: step:8600/10000 val_loss:3.8686 svd_entropy: attn_qk:H=0.7543,top10E=0.26,eRank=157.7,q75/q25=103.88 attn_vo:H=0.8374,top10E=0.14,eRank=283.9,q75/q25=55.10 mlp_w1:H=0.7888,top10E=0.26,eRank=212.1,q75/q25=24.55 mlp_w2:H=0.8697,top10E=0.11,eRank=331.8,q75/q25=48.00 vo_prod:H=0.7520,top10E=0.22,eRank=156.5,q75/q25=2490.41 train_time:651348ms step_avg:75.74ms +[2025-09-03 05:28:59] [Rank 0] PRINT: step:8600/10000 val_loss:3.8686 svd_entropy: attn_qk:H=0.7543,top10E=0.26,eRank=157.7,q75/q25=103.88 attn_vo:H=0.8374,top10E=0.14,eRank=283.9,q75/q25=55.10 mlp_w1:H=0.7888,top10E=0.26,eRank=212.1,q75/q25=24.55 mlp_w2:H=0.8697,top10E=0.11,eRank=331.8,q75/q25=48.00 vo_prod:H=0.7520,top10E=0.22,eRank=156.5,q75/q25=2490.41 train_time:651348ms step_avg:75.74ms +[2025-09-03 05:28:59] [Rank 0] step:8601/10000 train_time:651360ms step_avg:75.73ms +[2025-09-03 05:28:59] [Rank 0] step:8601/10000 train_time:651360ms step_avg:75.73ms +[2025-09-03 05:29:00] [Rank 0] step:8621/10000 train_time:652829ms step_avg:75.73ms +[2025-09-03 05:29:00] [Rank 0] step:8621/10000 train_time:652829ms step_avg:75.73ms +[2025-09-03 05:29:02] [Rank 0] step:8641/10000 train_time:654450ms 
step_avg:75.74ms +[2025-09-03 05:29:02] [Rank 0] step:8641/10000 train_time:654450ms step_avg:75.74ms +[2025-09-03 05:29:04] [Rank 0] step:8661/10000 train_time:656073ms step_avg:75.75ms +[2025-09-03 05:29:04] [Rank 0] step:8661/10000 train_time:656073ms step_avg:75.75ms +[2025-09-03 05:29:05] [Rank 0] step:8681/10000 train_time:657690ms step_avg:75.76ms +[2025-09-03 05:29:05] [Rank 0] step:8681/10000 train_time:657690ms step_avg:75.76ms +[2025-09-03 05:29:07] [Rank 0] step:8701/10000 train_time:659305ms step_avg:75.77ms +[2025-09-03 05:29:07] [Rank 0] step:8701/10000 train_time:659305ms step_avg:75.77ms +[2025-09-03 05:29:09] [Rank 0] step:8721/10000 train_time:660926ms step_avg:75.79ms +[2025-09-03 05:29:09] [Rank 0] step:8721/10000 train_time:660926ms step_avg:75.79ms +[2025-09-03 05:29:10] [Rank 0] step:8741/10000 train_time:662536ms step_avg:75.80ms +[2025-09-03 05:29:10] [Rank 0] step:8741/10000 train_time:662536ms step_avg:75.80ms +[2025-09-03 05:29:12] [Rank 0] step:8761/10000 train_time:664152ms step_avg:75.81ms +[2025-09-03 05:29:12] [Rank 0] step:8761/10000 train_time:664152ms step_avg:75.81ms +[2025-09-03 05:29:13] [Rank 0] step:8781/10000 train_time:665785ms step_avg:75.82ms +[2025-09-03 05:29:13] [Rank 0] step:8781/10000 train_time:665785ms step_avg:75.82ms +[2025-09-03 05:29:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:29:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:29:27] [Rank 0] PRINT: step:8800/10000 val_loss:3.8593 svd_entropy: attn_qk:H=0.7549,top10E=0.26,eRank=158.2,q75/q25=103.77 attn_vo:H=0.8380,top10E=0.14,eRank=284.8,q75/q25=54.36 mlp_w1:H=0.7896,top10E=0.26,eRank=213.1,q75/q25=24.69 mlp_w2:H=0.8701,top10E=0.11,eRank=332.6,q75/q25=48.00 vo_prod:H=0.7528,top10E=0.22,eRank=157.4,q75/q25=2412.54 train_time:667572ms step_avg:75.86ms +[2025-09-03 05:29:27] [Rank 0] PRINT: step:8800/10000 val_loss:3.8593 svd_entropy: attn_qk:H=0.7549,top10E=0.26,eRank=158.2,q75/q25=103.77 attn_vo:H=0.8380,top10E=0.14,eRank=284.8,q75/q25=54.36 mlp_w1:H=0.7896,top10E=0.26,eRank=213.1,q75/q25=24.69 mlp_w2:H=0.8701,top10E=0.11,eRank=332.6,q75/q25=48.00 vo_prod:H=0.7528,top10E=0.22,eRank=157.4,q75/q25=2412.54 train_time:667572ms step_avg:75.86ms +[2025-09-03 05:29:27] [Rank 0] step:8801/10000 train_time:667584ms step_avg:75.85ms +[2025-09-03 05:29:27] [Rank 0] step:8801/10000 train_time:667584ms step_avg:75.85ms +[2025-09-03 05:29:28] [Rank 0] step:8821/10000 train_time:669039ms step_avg:75.85ms +[2025-09-03 05:29:28] [Rank 0] step:8821/10000 train_time:669039ms step_avg:75.85ms +[2025-09-03 05:29:30] [Rank 0] step:8841/10000 train_time:670681ms step_avg:75.86ms +[2025-09-03 05:29:30] [Rank 0] step:8841/10000 train_time:670681ms step_avg:75.86ms +[2025-09-03 05:29:32] [Rank 0] step:8861/10000 train_time:672303ms step_avg:75.87ms +[2025-09-03 05:29:32] [Rank 0] step:8861/10000 train_time:672303ms step_avg:75.87ms +[2025-09-03 05:29:33] [Rank 0] step:8881/10000 train_time:673927ms step_avg:75.88ms +[2025-09-03 05:29:33] [Rank 0] step:8881/10000 train_time:673927ms step_avg:75.88ms +[2025-09-03 05:29:35] [Rank 0] step:8901/10000 train_time:675554ms step_avg:75.90ms +[2025-09-03 05:29:35] [Rank 0] step:8901/10000 train_time:675554ms step_avg:75.90ms +[2025-09-03 05:29:37] [Rank 0] step:8921/10000 train_time:677183ms step_avg:75.91ms +[2025-09-03 05:29:37] [Rank 0] step:8921/10000 train_time:677183ms step_avg:75.91ms +[2025-09-03 
05:29:38] [Rank 0] step:8941/10000 train_time:678815ms step_avg:75.92ms +[2025-09-03 05:29:38] [Rank 0] step:8941/10000 train_time:678815ms step_avg:75.92ms +[2025-09-03 05:29:40] [Rank 0] step:8961/10000 train_time:680433ms step_avg:75.93ms +[2025-09-03 05:29:40] [Rank 0] step:8961/10000 train_time:680433ms step_avg:75.93ms +[2025-09-03 05:29:41] [Rank 0] step:8981/10000 train_time:682050ms step_avg:75.94ms +[2025-09-03 05:29:41] [Rank 0] step:8981/10000 train_time:682050ms step_avg:75.94ms +[2025-09-03 05:29:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:29:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:29:55] [Rank 0] PRINT: step:9000/10000 val_loss:3.8495 svd_entropy: attn_qk:H=0.7554,top10E=0.26,eRank=158.8,q75/q25=103.23 attn_vo:H=0.8384,top10E=0.14,eRank=285.6,q75/q25=53.80 mlp_w1:H=0.7902,top10E=0.25,eRank=213.8,q75/q25=24.79 mlp_w2:H=0.8704,top10E=0.11,eRank=333.4,q75/q25=48.00 vo_prod:H=0.7535,top10E=0.22,eRank=158.1,q75/q25=2352.02 train_time:683833ms step_avg:75.98ms +[2025-09-03 05:29:55] [Rank 0] PRINT: step:9000/10000 val_loss:3.8495 svd_entropy: attn_qk:H=0.7554,top10E=0.26,eRank=158.8,q75/q25=103.23 attn_vo:H=0.8384,top10E=0.14,eRank=285.6,q75/q25=53.80 mlp_w1:H=0.7902,top10E=0.25,eRank=213.8,q75/q25=24.79 mlp_w2:H=0.8704,top10E=0.11,eRank=333.4,q75/q25=48.00 vo_prod:H=0.7535,top10E=0.22,eRank=158.1,q75/q25=2352.02 train_time:683833ms step_avg:75.98ms +[2025-09-03 05:29:55] [Rank 0] step:9001/10000 train_time:683846ms step_avg:75.97ms +[2025-09-03 05:29:55] [Rank 0] step:9001/10000 train_time:683846ms step_avg:75.97ms +[2025-09-03 05:29:57] [Rank 0] step:9021/10000 train_time:685306ms step_avg:75.97ms +[2025-09-03 05:29:57] [Rank 0] step:9021/10000 train_time:685306ms step_avg:75.97ms +[2025-09-03 05:29:58] [Rank 0] step:9041/10000 train_time:686923ms 
step_avg:75.98ms +[2025-09-03 05:29:58] [Rank 0] step:9041/10000 train_time:686923ms step_avg:75.98ms +[2025-09-03 05:30:00] [Rank 0] step:9061/10000 train_time:688559ms step_avg:75.99ms +[2025-09-03 05:30:00] [Rank 0] step:9061/10000 train_time:688559ms step_avg:75.99ms +[2025-09-03 05:30:01] [Rank 0] step:9081/10000 train_time:690190ms step_avg:76.00ms +[2025-09-03 05:30:01] [Rank 0] step:9081/10000 train_time:690190ms step_avg:76.00ms +[2025-09-03 05:30:03] [Rank 0] step:9101/10000 train_time:691833ms step_avg:76.02ms +[2025-09-03 05:30:03] [Rank 0] step:9101/10000 train_time:691833ms step_avg:76.02ms +[2025-09-03 05:30:05] [Rank 0] step:9121/10000 train_time:693461ms step_avg:76.03ms +[2025-09-03 05:30:05] [Rank 0] step:9121/10000 train_time:693461ms step_avg:76.03ms +[2025-09-03 05:30:06] [Rank 0] step:9141/10000 train_time:695072ms step_avg:76.04ms +[2025-09-03 05:30:06] [Rank 0] step:9141/10000 train_time:695072ms step_avg:76.04ms +[2025-09-03 05:30:08] [Rank 0] step:9161/10000 train_time:696688ms step_avg:76.05ms +[2025-09-03 05:30:08] [Rank 0] step:9161/10000 train_time:696688ms step_avg:76.05ms +[2025-09-03 05:30:10] [Rank 0] step:9181/10000 train_time:698344ms step_avg:76.06ms +[2025-09-03 05:30:10] [Rank 0] step:9181/10000 train_time:698344ms step_avg:76.06ms +[2025-09-03 05:30:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:30:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:30:23] [Rank 0] PRINT: step:9200/10000 val_loss:3.8415 svd_entropy: attn_qk:H=0.7558,top10E=0.25,eRank=159.2,q75/q25=102.98 attn_vo:H=0.8388,top10E=0.14,eRank=286.3,q75/q25=53.08 mlp_w1:H=0.7907,top10E=0.25,eRank=214.5,q75/q25=24.90 mlp_w2:H=0.8707,top10E=0.11,eRank=334.1,q75/q25=48.07 vo_prod:H=0.7541,top10E=0.22,eRank=158.7,q75/q25=2264.88 train_time:700131ms step_avg:76.10ms +[2025-09-03 05:30:23] [Rank 0] PRINT: step:9200/10000 val_loss:3.8415 svd_entropy: attn_qk:H=0.7558,top10E=0.25,eRank=159.2,q75/q25=102.98 attn_vo:H=0.8388,top10E=0.14,eRank=286.3,q75/q25=53.08 mlp_w1:H=0.7907,top10E=0.25,eRank=214.5,q75/q25=24.90 mlp_w2:H=0.8707,top10E=0.11,eRank=334.1,q75/q25=48.07 vo_prod:H=0.7541,top10E=0.22,eRank=158.7,q75/q25=2264.88 train_time:700131ms step_avg:76.10ms +[2025-09-03 05:30:23] [Rank 0] step:9201/10000 train_time:700144ms step_avg:76.09ms +[2025-09-03 05:30:23] [Rank 0] step:9201/10000 train_time:700144ms step_avg:76.09ms +[2025-09-03 05:30:25] [Rank 0] step:9221/10000 train_time:701637ms step_avg:76.09ms +[2025-09-03 05:30:25] [Rank 0] step:9221/10000 train_time:701637ms step_avg:76.09ms +[2025-09-03 05:30:26] [Rank 0] step:9241/10000 train_time:703272ms step_avg:76.10ms +[2025-09-03 05:30:26] [Rank 0] step:9241/10000 train_time:703272ms step_avg:76.10ms +[2025-09-03 05:30:28] [Rank 0] step:9261/10000 train_time:704908ms step_avg:76.12ms +[2025-09-03 05:30:28] [Rank 0] step:9261/10000 train_time:704908ms step_avg:76.12ms +[2025-09-03 05:30:30] [Rank 0] step:9281/10000 train_time:706531ms step_avg:76.13ms +[2025-09-03 05:30:30] [Rank 0] step:9281/10000 train_time:706531ms step_avg:76.13ms +[2025-09-03 05:30:31] [Rank 0] step:9301/10000 train_time:708161ms step_avg:76.14ms +[2025-09-03 05:30:31] [Rank 0] step:9301/10000 train_time:708161ms step_avg:76.14ms +[2025-09-03 05:30:33] [Rank 0] step:9321/10000 train_time:709789ms step_avg:76.15ms +[2025-09-03 05:30:33] [Rank 0] step:9321/10000 train_time:709789ms step_avg:76.15ms +[2025-09-03 
05:30:34] [Rank 0] step:9341/10000 train_time:711417ms step_avg:76.16ms +[2025-09-03 05:30:34] [Rank 0] step:9341/10000 train_time:711417ms step_avg:76.16ms +[2025-09-03 05:30:36] [Rank 0] step:9361/10000 train_time:713050ms step_avg:76.17ms +[2025-09-03 05:30:36] [Rank 0] step:9361/10000 train_time:713050ms step_avg:76.17ms +[2025-09-03 05:30:38] [Rank 0] step:9381/10000 train_time:714690ms step_avg:76.18ms +[2025-09-03 05:30:38] [Rank 0] step:9381/10000 train_time:714690ms step_avg:76.18ms +[2025-09-03 05:30:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:30:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:30:51] [Rank 0] PRINT: step:9400/10000 val_loss:3.8340 svd_entropy: attn_qk:H=0.7562,top10E=0.25,eRank=159.5,q75/q25=102.90 attn_vo:H=0.8392,top10E=0.14,eRank=286.8,q75/q25=52.64 mlp_w1:H=0.7911,top10E=0.25,eRank=215.1,q75/q25=24.88 mlp_w2:H=0.8709,top10E=0.11,eRank=334.6,q75/q25=48.12 vo_prod:H=0.7546,top10E=0.22,eRank=159.3,q75/q25=2219.50 train_time:716488ms step_avg:76.22ms +[2025-09-03 05:30:51] [Rank 0] PRINT: step:9400/10000 val_loss:3.8340 svd_entropy: attn_qk:H=0.7562,top10E=0.25,eRank=159.5,q75/q25=102.90 attn_vo:H=0.8392,top10E=0.14,eRank=286.8,q75/q25=52.64 mlp_w1:H=0.7911,top10E=0.25,eRank=215.1,q75/q25=24.88 mlp_w2:H=0.8709,top10E=0.11,eRank=334.6,q75/q25=48.12 vo_prod:H=0.7546,top10E=0.22,eRank=159.3,q75/q25=2219.50 train_time:716488ms step_avg:76.22ms +[2025-09-03 05:30:51] [Rank 0] step:9401/10000 train_time:716501ms step_avg:76.22ms +[2025-09-03 05:30:51] [Rank 0] step:9401/10000 train_time:716501ms step_avg:76.22ms +[2025-09-03 05:30:53] [Rank 0] step:9421/10000 train_time:717972ms step_avg:76.21ms +[2025-09-03 05:30:53] [Rank 0] step:9421/10000 train_time:717972ms step_avg:76.21ms +[2025-09-03 05:30:54] [Rank 0] step:9441/10000 train_time:719597ms 
step_avg:76.22ms +[2025-09-03 05:30:54] [Rank 0] step:9441/10000 train_time:719597ms step_avg:76.22ms +[2025-09-03 05:30:56] [Rank 0] step:9461/10000 train_time:721230ms step_avg:76.23ms +[2025-09-03 05:30:56] [Rank 0] step:9461/10000 train_time:721230ms step_avg:76.23ms +[2025-09-03 05:30:58] [Rank 0] step:9481/10000 train_time:722859ms step_avg:76.24ms +[2025-09-03 05:30:58] [Rank 0] step:9481/10000 train_time:722859ms step_avg:76.24ms +[2025-09-03 05:30:59] [Rank 0] step:9501/10000 train_time:724498ms step_avg:76.25ms +[2025-09-03 05:30:59] [Rank 0] step:9501/10000 train_time:724498ms step_avg:76.25ms +[2025-09-03 05:31:01] [Rank 0] step:9521/10000 train_time:726116ms step_avg:76.26ms +[2025-09-03 05:31:01] [Rank 0] step:9521/10000 train_time:726116ms step_avg:76.26ms +[2025-09-03 05:31:03] [Rank 0] step:9541/10000 train_time:727745ms step_avg:76.28ms +[2025-09-03 05:31:03] [Rank 0] step:9541/10000 train_time:727745ms step_avg:76.28ms +[2025-09-03 05:31:04] [Rank 0] step:9561/10000 train_time:729369ms step_avg:76.29ms +[2025-09-03 05:31:04] [Rank 0] step:9561/10000 train_time:729369ms step_avg:76.29ms +[2025-09-03 05:31:06] [Rank 0] step:9581/10000 train_time:730994ms step_avg:76.30ms +[2025-09-03 05:31:06] [Rank 0] step:9581/10000 train_time:730994ms step_avg:76.30ms +[2025-09-03 05:31:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:31:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:31:19] [Rank 0] PRINT: step:9600/10000 val_loss:3.8281 svd_entropy: attn_qk:H=0.7565,top10E=0.25,eRank=159.8,q75/q25=103.14 attn_vo:H=0.8395,top10E=0.14,eRank=287.3,q75/q25=52.31 mlp_w1:H=0.7915,top10E=0.25,eRank=215.6,q75/q25=24.89 mlp_w2:H=0.8711,top10E=0.11,eRank=335.1,q75/q25=48.11 vo_prod:H=0.7550,top10E=0.22,eRank=159.7,q75/q25=2179.77 train_time:732795ms step_avg:76.33ms +[2025-09-03 05:31:19] [Rank 0] PRINT: step:9600/10000 val_loss:3.8281 svd_entropy: attn_qk:H=0.7565,top10E=0.25,eRank=159.8,q75/q25=103.14 attn_vo:H=0.8395,top10E=0.14,eRank=287.3,q75/q25=52.31 mlp_w1:H=0.7915,top10E=0.25,eRank=215.6,q75/q25=24.89 mlp_w2:H=0.8711,top10E=0.11,eRank=335.1,q75/q25=48.11 vo_prod:H=0.7550,top10E=0.22,eRank=159.7,q75/q25=2179.77 train_time:732795ms step_avg:76.33ms +[2025-09-03 05:31:19] [Rank 0] step:9601/10000 train_time:732807ms step_avg:76.33ms +[2025-09-03 05:31:19] [Rank 0] step:9601/10000 train_time:732807ms step_avg:76.33ms +[2025-09-03 05:31:21] [Rank 0] step:9621/10000 train_time:734285ms step_avg:76.32ms +[2025-09-03 05:31:21] [Rank 0] step:9621/10000 train_time:734285ms step_avg:76.32ms +[2025-09-03 05:31:22] [Rank 0] step:9641/10000 train_time:735916ms step_avg:76.33ms +[2025-09-03 05:31:22] [Rank 0] step:9641/10000 train_time:735916ms step_avg:76.33ms +[2025-09-03 05:31:24] [Rank 0] step:9661/10000 train_time:737571ms step_avg:76.35ms +[2025-09-03 05:31:24] [Rank 0] step:9661/10000 train_time:737571ms step_avg:76.35ms +[2025-09-03 05:31:26] [Rank 0] step:9681/10000 train_time:739220ms step_avg:76.36ms +[2025-09-03 05:31:26] [Rank 0] step:9681/10000 train_time:739220ms step_avg:76.36ms +[2025-09-03 05:31:27] [Rank 0] step:9701/10000 train_time:740886ms step_avg:76.37ms +[2025-09-03 05:31:27] [Rank 0] step:9701/10000 train_time:740886ms step_avg:76.37ms +[2025-09-03 05:31:29] [Rank 0] step:9721/10000 train_time:742533ms step_avg:76.38ms +[2025-09-03 05:31:29] [Rank 0] step:9721/10000 train_time:742533ms step_avg:76.38ms +[2025-09-03 
05:31:31] [Rank 0] step:9741/10000 train_time:744205ms step_avg:76.40ms +[2025-09-03 05:31:31] [Rank 0] step:9741/10000 train_time:744205ms step_avg:76.40ms +[2025-09-03 05:31:32] [Rank 0] step:9761/10000 train_time:745854ms step_avg:76.41ms +[2025-09-03 05:31:32] [Rank 0] step:9761/10000 train_time:745854ms step_avg:76.41ms +[2025-09-03 05:31:34] [Rank 0] step:9781/10000 train_time:747516ms step_avg:76.43ms +[2025-09-03 05:31:34] [Rank 0] step:9781/10000 train_time:747516ms step_avg:76.43ms +[2025-09-03 05:31:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:31:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:31:47] [Rank 0] PRINT: step:9800/10000 val_loss:3.8220 svd_entropy: attn_qk:H=0.7568,top10E=0.25,eRank=160.1,q75/q25=103.07 attn_vo:H=0.8397,top10E=0.14,eRank=287.7,q75/q25=51.99 mlp_w1:H=0.7918,top10E=0.25,eRank=216.0,q75/q25=24.96 mlp_w2:H=0.8713,top10E=0.11,eRank=335.5,q75/q25=48.10 vo_prod:H=0.7553,top10E=0.22,eRank=160.1,q75/q25=2151.23 train_time:749351ms step_avg:76.46ms +[2025-09-03 05:31:47] [Rank 0] PRINT: step:9800/10000 val_loss:3.8220 svd_entropy: attn_qk:H=0.7568,top10E=0.25,eRank=160.1,q75/q25=103.07 attn_vo:H=0.8397,top10E=0.14,eRank=287.7,q75/q25=51.99 mlp_w1:H=0.7918,top10E=0.25,eRank=216.0,q75/q25=24.96 mlp_w2:H=0.8713,top10E=0.11,eRank=335.5,q75/q25=48.10 vo_prod:H=0.7553,top10E=0.22,eRank=160.1,q75/q25=2151.23 train_time:749351ms step_avg:76.46ms +[2025-09-03 05:31:48] [Rank 0] step:9801/10000 train_time:749363ms step_avg:76.46ms +[2025-09-03 05:31:48] [Rank 0] step:9801/10000 train_time:749363ms step_avg:76.46ms +[2025-09-03 05:31:49] [Rank 0] step:9821/10000 train_time:750855ms step_avg:76.45ms +[2025-09-03 05:31:49] [Rank 0] step:9821/10000 train_time:750855ms step_avg:76.45ms +[2025-09-03 05:31:51] [Rank 0] step:9841/10000 train_time:752520ms 
step_avg:76.47ms +[2025-09-03 05:31:51] [Rank 0] step:9841/10000 train_time:752520ms step_avg:76.47ms +[2025-09-03 05:31:52] [Rank 0] step:9861/10000 train_time:754164ms step_avg:76.48ms +[2025-09-03 05:31:52] [Rank 0] step:9861/10000 train_time:754164ms step_avg:76.48ms +[2025-09-03 05:31:54] [Rank 0] step:9881/10000 train_time:755806ms step_avg:76.49ms +[2025-09-03 05:31:54] [Rank 0] step:9881/10000 train_time:755806ms step_avg:76.49ms +[2025-09-03 05:31:56] [Rank 0] step:9901/10000 train_time:757465ms step_avg:76.50ms +[2025-09-03 05:31:56] [Rank 0] step:9901/10000 train_time:757465ms step_avg:76.50ms +[2025-09-03 05:31:57] [Rank 0] step:9921/10000 train_time:759111ms step_avg:76.52ms +[2025-09-03 05:31:57] [Rank 0] step:9921/10000 train_time:759111ms step_avg:76.52ms +[2025-09-03 05:31:59] [Rank 0] step:9941/10000 train_time:760769ms step_avg:76.53ms +[2025-09-03 05:31:59] [Rank 0] step:9941/10000 train_time:760769ms step_avg:76.53ms +[2025-09-03 05:32:01] [Rank 0] step:9961/10000 train_time:762419ms step_avg:76.54ms +[2025-09-03 05:32:01] [Rank 0] step:9961/10000 train_time:762419ms step_avg:76.54ms +[2025-09-03 05:32:02] [Rank 0] step:9981/10000 train_time:764071ms step_avg:76.55ms +[2025-09-03 05:32:02] [Rank 0] step:9981/10000 train_time:764071ms step_avg:76.55ms +[2025-09-03 05:32:04] [Rank 0] step:10000/10000 train_time:765649ms step_avg:76.56ms +[2025-09-03 05:32:04] [Rank 0] step:10000/10000 train_time:765649ms step_avg:76.56ms +[2025-09-03 05:32:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-09-03 05:32:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-09-03 05:32:16] [Rank 0] PRINT: step:10000/10000 val_loss:3.8165 svd_entropy: attn_qk:H=0.7569,top10E=0.25,eRank=160.2,q75/q25=103.05 attn_vo:H=0.8399,top10E=0.14,eRank=287.9,q75/q25=51.78 mlp_w1:H=0.7920,top10E=0.25,eRank=216.2,q75/q25=24.97 mlp_w2:H=0.8714,top10E=0.11,eRank=335.7,q75/q25=48.06 vo_prod:H=0.7556,top10E=0.21,eRank=160.3,q75/q25=2127.04 train_time:765904ms step_avg:76.59ms +[2025-09-03 05:32:16] [Rank 0] PRINT: step:10000/10000 val_loss:3.8165 svd_entropy: attn_qk:H=0.7569,top10E=0.25,eRank=160.2,q75/q25=103.05 attn_vo:H=0.8399,top10E=0.14,eRank=287.9,q75/q25=51.78 mlp_w1:H=0.7920,top10E=0.25,eRank=216.2,q75/q25=24.97 mlp_w2:H=0.8714,top10E=0.11,eRank=335.7,q75/q25=48.06 vo_prod:H=0.7556,top10E=0.21,eRank=160.3,q75/q25=2127.04 train_time:765904ms step_avg:76.59ms +[2025-09-03 05:32:16] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 05:32:16 2025 --- +[2025-09-03 05:32:16] [Rank 0] PRINT: --- Training Finished: Wed Sep 3 05:32:16 2025 --- +[2025-09-03 05:32:16] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14356 MiB +[2025-09-03 05:32:16] [Rank 0] PRINT: Peak memory allocated: 10196 MiB reserved: 14356 MiB